diff --git a/BUILD.gn b/BUILD.gn index 40e691379fdb548eb9a6292775b15262bfa1c51d..07bdc8aeacef192e7c4b65a93c746c9b75403f35 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -66,3 +66,9 @@ group("maplegendef") { ]) } } + +group("mplsh-lmbc") { + deps = [ "${MAPLEENG_ROOT}/lmbc:mplsh-lmbc" ] +} + + diff --git a/Makefile b/Makefile index e3a7e3bc88374fd545074c6ad755af9a98538cf7..11deae88aa41ab16cd3b4db6ef1400c774a8de53 100644 --- a/Makefile +++ b/Makefile @@ -173,6 +173,11 @@ ctorture: ctorture2: (cd third_party/ctorture; git checkout .; git pull; ./run.sh work.list hir2mpl) +.PHONY: mplsh-lmbc +mplsh-lmbc: + $(call build_gn, $(GN_OPTIONS), mplsh-lmbc) + + THREADS := 50 ifneq ($(findstring test,$(MAKECMDGOALS)),) TESTTARGET := $(MAKECMDGOALS) diff --git a/build/config/BUILDCONFIG.gn b/build/config/BUILDCONFIG.gn index 675724a3a732318314e6e0bdd3c8b57f62eeee75..9de64def59a5c3867a735c2bd09ecebe63e14218 100755 --- a/build/config/BUILDCONFIG.gn +++ b/build/config/BUILDCONFIG.gn @@ -67,6 +67,7 @@ MAPLEALL_ROOT = "${MAPLE_ROOT}/src/mapleall" HIR2MPL_ROOT = "${MAPLE_ROOT}/src/hir2mpl" MAPLE_MRT_ROOT = "${MAPLE_ROOT}/src/mrt" THIRD_PARTY_ROOT = "${MAPLE_ROOT}/third_party" +MAPLEENG_ROOT = "${MAPLE_ROOT}/src/MapleEng" # Put all built library files under lib GN_ARCHIVE_OUTPUT_DIRECTORY = "${MAPLE_BUILD_OUTPUT}/ar/host-x86_64-${OPT}" diff --git a/src/MapleEng/BUILD.gn b/src/MapleEng/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..9ec5543293ca8ba346c786429408048f37b1fbb3 --- /dev/null +++ b/src/MapleEng/BUILD.gn @@ -0,0 +1,19 @@ +# +# Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + +group("mplsh-lmbc") { + deps = [ "${MAPLEENG_ROOT}/lmbc:mplsh-lmbc" ] +} diff --git a/src/MapleEng/Readme.md b/src/MapleEng/Readme.md new file mode 100644 index 0000000000000000000000000000000000000000..5604ff83b7b0c81c66edb2bb9058833f2afad646 --- /dev/null +++ b/src/MapleEng/Readme.md @@ -0,0 +1,45 @@ +``` +# +# Copyright (C) [2022] Futurewei Technologies, Inc. All rights reserved. +# +# OpenArkCompiler is licensed under the Mulan Permissive Software License v2. +# You can use this software according to the terms and conditions of the MulanPSL - 2.0. +# You may obtain a copy of MulanPSL - 2.0 at: +# +# https://opensource.org/licenses/MulanPSL-2.0 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the MulanPSL - 2.0 for more details. +# +``` +## Maple Engine for Lmbc + Maple Engine for Lmbc is an interpreter that executes Lmbc files (.lmbc) that the + OpenArkCompiler generates. C source code is first parsed by C to Maple frontends, which + can be either hir2mpl or clang2mpl, and the output is then compiled by OpenArkCompiler + into .lmbc (lowered Maple bytecode format) format to be executed by Maple Engine for Lmbc.
+ +## Build OpenArkCompiler and engine + The following build example assumes OpenArkCompiler root directory at ~/OpenArkCompiler: +``` + cd ~/OpenArkCompiler + source build/envsetup.sh arm release + make + make clang2mpl + make mplsh-lmbc +``` +## Build and run a C app + The following example compiles a C demo program at ~/OpenArkCompiler/test/c_demo/ to lmbc + and runs it with Maple Engine for Lmbc. +``` + cd $MAPLE_ROOT/test/c_demo + $MAPLE_ROOT/src/MapleEng/lmbc/test/c2lmbc.sh printHuawei.c + $MAPLE_EXECUTE_BIN/mplsh-lmbc printHuawei.lmbc +``` +## Running ctorture with Maple Engine for Lmbc +``` + git clone https://gitee.com/hu-_-wen/ctorture + cd ctorture + ./mpleng.sh mpleng.list +``` diff --git a/src/MapleEng/lmbc/BUILD.gn b/src/MapleEng/lmbc/BUILD.gn new file mode 100755 index 0000000000000000000000000000000000000000..4f1012ff5e065a4b4e54149365b2ae42cb5aada8 --- /dev/null +++ b/src/MapleEng/lmbc/BUILD.gn @@ -0,0 +1,55 @@ +# +# Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details.
+# +configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + +include_directories = [ + "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/mpl2mpl/include", + "${MAPLEALL_ROOT}/maple_util/include", + "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/mempool/include", + "${THIRD_PARTY_ROOT}/bounds_checking_function/include", + "${MAPLEALL_ROOT}/maple_ipa/include", + "${MAPLEALL_ROOT}/maple_ipa/include/old", + "${MAPLEALL_ROOT}/maple_me/include", + "${MAPLEALL_ROOT}/maple_phase/include", + "${MAPLEALL_ROOT}/maple_be/include/be", + "${MAPLEENG_ROOT}/lmbc/include" +] + +src_mplsh_lmbc = [ + "src/mplsh.cpp", + "src/init.cpp", + "src/mfunction.cpp", + "src/eng_shim.cpp", + "src/load_store.cpp", + "src/invoke_method.cpp", +] + +executable("mplsh-lmbc") { + sources = src_mplsh_lmbc + include_dirs = include_directories + deps = [ + "${MAPLEALL_ROOT}/maple_ir:libmplir", + "${MAPLEALL_ROOT}/mempool:libmempool", + "${MAPLEALL_ROOT}/mpl2mpl:libmpl2mpl", + "${THIRD_PARTY_ROOT}/bounds_checking_function:libHWSecureC", + ] + + libs = [ + "ffi", + "dl", + ] +} diff --git a/src/MapleEng/lmbc/include/eng_shim.h b/src/MapleEng/lmbc/include/eng_shim.h new file mode 100644 index 0000000000000000000000000000000000000000..8e67057c16545ccbdbe23098e79c1ebf37d7bd7a --- /dev/null +++ b/src/MapleEng/lmbc/include/eng_shim.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MPLENG_SHIM_H_ +#define MPLENG_SHIM_H_ + +#include +#include "lmbc_eng.h" + +namespace maple { + +extern "C" int64_t MplEngShim(LmbcFunc*, ...); + +} + +#endif // MPLENG_SHIM_H_ diff --git a/src/MapleEng/lmbc/include/lmbc_eng.h b/src/MapleEng/lmbc/include/lmbc_eng.h new file mode 100644 index 0000000000000000000000000000000000000000..2fb0a21eae78f538bf7da4056022ea09cd2d3ead --- /dev/null +++ b/src/MapleEng/lmbc/include/lmbc_eng.h @@ -0,0 +1,151 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MPLENG_LMBC_H_ +#define MPLENG_LMBC_H_ + +#include +#include +#include +#include +#include "mir_parser.h" +#include "bin_mplt.h" +#include "opcode_info.h" +#include "mir_function.h" +#include "constantfold.h" +#include "mir_type.h" +#include "mvalue.h" +#include "global_tables.h" +#include "mfunction.h" +#include "common_utils.h" + +namespace maple { +class LmbcMod; + +// Parameter and variable info +struct ParmInf { + PrimType ptyp; + size_t size; + bool isPreg; + bool isVararg; + // use scenarios for storeIdx field: + // ParmInf - idx into pReg array if isPreg (formal is preg) + // ParmInf - idx into formalvar array if formal is var symbol + // ParmInf - offset into func formalagg mem block if PTY_agg formal arg + // ParmInf - offset into func varagg mem blk if PTY_agg var-arg + // VarInf - offset into module globals mem blk if global or PUStatic var + int32 storeIdx; + MIRSymbol *sym; // VarInf only - for global and PUStatic var + PUIdx puIdx; // VarInf only - for PUStatic var + ParmInf(PrimType type, size_t sz, bool ispreg, int32_t storageIdx) : ptyp(type), + size(sz), isPreg(ispreg), storeIdx(storageIdx) {} + ParmInf(PrimType type, size_t sz, bool ispreg, int32_t storageIdx, MIRSymbol *psym) : ptyp(type), + size(sz), isPreg(ispreg), storeIdx(storageIdx), sym(psym) {} + ParmInf(PrimType type, size_t sz, bool ispreg, int32_t storageIdx, MIRSymbol *psym, PUIdx puidx) : ptyp(type), + size(sz), isPreg(ispreg), storeIdx(storageIdx), sym(psym), puIdx(puidx) {} +}; + +using LabelMap = std::unordered_map; + +class LmbcFunc { + public: + LmbcMod *lmbcMod; + MIRFunction *mirFunc; + uint32 retSize; + uint32 frameSize; // auto var size in bytes + uint16 formalsNum; // num formals: vars+pregs + uint32 formalsNumVars; // num formals: vars only + uint32 formalsAggSize; // total struct size of all formal args of type agg + uint32 formalsSize; // total size of all formal args + LabelMap labelMap; // map labelIdx to Stmt address + size_t numPregs; + bool isVarArgs; 
+ std::vector pos2Parm; // formals info lkup by pos order + std::unordered_map stidx2Parm; // formals info lkup by formals stidx + LmbcFunc(LmbcMod *mod, MIRFunction *func); + void ScanFormals(void); + void ScanLabels(StmtNode* stmt); +}; + +class FuncAddr { + public: + union { + LmbcFunc *lmbcFunc; + void *nativeFunc; + } funcPtr; + bool isLmbcFunc; + uint32 formalsAggSize; // native func only + std::string funcName; + FuncAddr(bool lmbcFunc, void *func, std::string funcName, uint32 formalsAggSz = 0); +}; + +using VarInf = struct ParmInf; +using FuncMap = std::unordered_map; + +class LmbcMod { + public: + std::string lmbcPath; + MIRModule* mirMod {nullptr}; + FuncMap funcMap; + LmbcFunc* mainFn {nullptr}; + std::unordered_map globalAndStaticVars; + std::unordered_map globalStrTbl; + int unInitPUStaticsSize {0}; // uninitalized PUStatic vars + uint8* unInitPUStatics{nullptr}; + int globalsSize {0}; // global vars and initialized PUStatic + uint8* globals {nullptr}; + uint32 aggrInitOffset {0}; + + void InitGlobalVars(void); + void InitGlobalVariable(VarInf *pInf); + void InitIntConst(VarInf *pInf, MIRIntConst &intConst, uint8 *dst); + void InitStrConst(VarInf* pInf, MIRStrConst &mirStrConst, uint8 *dst); + void InitAddrofConst(VarInf *pInf, MIRAddrofConst &addrofConst, uint8 *dst); + void InitFloatConst(VarInf *pInf, MIRFloatConst &f32Const, uint8 *dst); + void InitDoubleConst(VarInf *Inf, MIRDoubleConst &f64Const, uint8 *dst); + void InitLblConst(VarInf *pInf, MIRLblConst &labelConst, uint8 *dst); + void InitBitFieldConst(VarInf *pInf, MIRConst &mirConst, int32_t &allocdBits, bool &forceAlign); + uint8_t* GetGlobalVarInitAddr(VarInf* pInf, uint32 align); + void UpdateGlobalVarInitAddr(VarInf* pInf, uint32 size); + void CheckUnamedBitField(MIRStructType &stType, uint32 &prevInitFd, uint32 curFd, int32 &allocdBits); + + LmbcMod(std::string path); + MIRModule* Import(std::string path); + void InitModule(void); + void CalcGlobalAndStaticVarSize(void); + void 
ScanPUStatic(MIRFunction *func); + LmbcFunc* LkupLmbcFunc(PUIdx puIdx); + + std::vector libHandles; + std::unordered_mapextFuncMap; // PUIdx to ext func addr map + std::unordered_mapextSymMap; // StIdx.FullIdx() to ext sym addr map + void LoadDefLibs(void); + void* FindExtFunc(PUIdx puidx); + void* FindExtSym(StIdx stidx); + void AddGlobalVar(MIRSymbol &sym, VarInf *pInf); + void AddPUStaticVar(PUIdx puIdx, MIRSymbol &sym, VarInf *pInf); + uint8 *GetVarAddr(StIdx stidx); // globa var + uint8 *GetVarAddr(PUIdx puidx, StIdx stidx); // PUStatic var + + void InitAggConst(VarInf *pInf, MIRConst &mirConst); + void InitArrayConst(VarInf *pInf, MIRConst &mirConst); + void InitScalarConst(VarInf *pInf, MIRConst &mirConst); + void InitPointerConst(VarInf *pInf, MIRConst &mirConst); + std::unordered_map PUIdx2FuncAddr; + FuncAddr* GetFuncAddr(PUIdx puIdx); +}; + +} // namespace maple + +#endif // MPLENG_LMBC_H_ diff --git a/src/MapleEng/lmbc/include/massert.h b/src/MapleEng/lmbc/include/massert.h new file mode 100644 index 0000000000000000000000000000000000000000..2030082a9a4c2c54057719bc45a4f942c6bff6a7 --- /dev/null +++ b/src/MapleEng/lmbc/include/massert.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MPLENG_MASSERT_H_ +#define MPLENG_MASSERT_H_ + +#include +#include + +#define MASSERT(cond, fmt, ...) 
\ + do { \ + if (!(cond)) { \ + fprintf(stderr, __FILE__ ":%d: Assert failed: " fmt "\n", __LINE__, ##__VA_ARGS__); \ + abort(); \ + } \ + } while (0) + +#endif // MPLENG_MASSERT_H_ + diff --git a/src/MapleEng/lmbc/include/mexpression.h b/src/MapleEng/lmbc/include/mexpression.h new file mode 100644 index 0000000000000000000000000000000000000000..306ac1bf8ec019c582b0158febe91d3707bd5a56 --- /dev/null +++ b/src/MapleEng/lmbc/include/mexpression.h @@ -0,0 +1,232 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MPLENG_MEXPRESSION_H_ +#define MPLENG_MEXPRESSION_H_ + +#include +#include "massert.h" // for MASSERT + +#define EXPRBINOP(exprop, res, op0, op1, exprPtyp) \ + do { \ + switch (exprPtyp) { \ + case PTY_i8: res.x.i8 = op0.x.i8 exprop op1.x.i8; break; \ + case PTY_i16: res.x.i16 = op0.x.i16 exprop op1.x.i16; break; \ + case PTY_i32: res.x.i32 = (int64)op0.x.i32 exprop (int64)op1.x.i32; break; \ + case PTY_i64: res.x.i64 = op0.x.i64 exprop op1.x.i64; break; \ + case PTY_u8: res.x.u8 = op0.x.u8 exprop op1.x.u8; break; \ + case PTY_u16: res.x.u16 = op0.x.u16 exprop op1.x.u16; break; \ + case PTY_u32: res.x.u32 = op0.x.u32 exprop op1.x.u32; break; \ + case PTY_u64: res.x.u64 = op0.x.u64 exprop op1.x.u64; break; \ + case PTY_a64: res.x.u64 = op0.x.u64 exprop op1.x.u64; break; \ + case PTY_f32: res.x.f32 = op0.x.f32 exprop op1.x.f32; break; \ + case PTY_f64: res.x.f64 = op0.x.f64 exprop op1.x.f64; break; \ + default: MIR_FATAL("Unsupported PrimType %d for binary operator %s", exprPtyp, #exprop); \ + } \ + res.ptyp = expr->ptyp; \ + } while (0) + +#define EXPRCOMPOP(exprop, res, op0, op1, optyp, exprptyp) \ + do { \ + switch (optyp) { \ + case PTY_i8: res.x.i64 = op0.x.i8 exprop op1.x.i8; break; \ + case PTY_i16: res.x.i64 = op0.x.i16 exprop op1.x.i16; break; \ + case PTY_i32: res.x.i64 = op0.x.i32 exprop op1.x.i32; break; \ + case PTY_i64: res.x.i64 = op0.x.i64 exprop op1.x.i64; break; \ + case PTY_u8: res.x.i64 = op0.x.u8 exprop op1.x.u8; break; \ + case PTY_u16: res.x.i64 = op0.x.u16 exprop op1.x.u16; break; \ + case PTY_u32: res.x.i64 = op0.x.u32 exprop op1.x.u32; break; \ + case PTY_u64: res.x.i64 = op0.x.u64 exprop op1.x.u64; break; \ + case PTY_a64: res.x.i64 = op0.x.a64 exprop op1.x.a64; break; \ + case PTY_f32: res.x.i64 = op0.x.f32 exprop op1.x.f32; break; \ + case PTY_f64: res.x.i64 = op0.x.f64 exprop op1.x.f64; break; \ + default: MIR_FATAL("Unsupported operand PrimType %d for comparison operator %s", op0.ptyp, #exprop); \ + } \ + res.ptyp = 
exprptyp; \ + } while (0) + +// EXPRCOMPOP with f32 amd f64 comparison cases taken out because of +// -Wfloat-equal compile flag warnings with != and == on float types. +// The float cases for != and == are special case handled in op handlers. +#define EXPRCOMPOPNOFLOAT(exprop,res, op0, op1, optyp, exprptyp) \ + do { \ + switch (optyp) { \ + case PTY_i8: res.x.i64 = op0.x.i8 exprop op1.x.i8; break; \ + case PTY_i16: res.x.i64 = op0.x.i16 exprop op1.x.i16; break; \ + case PTY_i32: res.x.i64 = op0.x.i32 exprop op1.x.i32; break; \ + case PTY_i64: res.x.i64 = op0.x.i64 exprop op1.x.i64; break; \ + case PTY_u8: res.x.i64 = op0.x.u8 exprop op1.x.u8; break; \ + case PTY_u16: res.x.i64 = op0.x.u16 exprop op1.x.u16; break; \ + case PTY_u32: res.x.i64 = op0.x.u32 exprop op1.x.u32; break; \ + case PTY_u64: res.x.i64 = op0.x.u64 exprop op1.x.u64; break; \ + case PTY_a64: res.x.i64 = op0.x.a64 exprop op1.x.a64; break; \ + default: MIR_FATAL("Unsupported operand PrimType %d for comparison operator %s", op0.ptyp, #exprop); \ + } \ + res.ptyp = exprptyp; \ + } while (0) + +#define EXPRSELECTOP(res, op0, sel1, sel2, exprptyp) \ + do { \ + MValue op1, op2; \ + op1 = CvtType(sel1, exprptyp, sel1.ptyp); \ + op2 = CvtType(sel2, exprptyp, sel2.ptyp); \ + switch (exprptyp) { \ + case PTY_i8: res.x.i8 = op0.x.i64? op1.x.i8 : op2.x.i8; break; \ + case PTY_i16: res.x.i16 = op0.x.i64? op1.x.i16 : op2.x.i16; break; \ + case PTY_i32: res.x.i32 = op0.x.i64? op1.x.i32 : op2.x.i32; break; \ + case PTY_i64: res.x.i64 = op0.x.i64? op1.x.i64 : op2.x.i64; break; \ + case PTY_u8: res.x.u8 = op0.x.i64? op1.x.u8 : op2.x.u8; break; \ + case PTY_u16: res.x.u16 = op0.x.i64? op1.x.u16 : op2.x.u16; break; \ + case PTY_u32: res.x.u32 = op0.x.i64? op1.x.u32 : op2.x.u32; break; \ + case PTY_u64: res.x.u64 = op0.x.i64? op1.x.u64 : op2.x.u64; break; \ + case PTY_a64: res.x.a64 = op0.x.i64? op1.x.a64 : op2.x.a64; break; \ + case PTY_f32: res.x.f32 = op0.x.i64? 
op1.x.f32 : op2.x.f32; break; \ + case PTY_f64: res.x.f64 = op0.x.i64? op1.x.f64 : op2.x.f64; break; \ + default: MIR_FATAL("Unsupported PrimType %d for select operator", exprptyp); \ + } \ + res.ptyp = exprptyp; \ + } while (0) + +#define EXPRBININTOP(exprop, res, op, op1, exprptyp) \ + do { \ + MValue op0 = CvtType(op, exprptyp, op.ptyp); \ + switch (exprptyp) { \ + case PTY_i8: res.x.i8 = op0.x.i8 exprop op1.x.i8; break; \ + case PTY_i16: res.x.i16 = op0.x.i16 exprop op1.x.i16; break; \ + case PTY_i32: res.x.i32 = op0.x.i32 exprop op1.x.i32; break; \ + case PTY_i64: res.x.i64 = op0.x.i64 exprop op1.x.i64; break; \ + case PTY_u8: res.x.u8 = op0.x.u8 exprop op1.x.u8; break; \ + case PTY_u16: res.x.u16 = op0.x.u16 exprop op1.x.u16; break; \ + case PTY_u32: res.x.u32 = op0.x.u32 exprop op1.x.u32; break; \ + case PTY_u64: res.x.u64 = op0.x.u64 exprop op1.x.u64; break; \ + case PTY_a64: res.x.u64 = op0.x.u64 exprop op1.x.u64; break; \ + default: MIR_FATAL("Unsupported PrimType %d for integer binary operator %s", exprptyp, #exprop); \ + } \ + res.ptyp = exprptyp; \ + } while (0) + +// Used by OP_lshr only +#define EXPRBININTOPUNSIGNED(exprop, res, op0, op1, exprptyp) \ + do { \ + MASSERT((op0.ptyp == exprptyp) || \ + (op0.ptyp == PTY_u32 && exprptyp == PTY_i32) || \ + (op0.ptyp == PTY_i32 && exprptyp == PTY_u32), \ + "BINUINTOP Type mismatch: 0x%02x and 0x%02x", op0.ptyp, exprptyp); \ + switch (op1.ptyp) { \ + case PTY_i8: \ + case PTY_u8: \ + MASSERT(op1.x.u8 <= 64, "OP_lshr shifting more than 64 bites"); \ + break; \ + case PTY_i16: \ + case PTY_u16: \ + MASSERT(op1.x.u16 <= 64, "OP_lshr shifting more than 64 bites"); \ + break; \ + case PTY_i32: \ + case PTY_u32: \ + MASSERT(op1.x.u32 <= 64, "OP_lshr shifting more than 64 bites"); \ + break; \ + case PTY_i64: \ + case PTY_u64: \ + case PTY_a64: \ + MASSERT(op1.x.u64 <= 64, "OP_lshr shifting more than 64 bites"); \ + break; \ + default: \ + MIR_FATAL("Unsupported PrimType %d for unsigned integer binary operator %s", 
exprptyp, #exprop); \ + break; \ + } \ + switch (exprptyp) { \ + case PTY_i8: res.x.u8 = op0.x.u8 exprop op1.x.u8; break; \ + case PTY_i16: res.x.u16 = op0.x.u16 exprop op1.x.u16; break; \ + case PTY_i32: res.x.u32 = op0.x.u32 exprop op1.x.u32; break; \ + case PTY_i64: res.x.u64 = op0.x.u64 exprop op1.x.u64; break; \ + case PTY_a64: res.x.u64 = op0.x.u64 exprop op1.x.u64; break; \ + case PTY_u8: res.x.u8 = op0.x.u8 exprop op1.x.u8; break; \ + case PTY_u16: res.x.u16 = op0.x.u16 exprop op1.x.u16; break; \ + case PTY_u32: res.x.u32 = op0.x.u32 exprop op1.x.u32; break; \ + case PTY_u64: res.x.u64 = op0.x.u64 exprop op1.x.u64; break; \ + default: MIR_FATAL("Unsupported PrimType %d for unsigned integer binary operator %s", exprptyp, #exprop); \ + } \ + res.ptyp = exprptyp; \ + } while (0) + +#define EXPRMAXMINOP(exprop, res, op0, op1, exprptyp) \ + do { \ + MASSERT(op0.ptyp == op1.ptyp, "MAXMINOP Type mismatch: 0x%02x and 0x%02x", op0.ptyp, op1.ptyp); \ + MASSERT(op0.ptyp == exprptyp, "MAXMINOP Type mismatch: 0x%02x and 0x%02x", op0.ptyp, exprptyp); \ + switch (exprptyp) { \ + case PTY_i8: res.x.i8 = op0.x.i8 exprop op1.x.i8? op0.x.i8 : op1.x.i8; break; \ + case PTY_i16: res.x.i16 = op0.x.i16 exprop op1.x.i16? op0.x.i16 : op1.x.i16; break; \ + case PTY_i32: res.x.i32 = op0.x.i32 exprop op1.x.i32? op0.x.i32 : op1.x.i32; break; \ + case PTY_i64: res.x.i64 = op0.x.i64 exprop op1.x.i64? op0.x.i64 : op1.x.i64; break; \ + case PTY_u8: res.x.u8 = op0.x.u8 exprop op1.x.u8 ? op0.x.u8 : op1.x.u8; break; \ + case PTY_u16: res.x.u16 = op0.x.u16 exprop op1.x.u16? op0.x.u16 : op1.x.u16; break; \ + case PTY_u32: res.x.u32 = op0.x.u32 exprop op1.x.u32? op0.x.u32 : op1.x.u32; break; \ + case PTY_u64: res.x.u64 = op0.x.u64 exprop op1.x.u64? op0.x.u64 : op1.x.u64; break; \ + case PTY_a64: res.x.a64 = op0.x.a64 exprop op1.x.a64? op0.x.a64 : op1.x.a64; break; \ + case PTY_f32: res.x.f32 = op0.x.f32 exprop op1.x.f32? 
op0.x.f32 : op1.x.f32; break; \ + case PTY_f64: res.x.f64 = op0.x.f64 exprop op1.x.f64? op0.x.f64 : op1.x.f64; break; \ + default: MIR_FATAL("Unsupported PrimType %d for binary max/min operator %s", exprptyp, #exprop); \ + } \ + res.ptyp = exprptyp; \ + } while (0) + +#define EXPRREMOP(exprop, res, op0, op1, exprptyp) \ + do { \ + switch (exprptyp) { \ + case PTY_i8: if (op1.x.i8 == 0) res.x.i8 = 0; \ + else if (op1.x.i8 == -1 && op0.x.i8 == INT8_MIN) op0.x.i8 = 0; \ + else res.x.i8 = op0.x.i8 exprop op1.x.i8; break; \ + case PTY_i16: if (op1.x.i16 == 0) res.x.i16 = 0; \ + else if (op1.x.i16 == -1 && op0.x.i16 == INT16_MIN) op0.x.i16 = 0; \ + else res.x.i16 = op0.x.i16 exprop op1.x.i16; break; \ + case PTY_i32: if (op1.x.i32 == 0) res.x.i32 = 0; \ + else if (op1.x.i32 == -1 && op0.x.i32 == INT32_MIN) op0.x.i32 = 0; \ + else res.x.i32 = op0.x.i32 exprop op1.x.i32; break; \ + case PTY_i64: if (op1.x.i64 == 0) res.x.i64 = 0; \ + else if (op1.x.i64 == -1 && op0.x.i64 == INT64_MIN) op0.x.i64 = 0; \ + else res.x.i64 = op0.x.i64 exprop op1.x.i64; break; \ + case PTY_u8: if (op1.x.u8 == 0) res.x.u8 = 0; \ + else res.x.u8 = op0.x.u8 exprop op1.x.u8; break; \ + case PTY_u16: if (op1.x.u16 == 0) res.x.u16 = 0; \ + else res.x.u16 = op0.x.u16 exprop op1.x.u16; break; \ + case PTY_u32: if (op1.x.u32 == 0) res.x.u32 = 0; \ + else res.x.u32 = op0.x.u32 exprop op1.x.u32; break; \ + case PTY_u64: if (op1.x.u64 == 0) res.x.u64 = 0; \ + else res.x.u64 = op0.x.u64 exprop op1.x.u64; break; \ + default: MIR_FATAL("Unsupported PrimType %d for rem operator %s", exprptyp, #exprop); \ + } \ + res.ptyp = exprptyp; \ + } while (0) + +#define EXPRUNROP(exprop, res, op0, exprptyp) \ + do { \ + MASSERT(op0.ptyp == exprptyp || \ + ((op0.ptyp == PTY_i32 || op0.ptyp == PTY_u32) && \ + (exprptyp == PTY_i32 || exprptyp == PTY_u32)), \ + "UNROP Type mismatch: 0x%02x and 0x%02x", op0.ptyp, exprptyp); \ + switch (exprptyp) { \ + case PTY_i8: res.x.i8 = exprop op0.x.i8; break; \ + case PTY_i16: res.x.i16 
= exprop op0.x.i16; break; \ + case PTY_i32: res.x.i32 = exprop op0.x.i32; break; \ + case PTY_i64: res.x.i64 = exprop op0.x.i64; break; \ + case PTY_u8: res.x.u8 = exprop op0.x.u8; break; \ + case PTY_u16: res.x.u16 = exprop op0.x.u16; break; \ + case PTY_u32: res.x.u32 = exprop op0.x.u32; break; \ + case PTY_u64: res.x.u64 = exprop op0.x.u64; break; \ + default: MIR_FATAL("Unsupported PrimType %d for unary operator %s", exprptyp, #exprop); \ + } \ + res.ptyp = exprptyp; \ + } while (0) + +#endif // MPLENG_MEXPRESSION_H_ diff --git a/src/MapleEng/lmbc/include/mfunction.h b/src/MapleEng/lmbc/include/mfunction.h new file mode 100644 index 0000000000000000000000000000000000000000..b56e9fc34ba6b64fe295c465b7b21d7332a801d7 --- /dev/null +++ b/src/MapleEng/lmbc/include/mfunction.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MPLENG_MFUNCTION_H_ +#define MPLENG_MFUNCTION_H_ + +#include +#include + +#include +#include + +#include "mir_nodes.h" +#include "mvalue.h" +#include "lmbc_eng.h" + +#define VARNAMELENGTH 16 +#define ALLOCA_MEMMAX 0x4000 + +namespace maple { + +class LmbcMod; +class LmbcFunc; +struct ParmInf; + +using ffi_fp_t = void(*)(); + +// For x86 valist setup, to force va arg acess from stack pointed to +// by overflow_arg_area, set gp_offset to 48, fp_offset to 304, +// reg_save_rea to null, and overflow_arg_arg to location for vaArgs. 
+typedef struct { + uint gp_offset; + uint fp_offset; + void *overflow_arg_area; + void *reg_save_area; +} VaListX86_64[1]; + +typedef struct { + void *stack; + void *gr_top; + void *vr_top; + int gr_offs; + int vr_offs; +} VaListAarch64; + +using VaList = VaListAarch64; + +// State of executing Maple function +class MFunction { + public: + // state of an executing function + LmbcFunc* info; // current func + MFunction* caller; // caller of current func + StmtNode* nextStmt; // next maple IR statement to execute + uint8* frame; // stack frame (auto var only) + uint8* fp; // point to bottom of frame + uint8* allocaMem; // point to reserved stack memory for Maple IR OP_alloca + uint32 allocaOffset; // next avail offset in allocaMem + MValue* pRegs; // array of pseudo regs used in function + MValue* formalVars; // array of var/non-preg args passed in + + // for function calls made from this function + uint16 numCallArgs; // number of call args to pass to callee + MValue* callArgs; // array of call args to pass to callee + uint8* aggrArgsBuf; // buffer for PTY_agg call formal args, which offsets into it + uint8* varArgsBuf; // buffer for PTY_agg call var-args, which offsets into it + MValue retVal0; // %retVal0 return from callee + MValue retVal1; // %retval1 return from callee + uint8* vaArgs; // AARCH64 ABI vararg stack for calling va-arg funcs + uint32 vaArgsSize; + + explicit MFunction(LmbcFunc *funcInfo, + MFunction *funcCaller, + uint8 *autoVars, + MValue *pRegs, + MValue *formalVars); + ~MFunction(); + uint8 *Alloca(uint32 size); + uint8 *GetFormalVarAddr(StIdx stidx); + void CallMapleFuncDirect(CallNode *call); + void CallMapleFuncIndirect(IcallNode *icall, LmbcFunc *callInfo); + void CallExtFuncDirect(CallNode* call); + void CallExtFuncIndirect(IcallNode *icallproto, void* fp); + void CallVaArgFunc(int numArgs, LmbcFunc *callInfo); + void CallWithFFI(PrimType ret_ptyp, ffi_fp_t fp); + void CallIntrinsic(IntrinsiccallNode &intrn); +}; + +bool IsExtFunc(PUIdx 
puIdx, LmbcMod &module); +MValue InvokeFunc(LmbcFunc* fn, MFunction *caller); +MValue EvalExpr(MFunction &func, BaseNode* expr, ParmInf *parm = nullptr); +void mload(uint8* addr, PrimType ptyp, MValue& res, size_t aggSizea = 0); +void mstore(uint8* addr, PrimType ptyp, MValue& val, bool toVarArgStack = false); + +} +#endif // MPLENG_MFUNCTION_H_ diff --git a/src/MapleEng/lmbc/include/mprimtype.h b/src/MapleEng/lmbc/include/mprimtype.h new file mode 100644 index 0000000000000000000000000000000000000000..68b42041c485594df8f720386eec3c24b5b4d939 --- /dev/null +++ b/src/MapleEng/lmbc/include/mprimtype.h @@ -0,0 +1,136 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MPLENG_MPRIMTYPE_H_ +#define MPLENG_MPRIMTYPE_H_ + +// Define size info of primtypes, some of them are undefined for now(set to 0) +// 1 - 1 byte +// 2 - 2 bytes +// 3 - 4 bytes +// 4 - 8 bytes +// 5 - 16 bytes +// 6 - 32 bytes +#define PTYSIZE_Invalid 0 +#define PTYSIZE_void 0 +#define PTYSIZE_i8 1 +#define PTYSIZE_i16 2 +#define PTYSIZE_i32 3 +#define PTYSIZE_i64 4 +#define PTYSIZE_u8 1 +#define PTYSIZE_u16 2 +#define PTYSIZE_u32 3 +#define PTYSIZE_u64 4 +#define PTYSIZE_u1 1 +#define PTYSIZE_ptr 3 +#define PTYSIZE_ref 3 +#define PTYSIZE_a32 3 +#define PTYSIZE_a64 4 +#define PTYSIZE_f32 3 +#define PTYSIZE_f64 4 +#define PTYSIZE_f128 5 +#define PTYSIZE_c64 5 +#define PTYSIZE_c128 6 +#define PTYSIZE_simplestr 3 +#define PTYSIZE_simpleobj 3 +#define PTYSIZE_dynany 4 +#define PTYSIZE_dynundef 4 +#define PTYSIZE_dynnull 4 +#define PTYSIZE_dynbool 4 +#define PTYSIZE_dyni32 4 +#define PTYSIZE_dynstr 4 +#define PTYSIZE_dynobj 4 +#define PTYSIZE_dynf64 4 +#define PTYSIZE_dynf32 4 +#define PTYSIZE_dynnone 4 +#define PTYSIZE_constStr 0 +#define PTYSIZE_gen 0 +#define PTYSIZE_agg 0 +#define PTYSIZE_v2i64 0 +#define PTYSIZE_v4i32 0 +#define PTYSIZE_v8i16 0 +#define PTYSIZE_v16i8 0 +#define PTYSIZE_v2f64 0 +#define PTYSIZE_v4f32 0 +#define PTYSIZE_unknown 0 +#define PTYSIZE_Derived 0 + +// Define ffi types for each primtype, some of them are unsupported(set to ffi_type_void) +#define FFITYPE_Invalid ffi_type_void +#define FFITYPE_void ffi_type_void +#define FFITYPE_i8 ffi_type_sint8 +#define FFITYPE_i16 ffi_type_sint16 +#define FFITYPE_i32 ffi_type_sint32 +#define FFITYPE_i64 ffi_type_sint64 +#define FFITYPE_u8 ffi_type_uint8 +#define FFITYPE_u16 ffi_type_uint16 +#define FFITYPE_u32 ffi_type_uint32 +#define FFITYPE_u64 ffi_type_uint64 +#define FFITYPE_u1 ffi_type_uint8 +#define FFITYPE_ptr ffi_type_pointer +#define FFITYPE_ref ffi_type_pointer +#define FFITYPE_a32 ffi_type_pointer +#define FFITYPE_a64 ffi_type_pointer +#define FFITYPE_f32 ffi_type_float +#define 
FFITYPE_f64 ffi_type_double +#define FFITYPE_f128 ffi_type_void +#define FFITYPE_c64 ffi_type_void +#define FFITYPE_c128 ffi_type_void +#define FFITYPE_simplestr ffi_type_void +#define FFITYPE_simpleobj ffi_type_void +#define FFITYPE_dynany ffi_type_void +#define FFITYPE_dynundef ffi_type_void +#define FFITYPE_dynnull ffi_type_void +#define FFITYPE_dynbool ffi_type_void +#define FFITYPE_dyni32 ffi_type_void +#define FFITYPE_dynstr ffi_type_void +#define FFITYPE_dynobj ffi_type_void +#define FFITYPE_dynf64 ffi_type_void +#define FFITYPE_dynf32 ffi_type_void +#define FFITYPE_dynnone ffi_type_void +#define FFITYPE_constStr ffi_type_void +#define FFITYPE_gen ffi_type_void +#define FFITYPE_agg ffi_type_void +#define FFITYPE_v2i64 ffi_type_void +#define FFITYPE_v4i32 ffi_type_void +#define FFITYPE_v8i16 ffi_type_void +#define FFITYPE_v16i8 ffi_type_void +#define FFITYPE_v2f64 ffi_type_void +#define FFITYPE_v4f32 ffi_type_void +#define FFITYPE_unknown ffi_type_void +#define FFITYPE_Derived ffi_type_void +#define FFITYPE_i128 ffi_type_void +#define FFITYPE_u128 ffi_type_void +#define FFITYPE_v2u64 ffi_type_void +#define FFITYPE_v4u32 ffi_type_void +#define FFITYPE_v8u16 ffi_type_void +#define FFITYPE_v16u8 ffi_type_void +#define FFITYPE_v2i32 ffi_type_void +#define FFITYPE_v4i16 ffi_type_void +#define FFITYPE_v8i8 ffi_type_void +#define FFITYPE_v2u32 ffi_type_void +#define FFITYPE_v4u16 ffi_type_void +#define FFITYPE_v8u8 ffi_type_void +#define FFITYPE_v2f32 ffi_type_void +#define FFITYPE_reservedpty1 ffi_type_void +#define FFITYPE_reservedpty2 ffi_type_void +#define FFITYPE_reservedpty3 ffi_type_void +#define FFITYPE_reservedpty4 ffi_type_void +#define FFITYPE_reservedpty5 ffi_type_void +#define FFITYPE_reservedpty6 ffi_type_void +#define FFITYPE_reservedpty7 ffi_type_void +#define FFITYPE_reservedpty8 ffi_type_void +#define FFITYPE_reservedpty9 ffi_type_void +#define FFITYPE_reservedpty10 ffi_type_void +#endif // MPLENG_MPRIMTYPE_H_ diff --git 
a/src/MapleEng/lmbc/include/mvalue.h b/src/MapleEng/lmbc/include/mvalue.h new file mode 100644 index 0000000000000000000000000000000000000000..f7bb933d8a665155b94b461c6ca2049f99788d34 --- /dev/null +++ b/src/MapleEng/lmbc/include/mvalue.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MPLENG_MVALUE_H_ +#define MPLENG_MVALUE_H_ + +#include +#include "prim_types.h" + +namespace maple { + struct MValue { + union { + int8 i8; + int16 i16; + int32 i32; + int64 i64; + uint8 u8; + uint16 u16; + uint32 u32; + uint64 u64; + float f32; + double f64; + uint8 *a64; // object ref (use uint8_t* instead of void* for reference) + void *ptr; + void *str; + } x; + PrimType ptyp:8; + size_t aggSize; // for PTY_agg only + }; +} + +#endif // MPLENG_MVALUE_H_ + diff --git a/src/MapleEng/lmbc/src/eng_shim.cpp b/src/MapleEng/lmbc/src/eng_shim.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3323aaf9f4d6b1fe529999653085b7a3cb0cfaf2 --- /dev/null +++ b/src/MapleEng/lmbc/src/eng_shim.cpp @@ -0,0 +1,74 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mfunction.h" +#include "massert.h" +#include "lmbc_eng.h" + +namespace maple { + +extern "C" int64 MplEngShim(LmbcFunc* fn, ...) { + uint8 frame[fn->frameSize]; + MValue pregs[fn->numPregs]; + MValue formalVars[fn->formalsNumVars+1]; + MFunction shim_caller(fn, nullptr, frame, pregs, formalVars); // create local Mfunction obj for shim + + MValue val; + if (fn->formalsNum > 0) { + MValue callArgs[fn->formalsNum]; + va_list args; + va_start (args, fn); + + int argIdx = 0; + while (argIdx < fn->formalsNum) { + // convert argv args to interpreter types and push on operand stack + val.ptyp = fn->pos2Parm[argIdx]->ptyp; + switch (val.ptyp) { + case PTY_i8: + val.x.i8 = va_arg(args, int); + break; + case PTY_i16: + val.x.i16 = va_arg(args, int); + break; + case PTY_i32: + val.x.i32 = va_arg(args, int); + break; + case PTY_i64: + val.x.i64 = va_arg(args, long long); + break; + case PTY_u16: + val.x.u16 = va_arg(args, int); + break; + case PTY_a64: + val.x.a64 = va_arg(args, uint8*); + break; + case PTY_f32: + // Variadic function expects that float arg is promoted to double + case PTY_f64: + val.x.f64 = va_arg(args, double); + break; + default: + MIR_FATAL("Unsupported PrimType %d", val.ptyp); + } + callArgs[argIdx] = val; + ++argIdx; + } + shim_caller.numCallArgs = fn->formalsNum; + shim_caller.callArgs = callArgs; + } + val = InvokeFunc(fn, &shim_caller); + return 0; +} + +} diff --git a/src/MapleEng/lmbc/src/init.cpp b/src/MapleEng/lmbc/src/init.cpp new file mode 100644 index 0000000000000000000000000000000000000000..eccab17f79a4a71a6c1be589d2d178a2b96b32c1 --- /dev/null +++ b/src/MapleEng/lmbc/src/init.cpp 
@@ -0,0 +1,598 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "massert.h" +#include "lmbc_eng.h" +#include "eng_shim.h" + +namespace maple { + +// Align offset to required alignment +inline void AlignOffset(uint32 &offset, uint32 align) { + offset = (offset + align-1) & ~(align-1); +} + +LmbcFunc::LmbcFunc(LmbcMod *mod, MIRFunction *func) : lmbcMod(mod), mirFunc(func) { + frameSize = ((func->GetFrameSize()+maplebe::k8ByteSize-1) >> maplebe::k8BitShift) << maplebe::k8BitShift; // round up to nearest 8 + isVarArgs = func->GetMIRFuncType()->IsVarargs(); + numPregs = func->GetPregTab()->Size(); +} + +void LmbcMod::InitModule(void) { + CalcGlobalAndStaticVarSize(); + for (MIRFunction *mirFunc : mirMod->GetFunctionList()) { + if (auto node = mirFunc->GetBody()) { + LmbcFunc* fn = new LmbcFunc(this, mirFunc); + MASSERT(fn, "Create Lmbc function failed"); + fn->ScanFormals(); + fn->ScanLabels(node); + funcMap[mirFunc->GetPuidx()] = fn; + if (mirFunc->GetName().compare("main") == 0) { + mainFn = fn; + } + } + } + InitGlobalVars(); +} + +void LmbcFunc::ScanFormals(void) { + MapleVector formalDefVec = mirFunc->GetFormalDefVec(); + formalsNum = formalDefVec.size(); + formalsSize = 0; + formalsNumVars = 0; + formalsAggSize = 0; + MASSERT(mirFunc->GetReturnType() != nullptr, "mirFunc return type is null"); + retSize = mirFunc->GetReturnType()->GetSize(); + for (uint32 i = 0; i < formalDefVec.size(); i++) { + MIRSymbol* symbol = 
formalDefVec[i].formalSym; + MIRType* ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(formalDefVec[i].formalTyIdx); + bool isPreg = (symbol->GetSKind() == kStPreg); + int32 storageIdx; + if (ty->GetPrimType() == PTY_agg) { + storageIdx = formalsAggSize; + formalsAggSize += ty->GetSize(); + } else { + storageIdx = isPreg? symbol->GetPreg()->GetPregNo(): ++formalsNumVars; + } + // collect formal params info + ParmInf* pInf = new ParmInf(ty->GetPrimType(), ty->GetSize(), isPreg, storageIdx); + stidx2Parm[symbol->GetStIdx().FullIdx()] = pInf; // formals info map keyed on formals stidx + pos2Parm.push_back(pInf); // vector of formals info in formalDefVec order + formalsSize += ty->GetSize(); + } +} + +void LmbcFunc::ScanLabels(StmtNode* stmt) { + while (stmt != nullptr) { + switch (stmt->op) { + case OP_block: + stmt = static_cast(stmt)->GetFirst(); + ScanLabels(stmt); + break; + case OP_label: + labelMap[static_cast(stmt)->GetLabelIdx()] = stmt; + break; + default: + break; + } + stmt= stmt->GetNext(); + } +} + +// Check for initialized flex array struct member and return size. The number +// of elements is not specified in the array type declaration but determined +// by array initializer. +// - Flex array must be last field of a top level struct +// - Only 1st dim of multi-dim array can be unspecified. Other dims must have bounds. +// - In Maple AST, array with unspecified num elements is defined with 1 element in +// 1st array dim, and is the storage for 1st initialiazed array element. +// The interpreter identifies such arrays by a dim of 1 in the array's 1st dim +// together with an array const initializer with > 1 element (should identify using +// kTypeFArray return from GetKind() but it's returning kTypeArray now ). 
+uint32 CheckFlexArrayMember(MIRSymbol &sym, MIRType &ty) { + auto &stType = static_cast(ty); + auto &stConst = static_cast(*sym.GetKonst()); + TyIdxFieldAttrPair tfap = stType.GetTyidxFieldAttrPair(stType.GetFieldsSize()-1); // last struct fd + MIRType *lastFdType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tfap.first); + if (lastFdType->GetKind() == kTypeArray && // last struct field is array + static_cast(lastFdType)->GetSizeArrayItem(0) == 1 && // 1st dim of array is 1 + stConst.GetConstVec().size() == stType.GetFieldsSize()) { // there is an initializer for the array + MIRConst &elemConst = *stConst.GetConstVecItem(stConst.GetConstVec().size()-1); // get array initializer + MASSERT(elemConst.GetType().GetKind() == kTypeArray, "array initializer expected"); + auto &arrCt = static_cast(elemConst); + if (arrCt.GetConstVec().size() > 1) { + return (arrCt.GetConstVec().size()-1) * elemConst.GetType().GetSize(); // 1st elem already in arr type def + } + } + return 0; +} + +// Calcluate total memory needed for global vars, Fstatic vars and initialized PUstatic vars. +// Walks global sym table and local sym table of all funcs to gather size info and adds +// var to table for looking up its size, type and offset within the storage segment. 
+void LmbcMod::CalcGlobalAndStaticVarSize() { + uint32 offset = 0; + for (size_t i = 0; i < GlobalTables::GetGsymTable().GetSymbolTableSize(); ++i) { + MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(i); + if (!sym || + !(sym->GetSKind() == kStVar) || + !(sym->GetStorageClass() == kScGlobal || sym->GetStorageClass() == kScFstatic)) { + continue; + } + if (MIRType *ty = sym->GetType()) { + AlignOffset(offset, ty->GetAlign()); // check and align var + VarInf* pInf = new VarInf(ty->GetPrimType(), ty->GetSize(), false, offset, sym); + AddGlobalVar(*sym, pInf); // add var to lookup table + offset += ty->GetSize(); + if (ty->GetKind() == kTypeStruct) { // check and account for flex array member + offset += CheckFlexArrayMember(*sym, *ty); + } + } + } + globalsSize = offset; + // get total size of nitialized function static vars + for (MIRFunction *func : mirMod->GetFunctionList()) { + if (auto node = func->GetBody()) { + ScanPUStatic(func); + } + } +} + +void LmbcMod::ScanPUStatic(MIRFunction *func) { + size_t size = func->GetSymbolTabSize(); + for (size_t i = 0; i < size; ++i) { + MIRSymbol *sym = func->GetSymbolTabItem(i); + if (!sym || !sym->IsPUStatic() || !sym->IsConst()) { // exclude un-init PUStatic + continue; + } + if (MIRType *ty = sym->GetType()) { + VarInf* pInf = new VarInf(ty->GetPrimType(), ty->GetSize(), false, globalsSize, sym, func->GetPuidx()); + AddPUStaticVar(func->GetPuidx(), *sym, pInf); // add var to lookup table + globalsSize += ty->GetSize(); + } + } +} + +// Get the address of a non-agg global var or next field within a global var to init. +// If a PTY_agg var, align offset to next init addr (aggrInitOffset) for ptyp. +// Otherwise, storeIdx should be aligned properly already in CalcGlobalVarsSize. 
+// - storeIdx: offset of the global var in global var segment +// - aggrInitOffset: offset of the field to init within global var of type PTY_agg +uint8 *LmbcMod::GetGlobalVarInitAddr(VarInf* pInf, uint32 align) { + if (pInf->ptyp != PTY_agg) { // init non-aggr global var + return globals + pInf->storeIdx; + } + AlignOffset(aggrInitOffset, align); + return globals + pInf->storeIdx + aggrInitOffset; +} + +inline void LmbcMod::UpdateGlobalVarInitAddr(VarInf* pInf, uint32 size) { + if (pInf->ptyp == PTY_agg) { + aggrInitOffset += size; + } +} + +// Check for un-named bitfields in initialized global struct vars and +// include in global var memory sizing and field offset calcuations. Un-named +// bit fields appears as gaps in field id between initialized struct fields +// of a global var. +void LmbcMod::CheckUnamedBitField(MIRStructType &stType, uint32 &prevInitFd, uint32 curFd, int32 &allocdBits) { + if (curFd - 1 == prevInitFd) { + prevInitFd = curFd; + return; + } + for (auto i = prevInitFd; i < curFd -1; ++i) { // struct fd idx 0 based; agg const fd 1 based + TyIdxFieldAttrPair tfap = stType.GetTyidxFieldAttrPair(i); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tfap.first); // type of struct fd + // Gaps in struct fields with initializer are either un-named bit fields or empty struct fields + // Account for bits in un-named bit fields. Skip over emtpy struct fields. + if (ty->GetKind() != kTypeBitField) { + continue; + } + MASSERT(ty->GetKind()==kTypeBitField, "Un-named bitfield expected"); + uint8 bitFdWidth = static_cast(ty)->GetFieldSize(); + uint32 baseFdSz = GetPrimTypeSize(ty->GetPrimType()); + + uint32 align = allocdBits ? 
1 : baseFdSz; // align with base fd if no bits have been allocated + AlignOffset(aggrInitOffset, align); + + if (allocdBits + bitFdWidth > (baseFdSz * maplebe::k8BitSize)) { // alloc bits will cross align boundary of base type + aggrInitOffset+= baseFdSz; + allocdBits = bitFdWidth; // alloc bits at new boundary + } else { + allocdBits += bitFdWidth; + } + } + prevInitFd = curFd; +} + +void LmbcMod::InitStrConst(VarInf* pInf, MIRStrConst &mirStrConst, uint8* dst) { + UStrIdx ustrIdx = mirStrConst.GetValue(); + auto it = globalStrTbl.insert( + std::pair(ustrIdx, GlobalTables::GetUStrTable().GetStringFromStrIdx(ustrIdx))); + *(const char **)dst = it.first->second.c_str(); +} + +inline void LmbcMod::InitFloatConst(VarInf *pInf, MIRFloatConst &f32Const, uint8* dst) { + *(float*)dst = f32Const.GetValue(); +} + +inline void LmbcMod::InitDoubleConst(VarInf *pInf, MIRDoubleConst &f64Const, uint8* dst) { + *(double*)dst = f64Const.GetValue(); +} + +void LmbcMod::InitLblConst(VarInf *pInf, MIRLblConst &labelConst, uint8 *dst) { + LabelIdx labelIdx = labelConst.GetValue(); + LmbcFunc *fn = LkupLmbcFunc(labelConst.GetPUIdx()); + StmtNode* label = fn->labelMap[labelIdx]; + MASSERT(label, "InitLblConst label not foound"); + *(StmtNode **)dst = label; +} + +void LmbcMod::InitIntConst(VarInf* pInf, MIRIntConst &intConst, uint8* dst) { + int64 val = intConst.GetExtValue(); + switch (intConst.GetType().GetPrimType()) { + case PTY_i64: + *(int64*)dst = (int64)val; + break; + case PTY_i32: + *(int32*)dst = (int32)val; + break; + case PTY_i16: + *(int16*)dst = (int16)val; + break; + case PTY_i8: + *(int8*)dst = (int8)val; + break; + case PTY_u64: + *(uint64*)dst = (uint64)val; + break; + case PTY_u32: + *(uint32*)dst = (uint32)val; + break; + case PTY_u16: + *(uint16*)dst = (uint16)val; + break; + case PTY_u8: + *(uint8*)dst = (uint8)val; + break; + default: + break; + } +} + +void LmbcMod::InitPointerConst(VarInf *pInf, MIRConst &mirConst) { + uint8 *dst = GetGlobalVarInitAddr(pInf, 
mirConst.GetType().GetAlign()); + switch (mirConst.GetKind()) { + case kConstAddrof: + InitAddrofConst(pInf, static_cast(mirConst), dst); + break; + case kConstStrConst: + InitStrConst(pInf, static_cast(mirConst), dst); + break; + case kConstInt: { + InitIntConst(pInf, static_cast(mirConst), dst); + break; + } + case kConstAddrofFunc: + default: + MASSERT(false, "InitPointerConst %d kind NYI", mirConst.GetKind()); + break; + } + UpdateGlobalVarInitAddr(pInf, mirConst.GetType().GetSize()); +} + +void SetBitFieldConst(uint8* baseFdAddr, uint32 baseFdSz, uint32 bitsOffset, uint8 bitsSize, MIRConst &elemConst) { + MIRIntConst &intConst = static_cast(elemConst); + int64 val = intConst.GetExtValue(); + uint64 mask = ~(0xffffffffffffffff << bitsSize); + uint64 from = (val & mask) << bitsOffset; + mask = mask << bitsOffset; + switch (elemConst.GetType().GetPrimType()) { + case PTY_i64: + *(int64*)baseFdAddr = ((*(int64*)baseFdAddr) & ~(mask)) | from; + break; + case PTY_i32: + *(int32*)baseFdAddr = ((*(int32*)baseFdAddr) & ~(mask)) | from; + break; + case PTY_i16: + *(int16*)baseFdAddr = ((*(int16*)baseFdAddr) & ~(mask)) | from; + break; + case PTY_i8: + *(int8*)baseFdAddr = ((*(int8*)baseFdAddr) & ~(mask)) | from; + break; + case PTY_u64: + *(uint64*)baseFdAddr = ((*(uint64*)baseFdAddr) & ~(mask)) | from; + break; + case PTY_u32: + *(uint32*)baseFdAddr = ((*(uint32*)baseFdAddr) & ~(mask)) | from; + break; + case PTY_u16: + *(uint16*)baseFdAddr = ((*(uint16*)baseFdAddr) & ~(mask)) | from; + break; + case PTY_u8: + *(uint8*)baseFdAddr = ((*(uint8*)baseFdAddr) & ~(mask)) | from; + break; + default: + MASSERT(false, "Unexpected primary type"); + break; + } +} + +void LmbcMod::InitBitFieldConst(VarInf *pInf, MIRConst &elemConst, int32 &allocdBits, bool &forceAlign) { + uint8 bitFdWidth = static_cast(elemConst.GetType()).GetFieldSize(); + if (!bitFdWidth) { // flag to force align immediate following bit field + forceAlign = true; + return; + } + if (forceAlign) { // align to 
next boundary + aggrInitOffset += (allocdBits + maplebe::k8ByteSize-1) >> maplebe::k8BitShift; + forceAlign = false; + } + uint32 baseFdSz = GetPrimTypeSize(elemConst.GetType().GetPrimType()); + uint32 align = allocdBits ? 1 : baseFdSz; // align with base fd if no bits have been allocated + uint8* baseFdAddr = GetGlobalVarInitAddr(pInf, align); + + if (allocdBits + bitFdWidth > (baseFdSz * maplebe::k8BitSize)) { // alloc bits will cross align boundary of base type + baseFdAddr = baseFdAddr + baseFdSz; // inc addr & offset by size of base type + SetBitFieldConst(baseFdAddr, baseFdSz, 0, bitFdWidth, elemConst); + aggrInitOffset+= baseFdSz; + allocdBits = bitFdWidth; // alloc bits at new boundary + } else { + SetBitFieldConst(baseFdAddr, baseFdSz, allocdBits, bitFdWidth, elemConst); + allocdBits += bitFdWidth; + } +} + +void LmbcMod::InitAggConst(VarInf *pInf, MIRConst &mirConst) { + auto &stType = static_cast(mirConst.GetType()); + auto &aggConst = static_cast(mirConst); + bool forceAlign = false; + int32 allocdBits = 0; + + AlignOffset(aggrInitOffset, aggConst.GetType().GetAlign()); // next init offset in global var mem + MIRTypeKind prevElemKind = kTypeUnknown; + for (uint32 i = 0, prevInitFd = 0; i < aggConst.GetConstVec().size(); ++i) { + MIRConst &elemConst = *aggConst.GetConstVecItem(i); + MIRType &elemType = elemConst.GetType(); + + // if non bit fd preceded by bit fd, round bit fd to byte boundary + // so next bit fd will start on new boundary + if (prevElemKind == kTypeBitField && elemType.GetKind() != kTypeBitField) { + forceAlign = false; + if (allocdBits) { + aggrInitOffset += (allocdBits + maplebe::k8ByteSize-1) >> maplebe::k8BitShift; // pad preceding bit fd to byte boundary + allocdBits = 0; + } + } + + // No need to check for un-named bit fd if aggr is an array + if (stType.GetKind() != kTypeArray) { + CheckUnamedBitField(stType, prevInitFd, aggConst.GetFieldIdItem(i), allocdBits); + } + switch (elemType.GetKind()) { + case kTypeScalar: + 
InitScalarConst(pInf, elemConst); + break; + case kTypeStruct: + case kTypeUnion: + InitAggConst(pInf, elemConst); + break; + case kTypeArray: + InitArrayConst(pInf, elemConst); + break; + case kTypePointer: + InitPointerConst(pInf, elemConst); + break; + case kTypeBitField: { + InitBitFieldConst(pInf, elemConst, allocdBits, forceAlign); + break; + } + default: { + MASSERT(false, "init struct type %d NYI", elemType.GetKind()); + break; + } + } + prevElemKind = elemType.GetKind(); + } +} + +void LmbcMod::InitScalarConst(VarInf *pInf, MIRConst &mirConst) { + uint8 *dst = GetGlobalVarInitAddr(pInf, mirConst.GetType().GetAlign()); + switch (mirConst.GetKind()) { + case kConstInt: + InitIntConst(pInf, static_cast(mirConst), dst); + break; + case kConstFloatConst: + InitFloatConst(pInf, static_cast(mirConst), dst); + break; + case kConstDoubleConst: + InitDoubleConst(pInf, static_cast(mirConst), dst); + break; + case kConstStrConst: + InitStrConst(pInf, static_cast(mirConst), dst); + break; + case kConstLblConst: + InitLblConst(pInf, static_cast(mirConst), dst); + break; + case kConstStr16Const: + case kConstAddrof: + case kConstAddrofFunc: + default: + MASSERT(false, "Scalar Const Type %d NYI", mirConst.GetKind()); + break; + } + UpdateGlobalVarInitAddr(pInf, mirConst.GetType().GetSize()); +} + +void LmbcMod::InitArrayConst(VarInf *pInf, MIRConst &mirConst) { + MIRArrayType &arrayType = static_cast(mirConst.GetType()); + MIRAggConst &arrayCt = static_cast(mirConst); + AlignOffset(aggrInitOffset, arrayType.GetAlign()); + + size_t uNum = arrayCt.GetConstVec().size(); + uint32 dim = arrayType.GetSizeArrayItem(0); + TyIdx scalarIdx = arrayType.GetElemTyIdx(); + MIRType *subTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(scalarIdx); + if (uNum == 0 && dim != 0) { + while (subTy->GetKind() == kTypeArray) { + MIRArrayType *aSubTy = static_cast(subTy); + if (aSubTy->GetSizeArrayItem(0) > 0) { + dim *= (aSubTy->GetSizeArrayItem(0)); + } + scalarIdx = aSubTy->GetElemTyIdx(); + 
subTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(scalarIdx); + } + } + for (size_t i = 0; i < uNum; ++i) { + MIRConst *elemConst = arrayCt.GetConstVecItem(i); + if (IsPrimitiveVector(subTy->GetPrimType())) { + MASSERT(false, "Unexpected primitive vector"); + } else if (IsPrimitiveScalar(elemConst->GetType().GetPrimType())) { + InitScalarConst(pInf, *elemConst); + } else if (elemConst->GetType().GetKind() == kTypeArray) { + InitArrayConst(pInf, *elemConst); + } else if (elemConst->GetType().GetKind() == kTypeStruct || + elemConst->GetType().GetKind() == kTypeClass || + elemConst->GetType().GetKind() == kTypeUnion) { + InitAggConst(pInf, *elemConst); + } else { + MASSERT(false, "InitArrayConst unexpected error"); + } + } +} + +void LmbcMod::InitAddrofConst(VarInf *pInf, MIRAddrofConst &addrofConst, uint8* dst) { + StIdx stIdx = addrofConst.GetSymbolIndex(); + int32 offset = addrofConst.GetOffset(); + uint8 *addr = pInf->sym->IsPUStatic() ? GetVarAddr(pInf->puIdx, stIdx) : GetVarAddr(stIdx); + *(uint8**)dst = addr + offset; +} + +void LmbcMod::InitGlobalVariable(VarInf *pInf) { + MIRConst *mirConst = pInf->sym->GetKonst(); + uint8 *dst = GetGlobalVarInitAddr(pInf, mirConst->GetType().GetAlign()); + + switch (mirConst->GetKind()) { + case kConstAggConst: + aggrInitOffset = 0; + InitAggConst(pInf, *mirConst); + return; + case kConstInt: + InitIntConst(pInf, *static_cast(mirConst), dst); + break; + case kConstFloatConst: + InitFloatConst(pInf, *static_cast(mirConst), dst); + break; + case kConstDoubleConst: + InitDoubleConst(pInf, *static_cast(mirConst), dst); + break; + case kConstAddrof: + InitAddrofConst(pInf, *static_cast(mirConst), dst); + break; + case kConstStrConst: + InitStrConst(pInf, *static_cast(mirConst), dst); + break; + default: + MASSERT(false, "Init MIRConst type %d NYI", mirConst->GetKind()); + break; + } + UpdateGlobalVarInitAddr(pInf, mirConst->GetType().GetSize()); +} + +void LmbcMod::InitGlobalVars(void) { + // alloc mem for global vars + 
this->globals = (uint8*)malloc(this->globalsSize); + this->unInitPUStatics = (uint8*)malloc(this->unInitPUStaticsSize); + memset_s(this->globals, this->globalsSize, 0, this->globalsSize); + memset_s(this->unInitPUStatics, this->unInitPUStaticsSize, 0, this->unInitPUStaticsSize); + + // init global vars and static vars + for (const auto it : globalAndStaticVars) { + VarInf *pInf = it.second; + if (pInf->sym->IsConst()) { + InitGlobalVariable(pInf); + } + } +} + +inline void LmbcMod::AddGlobalVar(MIRSymbol &sym, VarInf *pInf) { + globalAndStaticVars[sym.GetStIdx().FullIdx()] = pInf; +} + +inline void LmbcMod::AddPUStaticVar(PUIdx puIdx, MIRSymbol &sym, VarInf *pInf) { + globalAndStaticVars[((uint64)puIdx << maplebe::k32BitSize) | sym.GetStIdx().FullIdx()] = pInf; +} + +// global var +uint8 *LmbcMod::GetVarAddr(StIdx stIdx) { + auto it = globalAndStaticVars.find(stIdx.FullIdx()); + MASSERT(it != globalAndStaticVars.end(), "global var not found"); + return globals + it->second->storeIdx; +} + +// PUStatic var +uint8 *LmbcMod::GetVarAddr(PUIdx puIdx, StIdx stIdx) { + auto it = globalAndStaticVars.find(((long)puIdx << maplebe::k32BitSize) | stIdx.FullIdx()); + MASSERT(it != globalAndStaticVars.end(), "PUStatic var not found"); + return globals + it->second->storeIdx; +} + +LmbcFunc *LmbcMod::LkupLmbcFunc(PUIdx puIdx) { + auto it = funcMap.find(puIdx); + return it == funcMap.end()? nullptr: it->second; +} + +FuncAddr::FuncAddr(bool lmbcFunc, void *func, std::string name, uint32 formalsAggSz) { + funcName = name; + isLmbcFunc = lmbcFunc; + formalsAggSize = formalsAggSz; + if (isLmbcFunc) { + funcPtr.lmbcFunc = (LmbcFunc*)func; + } else { + funcPtr.nativeFunc = func; + } +} + +// Get size total of all func parameters of type TY_agg. 
+uint32 GetAggFormalsSize(MIRFunction *func) { + uint32 totalSize = 0; + MapleVector &formalDefVec = func->GetFormalDefVec(); + for (int i = 0; i < formalDefVec.size(); i++) { + MIRType* ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(formalDefVec[i].formalTyIdx); + if (ty->GetPrimType() == PTY_agg) { + totalSize += ty->GetSize(); + } + } + return totalSize; +} + +FuncAddr* LmbcMod::GetFuncAddr(PUIdx idx) { + FuncAddr *faddr; + if (PUIdx2FuncAddr[idx]) { + return PUIdx2FuncAddr[idx]; + } + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(idx); + MASSERT(func, "Function not found in global table"); + if (IsExtFunc(idx, *this)) { + faddr = new FuncAddr(false, FindExtFunc(idx), func->GetName(), GetAggFormalsSize(func)); + } else { + faddr = new FuncAddr(true, LkupLmbcFunc(idx), func->GetName()); + } + PUIdx2FuncAddr[idx] = faddr; + return faddr; +} + + +} // namespace maple diff --git a/src/MapleEng/lmbc/src/invoke_method.cpp b/src/MapleEng/lmbc/src/invoke_method.cpp new file mode 100644 index 0000000000000000000000000000000000000000..162fda67b061e5db494cd36e5eb9adae39f368f7 --- /dev/null +++ b/src/MapleEng/lmbc/src/invoke_method.cpp @@ -0,0 +1,1322 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include +#include +#include +#include +#include "mvalue.h" +#include "mprimtype.h" +#include "mfunction.h" +#include "mexpression.h" +#include "opcodes.h" +#include "massert.h" + +namespace maple { + +int64 MVal2Int64(MValue &val) { + switch (val.ptyp) { + case PTY_i64: + return val.x.i64; + case PTY_i32: + return (int64)val.x.i32; + case PTY_i16: + return (int64)val.x.i16; + case PTY_i8: + return (int64)val.x.i8; + case PTY_u32: + return (int64)val.x.u32; + default: + MASSERT(false, "MValue type %d for int64 conversion NYI", val.ptyp); + } +} + +bool IsZero(MValue& cond) { + switch (cond.ptyp) { + case PTY_u8: return cond.x.u8 == 0; + case PTY_u16: return cond.x.u16 == 0; + case PTY_u32: return cond.x.u32 == 0; + case PTY_u64: return cond.x.u64 == 0; + case PTY_i8: return cond.x.i8 == 0; + case PTY_i16: return cond.x.i16 == 0; + case PTY_i32: return cond.x.i32 == 0; + case PTY_i64: return cond.x.i64 == 0; + default: MASSERT(false, "IsZero type %d NYI", cond.ptyp); + } +} + +bool RegAssignZextOrSext(MValue& from, PrimType toTyp, MValue& to) { + switch (toTyp) { + case PTY_u8: + switch (from.ptyp) { + case PTY_u32: to.x.u8 = from.x.u32; break; // special case as needed + default: return false; + } + break; + case PTY_u32: + switch (from.ptyp) { + case PTY_u8: to.x.u64 = from.x.u8; break; + case PTY_u16: to.x.u64 = from.x.u16; break; + case PTY_i32: to.x.u64 = from.x.i32; break; + default: return false; + } + break; + case PTY_i32: + switch (from.ptyp) { + case PTY_i8: to.x.i64 = from.x.i8; break; + case PTY_i16: to.x.i64 = from.x.i16; break; + case PTY_u32: to.x.i64 = from.x.u32; break; + default: return false; + } + break; + case PTY_u64: + switch (from.ptyp) { + case PTY_u8: to.x.u64 = from.x.u8; break; + case PTY_u16: to.x.u64 = from.x.u16; break; + case PTY_u32: to.x.u64 = from.x.u32; break; + case PTY_i64: to.x.u64 = from.x.i64; break; // for large_stack.c + default: return false; + } + break; + case PTY_i64: + switch (from.ptyp) { + case 
PTY_i8: to.x.i64 = from.x.i8; break; + case PTY_i16: to.x.i64 = from.x.i16; break; + case PTY_i32: to.x.i64 = from.x.i32; break; + default: return false; + } + break; + case PTY_i16: + switch (from.ptyp) { + case PTY_i32: to.x.i16 = from.x.i32; break; + case PTY_u16: to.x.i16 = from.x.u16; break; + default: return false; + } + break; + case PTY_u16: + switch (from.ptyp) { + case PTY_u32: to.x.u16 = from.x.i32; break; + default: return false; + } + break; + default: + return false; + break; + } + to.ptyp = toTyp; + return true; +} + +#define CASE_TOPTYP(toPtyp, toCtyp) \ + case PTY_##toPtyp: \ + if (cvtInt) res.x.toPtyp = (toCtyp)fromInt; else \ + if (cvtUint) res.x.toPtyp = (toCtyp)fromUint; else \ + if (cvtf32) res.x.toPtyp = (toCtyp)fromFloat; else \ + if (cvtf64) res.x.toPtyp = (toCtyp)fromDouble; \ + break; + +MValue CvtType(MValue &opnd, PrimType toPtyp, PrimType fromPtyp) { + MValue res; + intptr_t fromInt; + uintptr_t fromUint; + float fromFloat; + double fromDouble; + bool cvtInt = false; + bool cvtUint = false; + bool cvtf32 = false; + bool cvtf64 = false; + + if (opnd.ptyp == toPtyp) { + return opnd; + } + switch (fromPtyp) { + case PTY_i8: fromInt = opnd.x.i8; cvtInt = true; break; + case PTY_i16: fromInt = opnd.x.i16; cvtInt = true; break; + case PTY_i32: fromInt = opnd.x.i32; cvtInt = true; break; + case PTY_i64: fromInt = opnd.x.i64; cvtInt = true; break; + case PTY_u8: fromUint = opnd.x.u8; cvtUint= true; break; + case PTY_u16: fromUint = opnd.x.u16; cvtUint= true; break; + case PTY_u32: fromUint = opnd.x.u32; cvtUint= true; break; + case PTY_u64: fromUint = opnd.x.u64; cvtUint= true; break; + case PTY_a64: fromUint = opnd.x.u64; cvtUint= true; break; + case PTY_ptr: fromUint = opnd.x.u64; cvtUint= true; break; + case PTY_f32: fromFloat = opnd.x.f32; cvtf32 = true; break; + case PTY_f64: fromDouble= opnd.x.f64; cvtf64 = true; break; + default: MASSERT(false, "OP_cvt from ptyp %d NYI", fromPtyp); break; + } + switch (toPtyp) { + CASE_TOPTYP(i8, int8) 
+ CASE_TOPTYP(i16, int16) + CASE_TOPTYP(i32, int32) + CASE_TOPTYP(i64, int64) + CASE_TOPTYP(u8, uint8) + CASE_TOPTYP(u16, uint16) + CASE_TOPTYP(u32, uint32) + CASE_TOPTYP(u64, uint64) + CASE_TOPTYP(f32, float) + CASE_TOPTYP(f64, double) + case PTY_a64: + if (cvtInt) + res.x.a64 = (uint8*)fromInt; + else if (cvtUint) + res.x.a64 = (uint8*)fromUint; + else + MASSERT(false, "OP_cvt: type %d to %d not supported", fromPtyp, toPtyp); + break; + default: MASSERT(false, "OP_cvt: type %d to %d NYI", fromPtyp, toPtyp); + } + res.ptyp = toPtyp; + return res; +} + + +inline bool CompareFloat(float x, float y, float epsilon = 0.00000001f) { + if (isinf(x) && isinf(y)) { + return true; + } + return (fabs(x - y) < epsilon) ? true : false; +} + +inline bool CompareDouble(double x, double y, double epsilon = 0.0000000000000001f) { + if (isinf(x) && isinf(y)) { + return true; + } + return (fabs(x - y) < epsilon) ? true : false; +} + +void HandleFloatEq(Opcode op, PrimType opndType, MValue &res, MValue &op1, MValue &op2) { + MASSERT(opndType == op1.ptyp && op1.ptyp == op2.ptyp, "Operand type mismatch %d %d", op1.ptyp, op2.ptyp); + switch (op) { + case OP_ne: + if (opndType == PTY_f32) { + res.x.i64 = !CompareFloat(op1.x.f32, op2.x.f32); + } else if (opndType == PTY_f64) { + res.x.i64 = !CompareDouble(op1.x.f64, op2.x.f64); + } else { + MASSERT(false, "Unexpected type"); + } + break; + case OP_eq: + if (opndType == PTY_f32) { + res.x.i64 = CompareFloat(op1.x.f32, op2.x.f32); + } else if (opndType == PTY_f64) { + res.x.i64 = CompareDouble(op1.x.f64, op2.x.f64); + } else { + MASSERT(false, "Unexpected type"); + } + break; + default: + break; + } +} + +void LoadArgs(MFunction& func) { + for (int i=0; i < func.info->formalsNum; ++i) { + if (func.info->pos2Parm[i]->isPreg) { + func.pRegs[func.info->pos2Parm[i]->storeIdx] = func.caller->callArgs[i]; + } else { + func.formalVars[func.info->pos2Parm[i]->storeIdx] = func.caller->callArgs[i]; + } + } +} + +// Handle regassign agg %%retval0 +// 
Return agg <= 16 bytes in %%retval0 and %%retval1 +void HandleAggrRetval(MValue &rhs, MFunction *caller) { + MASSERT(rhs.aggSize <= 16, "regassign of agg >16 bytes to %%retval0"); + uint64 retval[2] = {0, 0}; + memcpy_s(retval, sizeof(retval), rhs.x.a64, rhs.aggSize); + caller->retVal0.x.u64 = retval[0]; + caller->retVal0.ptyp = PTY_agg; // treat as PTY_u64 if aggSize <= 16 + caller->retVal0.aggSize = rhs.aggSize; + if (rhs.aggSize > maplebe::k8ByteSize) { + caller->retVal1.x.u64 = retval[1]; + caller->retVal1.ptyp = PTY_agg; + caller->retVal1.aggSize= rhs.aggSize; + } +} + +// Walk the Maple LMBC IR tree of a function and execute its statements. +MValue InvokeFunc(LmbcFunc* fn, MFunction *caller) { + MValue retVal; + MValue pregs[fn->numPregs]; // func pregs (incl. func formals that are pregs) + MValue formalVars[fn->formalsNumVars+1]; // func formals that are named vars + alignas(maplebe::k8ByteSize) uint8 frame[fn->frameSize]; // func autovars + MFunction mfunc(fn, caller, frame, pregs, formalVars); // init func execution state + + static void* const labels[] = { + &&label_OP_Undef, +#define OPCODE(base_node,dummy1,dummy2,dummy3) &&label_OP_##base_node, +#include "opcodes.def" +#undef OPCODE + &&label_OP_Undef + }; + + LoadArgs(mfunc); + uint8 buf[ALLOCA_MEMMAX]; + mfunc.allocaMem = buf; +// mfunc.allocaMem = static_cast(alloca(ALLOCA_MEMMAX)); + StmtNode *stmt = mfunc.nextStmt; + goto *(labels[stmt->op]); + +label_OP_Undef: + { + MASSERT(false, "Hit OP_undef"); + } +label_OP_block: + { + stmt = static_cast(stmt)->GetFirst(); + mfunc.nextStmt = stmt; + goto *(labels[stmt->op]); + } +label_OP_iassignfpoff: + { + IassignFPoffNode* node = static_cast(stmt); + int32 offset= node->GetOffset(); + BaseNode* rhs = node->GetRHS(); + MValue val = EvalExpr(mfunc, rhs); + PrimType ptyp = node->ptyp; + mstore(mfunc.fp+offset, ptyp, val); + } + stmt = stmt->GetNext(); + mfunc.nextStmt = stmt; + goto *(labels[stmt->op]); +label_OP_call: + { + CallNode *call = 
static_cast(stmt); + MValue callArgs[call->NumOpnds()]; // stack for callArgs + mfunc.callArgs = callArgs; + mfunc.numCallArgs = call->NumOpnds(); + if (IsExtFunc(call->GetPUIdx(), *mfunc.info->lmbcMod)) { + mfunc.CallExtFuncDirect(call); + } else { + mfunc.CallMapleFuncDirect(call); + } + } + stmt = stmt->GetNext(); + mfunc.nextStmt = stmt; + goto *(labels[stmt->op]); +label_OP_regassign: + { + RegassignNode* node = static_cast(stmt); + PregIdx regIdx = node->GetRegIdx(); + MValue rhs = EvalExpr(mfunc, node->GetRHS()); + if (node->ptyp == rhs.ptyp) { + MASSERT(regIdx != -kSregRetval1, "regassign to %%%%retval1"); + if (regIdx == -kSregRetval0) { + if (node->ptyp != PTY_agg) { + caller->retVal0 = rhs; + } else { + HandleAggrRetval(rhs, caller); + } + } else { + MASSERT(regIdx < fn->numPregs, "regassign regIdx %d out of bound", regIdx); + mfunc.pRegs[regIdx] = rhs; + } + } else { + bool extended = false; + if (regIdx == -kSregRetval0) { + extended = RegAssignZextOrSext(rhs, node->ptyp, caller->retVal0); + } else if (regIdx == -kSregRetval1) { + extended = RegAssignZextOrSext(rhs, node->ptyp, caller->retVal1); + } else { + MASSERT(regIdx < fn->numPregs, "regassign regIdx %d out of bound", regIdx); + extended = RegAssignZextOrSext(rhs, node->ptyp, mfunc.pRegs[regIdx]); + } + if (!extended) { + if ((node->ptyp == PTY_a64 || node->ptyp == PTY_u64) && + (rhs.ptyp == PTY_a64 || rhs.ptyp == PTY_u64)) { + mfunc.pRegs[regIdx] = rhs; + mfunc.pRegs[regIdx].ptyp = node->ptyp; + } else { + mfunc.pRegs[regIdx] = CvtType(rhs, node->ptyp, rhs.ptyp); + } + } + } + } + stmt = stmt->GetNext(); + mfunc.nextStmt = stmt; + goto *(labels[stmt->op]); +label_OP_brfalse: +label_OP_brtrue: + { + CondGotoNode* node = static_cast(stmt); + uint32 labelIdx = node->GetOffset(); (void)labelIdx; + MValue cond = EvalExpr(mfunc, node->GetRHS()); + StmtNode* label = fn->labelMap[labelIdx]; + if (stmt->op == OP_brfalse && IsZero(cond)) stmt = label; + if (stmt->op == OP_brtrue && !IsZero(cond)) stmt = 
label; + } + stmt = stmt->GetNext(); + mfunc.nextStmt = stmt; + goto *(labels[stmt->op]); +label_OP_label: + // no-op + stmt = stmt->GetNext(); + mfunc.nextStmt = stmt; + goto *(labels[stmt->op]); +label_OP_goto: + { + uint32 labelIdx = static_cast(stmt)->GetOffset(); + StmtNode* label = fn->labelMap[labelIdx]; + stmt = label; + } + stmt = stmt->GetNext(); + mfunc.nextStmt = stmt; + goto *(labels[stmt->op]); +label_OP_return: + return caller->retVal0; +label_OP_iassignoff: + { + IassignoffNode* node = static_cast(stmt); + int32 offset = node->GetOffset(); + MValue addr = EvalExpr(mfunc, node->Opnd(0)); + MValue rhs = EvalExpr(mfunc, node->Opnd(1)); + mstore(addr.x.a64 + offset, stmt->ptyp, rhs); + } + stmt = stmt->GetNext(); + mfunc.nextStmt = stmt; + goto *(labels[stmt->op]); +label_OP_blkassignoff: + { + BlkassignoffNode* node = static_cast(stmt); + int32 dstOffset = node->offset; + int32 blkSize = node->blockSize; + MValue dstAddr = EvalExpr(mfunc, node->Opnd(0)); + MValue srcAddr = EvalExpr(mfunc, node->Opnd(1)); + memcpy_s(dstAddr.x.a64 + dstOffset, blkSize, srcAddr.x.a64, blkSize); + } + stmt = stmt->GetNext(); + mfunc.nextStmt = stmt; + goto *(labels[stmt->op]); +label_OP_icallproto: + { + IcallNode *icallproto = static_cast(stmt); + MASSERT(icallproto->NumOpnds() > 0, "icallproto num operands is %ld", icallproto->NumOpnds()); + // alloc stack space for call args + MValue callArgs[icallproto->NumOpnds()-1]; + mfunc.callArgs = callArgs; + mfunc.numCallArgs = icallproto->NumOpnds()-1; + // assume func addr in opnd 0 is from addroffunc + MValue fnAddr = EvalExpr(mfunc, icallproto->Opnd(0)); + FuncAddr *faddr = reinterpret_cast(fnAddr.x.a64); + if (faddr->isLmbcFunc) { + mfunc.CallMapleFuncIndirect(icallproto, faddr->funcPtr.lmbcFunc); + } else { + mfunc.CallExtFuncIndirect(icallproto, faddr->funcPtr.nativeFunc); + } + } + stmt = stmt->GetNext(); + mfunc.nextStmt = stmt; + goto *(labels[stmt->op]); +label_OP_rangegoto: + { + RangeGotoNode *rgoto = 
static_cast(stmt); + int32 tagOffset = rgoto->GetTagOffset(); + MValue opnd = EvalExpr(mfunc, rgoto->Opnd(0)); + int64 tag = MVal2Int64(opnd); + uint32 labelIdx = rgoto->GetRangeGotoTableItem(tag - tagOffset).second; + StmtNode *label = fn->labelMap[labelIdx]; + stmt = label; + } + stmt = stmt->GetNext(); + mfunc.nextStmt = stmt; + goto *(labels[stmt->op]); +label_OP_igoto: + { + MValue opnd = EvalExpr(mfunc, stmt->Opnd(0)); + StmtNode *label = (StmtNode*)opnd.x.a64; + stmt = label; + } + stmt = stmt->GetNext(); + mfunc.nextStmt = stmt; + goto *(labels[stmt->op]); +label_OP_intrinsiccall: + { + mfunc.CallIntrinsic(*static_cast(stmt)); + } + stmt = stmt->GetNext(); + mfunc.nextStmt = stmt; + goto *(labels[stmt->op]); + +label_OP_dassign: +label_OP_piassign: +label_OP_maydassign: +label_OP_iassign: +label_OP_doloop: +label_OP_dowhile: +label_OP_if: +label_OP_while: +label_OP_switch: +label_OP_multiway: +label_OP_foreachelem: +label_OP_comment: +label_OP_eval: +label_OP_free: +label_OP_calcassertge: +label_OP_calcassertlt: +label_OP_assertge: +label_OP_assertlt: +label_OP_callassertle: +label_OP_returnassertle: +label_OP_assignassertle: +label_OP_abort: +label_OP_assertnonnull: +label_OP_assignassertnonnull: +label_OP_callassertnonnull: +label_OP_returnassertnonnull: +label_OP_dread: +label_OP_iread: +label_OP_addrof: +label_OP_iaddrof: +label_OP_sizeoftype: +label_OP_fieldsdist: +label_OP_array: +label_OP_virtualcall: +label_OP_superclasscall: +label_OP_interfacecall: +label_OP_customcall: +label_OP_polymorphiccall: +label_OP_icall: +label_OP_interfaceicall: +label_OP_virtualicall: +label_OP_intrinsiccallwithtype: +label_OP_xintrinsiccall: +label_OP_callassigned: +label_OP_virtualcallassigned: +label_OP_superclasscallassigned: +label_OP_interfacecallassigned: +label_OP_customcallassigned: +label_OP_polymorphiccallassigned: +label_OP_icallassigned: +label_OP_interfaceicallassigned: +label_OP_virtualicallassigned: +label_OP_intrinsiccallassigned: 
+label_OP_intrinsiccallwithtypeassigned: +label_OP_xintrinsiccallassigned: +label_OP_callinstant: +label_OP_callinstantassigned: +label_OP_virtualcallinstant: +label_OP_virtualcallinstantassigned: +label_OP_superclasscallinstant: +label_OP_superclasscallinstantassigned: +label_OP_interfacecallinstant: +label_OP_interfacecallinstantassigned: +label_OP_jstry: +label_OP_try: +label_OP_cpptry: +label_OP_throw: +label_OP_jscatch: +label_OP_catch: +label_OP_cppcatch: +label_OP_finally: +label_OP_cleanuptry: +label_OP_endtry: +label_OP_safe: +label_OP_endsafe: +label_OP_unsafe: +label_OP_endunsafe: +label_OP_gosub: +label_OP_retsub: +label_OP_syncenter: +label_OP_syncexit: +label_OP_decref: +label_OP_incref: +label_OP_decrefreset: +label_OP_membaracquire: +label_OP_membarrelease: +label_OP_membarstoreload: +label_OP_membarstorestore: +label_OP_ireadoff: +label_OP_ireadfpoff: +label_OP_regread: +label_OP_addroffunc: +label_OP_addroflabel: +label_OP_constval: +label_OP_conststr: +label_OP_conststr16: +label_OP_ceil: +label_OP_cvt: +label_OP_floor: +label_OP_retype: +label_OP_round: +label_OP_trunc: +label_OP_abs: +label_OP_bnot: +label_OP_lnot: +label_OP_neg: +label_OP_recip: +label_OP_sqrt: +label_OP_sext: +label_OP_zext: +label_OP_alloca: +label_OP_malloc: +label_OP_gcmalloc: +label_OP_gcpermalloc: +label_OP_stackmalloc: +label_OP_gcmallocjarray: +label_OP_gcpermallocjarray: +label_OP_stackmallocjarray: +label_OP_resolveinterfacefunc: +label_OP_resolvevirtualfunc: +label_OP_add: +label_OP_sub: +label_OP_mul: +label_OP_div: +label_OP_rem: +label_OP_ashr: +label_OP_lshr: +label_OP_shl: +label_OP_ror: +label_OP_max: +label_OP_min: +label_OP_band: +label_OP_bior: +label_OP_bxor: +label_OP_CG_array_elem_add: +label_OP_eq: +label_OP_ge: +label_OP_gt: +label_OP_le: +label_OP_lt: +label_OP_ne: +label_OP_cmp: +label_OP_cmpl: +label_OP_cmpg: +label_OP_land: +label_OP_lior: +label_OP_cand: +label_OP_cior: +label_OP_select: +label_OP_intrinsicop: +label_OP_intrinsicopwithtype: 
+label_OP_extractbits: +label_OP_depositbits: +label_OP_iassignpcoff: +label_OP_ireadpcoff: +label_OP_checkpoint: +label_OP_addroffpc: +label_OP_asm: +label_OP_dreadoff: +label_OP_addrofoff: +label_OP_dassignoff: +label_OP_iassignspoff: +label_OP_icallprotoassigned: + { + MASSERT(false, "NIY"); + for(;;); + } + return retVal; +} + +MValue EvalExpr(MFunction& func, BaseNode* expr, ParmInf *parm) { + MValue res; + + static void* const labels[] = { + &&label_OP_Undef, +#define OPCODE(base_node,dummy1,dummy2,dummy3) &&label_OP_##base_node, +#include "opcodes.def" +#undef OPCODE + &&label_OP_Undef + }; + + goto *(labels[expr->op]); +label_OP_Undef: + { + MASSERT(false, "Hit OP_undef"); + } +label_OP_constval: + { + MIRConst* constval = static_cast(expr)->GetConstVal(); + int64 constInt = 0; + float constFloat = 0; + double constDouble = 0; + switch (constval->GetKind()) { + case kConstInt: + constInt = static_cast(constval)->GetExtValue(); + break; + case kConstDoubleConst: + constDouble = static_cast(constval)->GetValue(); + break; + case kConstFloatConst: + constFloat = static_cast(constval)->GetValue(); + break; + default: + MASSERT(false, "constval kind %d NYI", constval->GetKind()); + break; + } + PrimType ptyp = expr->ptyp; + switch (ptyp) { + case PTY_i8: + case PTY_i16: + case PTY_i32: + case PTY_i64: + MASSERT(constval->GetKind() == kConstInt, "ptyp and constval kind mismatch"); + res.x.i64 = constInt; + res.ptyp = ptyp; + break; + case PTY_u8: + case PTY_u16: + case PTY_u32: + case PTY_u64: + case PTY_a64: + MASSERT(constval->GetKind() == kConstInt, "ptyp and constval kind mismatch"); + res.x.u64 = constInt; + res.ptyp = ptyp; + break; + case PTY_f32: + MASSERT(constval->GetKind() == kConstFloatConst, "ptyp and constval kind mismatch"); + res.x.f32 = constFloat; + res.ptyp = ptyp; + break; + case PTY_f64: + MASSERT(constval->GetKind() == kConstDoubleConst, "constval ptyp and kind mismatch"); + res.x.f64 = constDouble; + res.ptyp = ptyp; + break; + default: + 
MASSERT(false, "ptype %d for constval NYI", ptyp); + break; + } + } + goto _exit; + +label_OP_add: + { + MValue op0 = EvalExpr(func, expr->Opnd(0)); + MValue op1 = EvalExpr(func, expr->Opnd(1)); + switch (expr->ptyp) { + case PTY_i8: res.x.i8 = op0.x.i8 + op1.x.i8; break; + case PTY_i16: res.x.i16 = op0.x.i16 + op1.x.i16; break; + case PTY_i32: res.x.i32 = (int64)op0.x.i32 + (int64)op1.x.i32; break; + case PTY_i64: res.x.i64 = (uint64)op0.x.i64 + (uint64)op1.x.i64; break; + case PTY_u8: res.x.u8 = op0.x.u8 + op1.x.u8; break; + case PTY_u16: res.x.u16 = op0.x.u16 + op1.x.u16; break; + case PTY_u32: res.x.u32 = op0.x.u32 + op1.x.u32; break; + case PTY_u64: res.x.u64 = op0.x.u64 + op1.x.u64; break; + case PTY_a64: res.x.u64 = op0.x.u64 + op1.x.u64; break; + case PTY_f32: res.x.f32 = op0.x.f32 + op1.x.f32; break; + case PTY_f64: res.x.f64 = op0.x.f64 + op1.x.f64; break; + default: MIR_FATAL("Unsupported PrimType %d for binary operator %s", expr->ptyp, "+"); + } + res.ptyp = expr->ptyp; + goto _exit; + } +label_OP_sub: + { + MValue opnd0 = EvalExpr(func, expr->Opnd(0)); + MValue opnd1 = EvalExpr(func, expr->Opnd(1)); + EXPRBINOP(-, res, opnd0, opnd1, expr->ptyp); + goto _exit; + } +label_OP_mul: + { + MValue opnd0 = EvalExpr(func, expr->Opnd(0)); + MValue opnd1 = EvalExpr(func, expr->Opnd(1)); + EXPRBINOP(*, res, opnd0, opnd1, expr->ptyp); + goto _exit; + } +label_OP_div: + { + MValue opnd0 = EvalExpr(func, expr->Opnd(0)); + MValue opnd1 = EvalExpr(func, expr->Opnd(1)); + EXPRBINOP(/, res, opnd0, opnd1, expr->ptyp); + goto _exit; + } +label_OP_regread: + { + PregIdx regIdx = static_cast(expr)->GetRegIdx(); + switch (regIdx) { + case -(kSregFp): + MASSERT(expr->ptyp == PTY_a64, "regread %%FP with wrong ptyp %d", expr->ptyp); + res.x.a64 = func.fp; + break; + case -(kSregRetval0): + if (expr->ptyp == func.retVal0.ptyp) { + res = func.retVal0; + } else if (expr->ptyp == PTY_agg || expr->ptyp == PTY_u64) { + res = func.retVal0; + } else { + res = CvtType(func.retVal0, 
expr->ptyp, func.retVal0.ptyp); + } + break; + case -(kSregRetval1): + MASSERT(expr->ptyp == func.retVal1.ptyp || + (expr->ptyp == PTY_agg || expr->ptyp == PTY_u64), + "regread %%retVal0 type mismatch: %d %d", expr->ptyp, func.retVal1.ptyp); + res = func.retVal1; + break; + case -(kSregGp): + MASSERT(expr->ptyp == PTY_a64, "regread %%GP with wrong ptyp %d", expr->ptyp); + res.x.a64 = func.info->lmbcMod->unInitPUStatics; + break; + default: + MASSERT(regIdx < func.info->numPregs, "regread regIdx %d out of bound", regIdx); + res = func.pRegs[regIdx]; + break; + } + res.ptyp = expr->ptyp; + goto _exit; + } +label_OP_conststr: + { + UStrIdx ustrIdx = static_cast(expr)->GetStrIdx(); + auto it = func.info->lmbcMod->globalStrTbl.insert( + std::pair(ustrIdx, GlobalTables::GetUStrTable().GetStringFromStrIdx(ustrIdx))); + res.x.ptr = const_cast(it.first->second.c_str()); + res.ptyp = PTY_a64; + goto _exit; + } +label_OP_ireadfpoff: + { + IreadFPoffNode* node = static_cast(expr); + int32 offset = node->GetOffset(); + if (node->ptyp != PTY_agg) { + mload(func.fp+offset, expr->ptyp, res); + goto _exit; + } + // handle PTY_agg + // ireadfpoff agg should be opnd from either call or return, i.e. 
either + // - caller: call &foo (ireadfpoff agg -16, ireadfpoff agg -28) + // - callee: regassign agg %%retval0 (ireadfpoff agg -12) + // so set agg size in mvalue result with agg size from corresponding func formal or func return, and + // - if from caller, read agg into caller allocated buffer for agg and return the pointer in an mvalue to be passed to callee + // - if from callee, just pass fp offset ptr in an mvalue and regassign will copy it to caller's retval0/1 area + if (func.nextStmt->op == OP_call) { // caller + // check if calling external func: + MASSERT(func.aggrArgsBuf != nullptr, "aggrArgsBuf is null"); + memcpy_s(func.aggrArgsBuf+parm->storeIdx, parm->size, func.fp+offset, parm->size); + mload(func.aggrArgsBuf+parm->storeIdx, expr->ptyp, res, parm->size); + } else { // callee + mload(func.fp+offset, expr->ptyp, res, func.info->retSize); + } + goto _exit; + } +label_OP_ireadoff: + { + int32 offset = static_cast(expr)->GetOffset(); + MValue rhs = EvalExpr(func, static_cast(expr)->Opnd(0)); + if (expr->ptyp == PTY_agg) { + // ireadoff agg should be operand for either call or return (like ireadfpoff) + // - caller: call &f (ireadoff agg 0 (regread a64 %1)) + // - callee: regassign agg %%retval0 (ireadoff agg 0 (addrofoff a64 $x 0)) + MASSERT(rhs.ptyp == PTY_a64, "ireadoff agg RHS not PTY_agg"); + if (func.nextStmt->op == OP_call) { + MASSERT(func.aggrArgsBuf != nullptr, "aggrArgsBuf is null"); + memcpy_s(func.aggrArgsBuf+parm->storeIdx, parm->size, rhs.x.a64 + offset, parm->size); + mload(func.aggrArgsBuf+parm->storeIdx, expr->ptyp, res, parm->size); + } else { + MASSERT(func.nextStmt->op == OP_regassign, "ireadoff agg not used as regassign agg opnd"); + mload(rhs.x.a64 + offset, expr->ptyp, res, func.info->retSize); + } + goto _exit; + } + MASSERT(rhs.ptyp == PTY_a64 || rhs.ptyp == PTY_u64 || rhs.ptyp == PTY_i64, + "ireadoff rhs ptyp %d not PTY_a64 or PTY_u64", rhs.ptyp); + mload(rhs.x.a64+offset, expr->ptyp, res); + goto _exit; + } +label_OP_iread: + 
{ + MASSERT(func.nextStmt->op == OP_call && expr->ptyp == PTY_agg, "iread unexpected outside call"); + MValue addr = EvalExpr(func, expr->Opnd(0)); + MASSERT(func.aggrArgsBuf != nullptr, "aggrArgsBuf is null"); + memcpy_s(func.aggrArgsBuf+parm->storeIdx, parm->size, addr.x.a64, parm->size); + mload(func.aggrArgsBuf+parm->storeIdx, expr->ptyp, res, parm->size); + goto _exit; + } +label_OP_eq: + { + PrimType opndType = static_cast(expr)->GetOpndType(); + MValue opnd0 = EvalExpr(func, expr->Opnd(0)); + MValue opnd1 = EvalExpr(func, expr->Opnd(1)); + if (opndType == PTY_f32 || opndType == PTY_f64) { + res.ptyp = expr->ptyp; + HandleFloatEq(expr->op, opndType, res, opnd0, opnd1); + } else { + EXPRCOMPOPNOFLOAT(==, res, opnd0, opnd1, opndType, expr->ptyp); + } + goto _exit; + } +label_OP_ne: + { + PrimType opndType = static_cast(expr)->GetOpndType(); + MValue opnd0 = EvalExpr(func, expr->Opnd(0)); + MValue opnd1 = EvalExpr(func, expr->Opnd(1)); + if (opndType == PTY_f32 || opndType == PTY_f64) { + res.ptyp = expr->ptyp; + HandleFloatEq(expr->op, opndType, res, opnd0, opnd1); + } else { + EXPRCOMPOPNOFLOAT(!=, res, opnd0, opnd1, opndType, expr->ptyp); + } + goto _exit; + } +label_OP_gt: + { + PrimType opndTyp= static_cast(expr)->GetOpndType(); + MValue opnd0 = EvalExpr(func, expr->Opnd(0)); + MValue opnd1 = EvalExpr(func, expr->Opnd(1)); + EXPRCOMPOP(>, res, opnd0, opnd1, opndTyp, expr->ptyp); + goto _exit; + } +label_OP_ge: + { + PrimType opndTyp= static_cast(expr)->GetOpndType(); + MValue opnd0 = EvalExpr(func, expr->Opnd(0)); + MValue opnd1 = EvalExpr(func, expr->Opnd(1)); + EXPRCOMPOP(>=, res, opnd0, opnd1, opndTyp, expr->ptyp); + goto _exit; + } +label_OP_lt: + { + PrimType opndTyp= static_cast(expr)->GetOpndType(); + MValue opnd0 = EvalExpr(func, expr->Opnd(0)); + MValue opnd1 = EvalExpr(func, expr->Opnd(1)); + EXPRCOMPOP(<, res, opnd0, opnd1, opndTyp, expr->ptyp); + goto _exit; + } +label_OP_le: + { + PrimType opndTyp= static_cast(expr)->GetOpndType(); + MValue 
opnd0 = EvalExpr(func, expr->Opnd(0)); + MValue opnd1 = EvalExpr(func, expr->Opnd(1)); + EXPRCOMPOP(<=, res, opnd0, opnd1, opndTyp, expr->ptyp); + goto _exit; + } +label_OP_select: + { + MValue opnd0 = EvalExpr(func, expr->Opnd(0)); + MValue opnd1 = EvalExpr(func, expr->Opnd(1)); + MValue opnd2 = EvalExpr(func, expr->Opnd(2)); + EXPRSELECTOP(res, opnd0, opnd1, opnd2, expr->ptyp); + goto _exit; + } +label_OP_band: + { + MValue opnd0 = EvalExpr(func, expr->Opnd(0)); + MValue opnd1 = EvalExpr(func, expr->Opnd(1)); + EXPRBININTOP(&, res, opnd0, opnd1, expr->ptyp); + goto _exit; + } +label_OP_bior: + { + MValue opnd0 = EvalExpr(func, expr->Opnd(0)); + MValue opnd1 = EvalExpr(func, expr->Opnd(1)); + EXPRBININTOP(|, res, opnd0, opnd1, expr->ptyp); + goto _exit; + } +label_OP_bxor: + { + MValue opnd0 = EvalExpr(func, expr->Opnd(0)); + MValue opnd1 = EvalExpr(func, expr->Opnd(1)); + EXPRBININTOP(^, res, opnd0, opnd1, expr->ptyp); + goto _exit; + } +label_OP_lshr: + { + MValue opnd0 = EvalExpr(func, expr->Opnd(0)); + MValue numBits = EvalExpr(func, expr->Opnd(1)); + EXPRBININTOPUNSIGNED(>>, res, opnd0, numBits, expr->ptyp); + goto _exit; + } +label_OP_ashr: + { + MValue opnd0 = EvalExpr(func, expr->Opnd(0)); + MValue numBits = EvalExpr(func, expr->Opnd(1)); + EXPRBININTOP(>>, res, opnd0, numBits, expr->ptyp); + goto _exit; + } +label_OP_shl: + { + MValue opnd0 = EvalExpr(func, expr->Opnd(0)); + MValue numBits = EvalExpr(func, expr->Opnd(1)); + EXPRBININTOP(<<, res, opnd0, numBits, expr->ptyp); + goto _exit; + } +label_OP_bnot: + { + MValue opnd0 = EvalExpr(func, expr->Opnd(0)); + EXPRUNROP(~, res, opnd0, expr->ptyp); + goto _exit; + } +label_OP_lnot: + { + MValue opnd0 = EvalExpr(func, expr->Opnd(0)); + EXPRUNROP(!, res, opnd0, expr->ptyp); + goto _exit; + } +label_OP_neg: + { + MValue opnd0 = EvalExpr(func, expr->Opnd(0)); + switch (expr->ptyp) { + case PTY_i8: res.x.i8 = -opnd0.x.i8; break; + case PTY_i16: res.x.i16 = -opnd0.x.i16; break; + case PTY_i32: res.x.i32 = 
~(uint32)opnd0.x.i32+1; break; + case PTY_i64: res.x.i64 = -opnd0.x.i64; break; + case PTY_u8: res.x.u8 = -opnd0.x.u8; break; + case PTY_u16: res.x.u16 = -opnd0.x.u16; break; + case PTY_u32: res.x.u32 = -opnd0.x.u32; break; + case PTY_u64: res.x.u64 = -opnd0.x.u64; break; + case PTY_f32: res.x.f32 = -opnd0.x.f32; break; + case PTY_f64: res.x.f64 = -opnd0.x.f64; break; + default: MIR_FATAL("Unsupported PrimType %d for unary operator %s", expr->ptyp, "OP_neg"); + } + res.ptyp = expr->ptyp; + goto _exit; + } +label_OP_abs: + { + MValue op0 = EvalExpr(func, expr->Opnd(0)); + switch (expr->ptyp) { + // abs operand must be signed + case PTY_i8: res.x.i8 = abs(op0.x.i8); break; + case PTY_i16: res.x.i16 = abs(op0.x.i16); break; + case PTY_i32: res.x.i32 = abs(op0.x.i32); break; + case PTY_i64: res.x.i64 = abs(op0.x.i64); break; + case PTY_f32: res.x.f32 = fabsf(op0.x.f32); break; + case PTY_f64: res.x.f64 = fabs(op0.x.f64); break; + default: MASSERT(false, "op_abs unsupported type %d", expr->ptyp); + } + res.ptyp = expr->ptyp; + goto _exit; + } +label_OP_min: + { + MValue opnd0 = EvalExpr(func, expr->Opnd(0)); + MValue opnd1 = EvalExpr(func, expr->Opnd(1)); + EXPRMAXMINOP(<, res, opnd0, opnd1, expr->ptyp); + goto _exit; + } +label_OP_max: + { + MValue opnd0 = EvalExpr(func, expr->Opnd(0)); + MValue opnd1 = EvalExpr(func, expr->Opnd(1)); + EXPRMAXMINOP(>, res, opnd0, opnd1, expr->ptyp); + goto _exit; + } +label_OP_rem: + { + MValue opnd0 = EvalExpr(func, expr->Opnd(0)); + MValue opnd1 = EvalExpr(func, expr->Opnd(1)); + EXPRREMOP(%, res, opnd0, opnd1, expr->ptyp); + goto _exit; + } +label_OP_cvt: + { + PrimType toPtyp = expr->ptyp; + PrimType fromPtyp = static_cast(expr)->FromType(); + MValue opnd = EvalExpr(func, expr->Opnd(0)); + res = CvtType(opnd, toPtyp, fromPtyp); + goto _exit; + } +label_OP_addrofoff: + { + // get addr of var symbol which can be formal/global/extern/PUStatic + int32 offset = static_cast(expr)->offset; + StIdx stidx = static_cast(expr)->stIdx; + uint8 
*addr; + + if (stidx.Islocal()) { // local sym can only be formals or PUstatic + addr = func.GetFormalVarAddr(stidx); + if (!addr) { + addr = func.info->lmbcMod->GetVarAddr(func.info->mirFunc->GetPuidx(), stidx); + } + MASSERT(addr, "addrofoff can not find local var"); + } else { + MASSERT(stidx.IsGlobal(), "addrofoff: symbol neither local nor global"); + MIRSymbol* var = GlobalTables::GetGsymTable().GetSymbolFromStidx(stidx.Idx()); + switch (var->GetStorageClass()) { + case kScExtern: + addr = (uint8 *)(func.info->lmbcMod->FindExtSym(stidx)); + break; + case kScGlobal: + case kScFstatic: + addr = func.info->lmbcMod->GetVarAddr(var->GetStIdx()); + break; + default: + MASSERT(false, "addrofoff: storage class %d NYI", var->GetStorageClass()); + break; + } + } + res.x.a64 = addr + offset; + res.ptyp = PTY_a64; + goto _exit; + } +label_OP_alloca: + { + MValue opnd = EvalExpr(func, expr->Opnd(0)); + res.ptyp = PTY_a64; + res.x.a64 = func.Alloca(opnd.x.u64); + goto _exit; + } +label_OP_addroffunc: + { + FuncAddr *faddr = func.info->lmbcMod->GetFuncAddr(static_cast(expr)->GetPUIdx()); + res.x.a64 = (uint8*)faddr; + res.ptyp = PTY_a64; + goto _exit; + } +label_OP_addroflabel: + { + AddroflabelNode *node = static_cast(expr); + LabelIdx labelIdx = node->GetOffset(); + StmtNode *label = func.info->labelMap[labelIdx]; + res.x.a64 = reinterpret_cast(label); + res.ptyp = expr->ptyp; + goto _exit; + } +label_OP_retype: + { + res = EvalExpr(func, expr->Opnd(0)); + res.ptyp = expr->ptyp; + goto _exit; + } +label_OP_sext: + { + ExtractbitsNode *ext = static_cast(expr); + uint8 bOffset = ext->GetBitsOffset(); + uint8 bSize = ext->GetBitsSize(); + MASSERT(bOffset == 0, "sext unexpected offset"); + uint64 mask = bSize < k64BitSize ? (1ull << bSize) - 1 : ~0ull; + res = EvalExpr(func, expr->Opnd(0)); + res.x.i64 = (((uint64)res.x.i64 >> (bSize - 1)) & 1ull) ? 
res.x.i64 | ~mask : res.x.i64 & mask; + res.ptyp = expr->ptyp; + goto _exit; + } +label_OP_zext: + { + ExtractbitsNode *ext = static_cast(expr); + uint8 bOffset = ext->GetBitsOffset(); + uint8 bSize = ext->GetBitsSize(); + MASSERT(bOffset == 0, "zext unexpected offset"); + uint64 mask = bSize < k64BitSize ? (1ull << bSize) - 1 : ~0ull; + res = EvalExpr(func, expr->Opnd(0)); + res.x.i64 &= mask; + res.ptyp = expr->ptyp; + goto _exit; + } +label_OP_extractbits: + { + ExtractbitsNode *ebits = static_cast(expr); + uint8 bOffset = ebits->GetBitsOffset(); + uint8 bSize = ebits->GetBitsSize(); + uint64 mask = ((1ull << bSize) - 1) << bOffset; + res = EvalExpr(func, expr->Opnd(0)); + res.x.i64 = (uint64)(res.x.i64 & mask) >> bOffset; + if (IsSignedInteger(expr->ptyp)) { + mask = (1ull << bSize) - 1; + res.x.i64 = (((uint64)res.x.i64 >> (bSize - 1)) & 1ull) ? res.x.i64 | ~mask : res.x.i64 & mask; + } + res.ptyp = expr->ptyp; + goto _exit; + } +label_OP_depositbits: + { + DepositbitsNode *dbits = static_cast(expr); + MValue opnd0 = EvalExpr(func, expr->Opnd(0)); + MValue opnd1 = EvalExpr(func, expr->Opnd(1)); + uint64 mask = ~(0xffffffffffffffff << dbits->GetBitsSize()); + uint64 from = (opnd1.x.u64 & mask) << dbits->GetBitsOffset(); + mask = mask << dbits->GetBitsOffset(); + res.x.u64 = (opnd0.x.u64 & ~(mask)) | from; + res.ptyp = expr->ptyp; + goto _exit; + } +label_OP_intrinsicop: + { + auto *intrnop = static_cast(expr); + MValue op0 = EvalExpr(func, expr->Opnd(0)); + res.ptyp = expr->ptyp; + switch (intrnop->GetIntrinsic()) { + case INTRN_C_sin: + if (expr->ptyp == PTY_f32) { + res.x.f32 = sin(op0.x.f32); + } else if (expr->ptyp == PTY_f64) { + res.x.f64 = sin(op0.x.f64); + } + break; + case INTRN_C_ctz32: + if (expr->ptyp == PTY_u32 || expr->ptyp == PTY_i32) { + res.x.u32 = __builtin_ctz(op0.x.u32); + } + break; + case INTRN_C_clz32: + if (expr->ptyp == PTY_u32 || expr->ptyp == PTY_i32) { + res.x.u32 = __builtin_clz(op0.x.u32); + } + break; + case INTRN_C_ffs: + if 
(expr->ptyp == PTY_u32 || expr->ptyp == PTY_i32) { + res.x.u32 = __builtin_ffs(op0.x.u32); + } + break; + case INTRN_C_rev_4: + if (expr->ptyp == PTY_u32 || expr->ptyp == PTY_i32) { + res.x.u32 = __builtin_bitreverse32(op0.x.u32); + } + break; + default: + break; + } + goto _exit; + } + + // unsupported opcodes +label_OP_dassign: +label_OP_piassign: +label_OP_maydassign: +label_OP_iassign: +label_OP_block: +label_OP_doloop: +label_OP_dowhile: +label_OP_if: +label_OP_while: +label_OP_switch: +label_OP_multiway: +label_OP_foreachelem: +label_OP_comment: +label_OP_eval: +label_OP_free: +label_OP_calcassertge: +label_OP_calcassertlt: +label_OP_assertge: +label_OP_assertlt: +label_OP_callassertle: +label_OP_returnassertle: +label_OP_assignassertle: +label_OP_abort: +label_OP_assertnonnull: +label_OP_assignassertnonnull: +label_OP_callassertnonnull: +label_OP_returnassertnonnull: +label_OP_dread: +label_OP_addrof: +label_OP_iaddrof: +label_OP_sizeoftype: +label_OP_fieldsdist: +label_OP_array: +label_OP_iassignoff: +label_OP_iassignfpoff: +label_OP_regassign: +label_OP_goto: +label_OP_brfalse: +label_OP_brtrue: +label_OP_return: +label_OP_rangegoto: +label_OP_call: +label_OP_virtualcall: +label_OP_superclasscall: +label_OP_interfacecall: +label_OP_customcall: +label_OP_polymorphiccall: +label_OP_icall: +label_OP_interfaceicall: +label_OP_virtualicall: +label_OP_intrinsiccall: +label_OP_intrinsiccallwithtype: +label_OP_xintrinsiccall: +label_OP_callassigned: +label_OP_virtualcallassigned: +label_OP_superclasscallassigned: +label_OP_interfacecallassigned: +label_OP_customcallassigned: +label_OP_polymorphiccallassigned: +label_OP_icallassigned: +label_OP_interfaceicallassigned: +label_OP_virtualicallassigned: +label_OP_intrinsiccallassigned: +label_OP_intrinsiccallwithtypeassigned: +label_OP_xintrinsiccallassigned: +label_OP_callinstant: +label_OP_callinstantassigned: +label_OP_virtualcallinstant: +label_OP_virtualcallinstantassigned: +label_OP_superclasscallinstant: 
+label_OP_superclasscallinstantassigned: +label_OP_interfacecallinstant: +label_OP_interfacecallinstantassigned: +label_OP_jstry: +label_OP_try: +label_OP_cpptry: +label_OP_throw: +label_OP_jscatch: +label_OP_catch: +label_OP_cppcatch: +label_OP_finally: +label_OP_cleanuptry: +label_OP_endtry: +label_OP_safe: +label_OP_endsafe: +label_OP_unsafe: +label_OP_endunsafe: +label_OP_gosub: +label_OP_retsub: +label_OP_syncenter: +label_OP_syncexit: +label_OP_decref: +label_OP_incref: +label_OP_decrefreset: +label_OP_membaracquire: +label_OP_membarrelease: +label_OP_membarstoreload: +label_OP_membarstorestore: +label_OP_label: +label_OP_conststr16: +label_OP_ceil: +label_OP_floor: +label_OP_round: +label_OP_trunc: +label_OP_recip: +label_OP_sqrt: +label_OP_malloc: +label_OP_gcmalloc: +label_OP_gcpermalloc: +label_OP_stackmalloc: +label_OP_gcmallocjarray: +label_OP_gcpermallocjarray: +label_OP_stackmallocjarray: +label_OP_resolveinterfacefunc: +label_OP_resolvevirtualfunc: +label_OP_ror: +label_OP_CG_array_elem_add: +label_OP_cmp: +label_OP_cmpl: +label_OP_cmpg: +label_OP_land: +label_OP_lior: +label_OP_cand: +label_OP_cior: +label_OP_intrinsicopwithtype: +label_OP_iassignpcoff: +label_OP_ireadpcoff: +label_OP_checkpoint: +label_OP_addroffpc: +label_OP_igoto: +label_OP_asm: +label_OP_dreadoff: +label_OP_dassignoff: +label_OP_iassignspoff: +label_OP_blkassignoff: +label_OP_icallproto: +label_OP_icallprotoassigned: + { + MASSERT(false, "NIY"); + } + +_exit: + return res; +} + +} // namespace maple diff --git a/src/MapleEng/lmbc/src/load_store.cpp b/src/MapleEng/lmbc/src/load_store.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d5ccd611e2f43be429c121d0013fd3f6b598082a --- /dev/null +++ b/src/MapleEng/lmbc/src/load_store.cpp @@ -0,0 +1,127 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *
 * http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
 * FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include "mvalue.h"
#include "mfunction.h"
#include "massert.h"

namespace maple {

// Load a value of primitive type `ptyp` from memory at `addr` into `res`.
// Signed integer types are sign-extended into the i64 union slot and unsigned
// ones zero-extended into u64. For PTY_agg no data is copied: `res` records the
// address itself plus `aggSize` so the consumer can copy the bytes later.
void mload(uint8* addr, PrimType ptyp, MValue& res, size_t aggSize) {
  res.ptyp = ptyp;
  switch (ptyp) {
    case PTY_i8:
      res.x.i64 = *(int8 *)addr;   // sign-extends into the 64-bit slot
      return;
    case PTY_i16:
      res.x.i64 = *(int16 *)addr;
      return;
    case PTY_i32:
      res.x.i64 = *(int32 *)addr;
      return;
    case PTY_i64:
      res.x.i64 = *(int64 *)addr;
      return;
    case PTY_u8:
      res.x.u64 = *(uint8 *)addr;  // zero-extends into the 64-bit slot
      return;
    case PTY_u16:
      res.x.u64 = *(uint16 *)addr;
      return;
    case PTY_u32:
      res.x.u64 = *(uint32 *)addr;
      return;
    case PTY_u64:
      res.x.u64 = *(uint64 *)addr;
      return;
    case PTY_f32:
      res.x.f32 = *(float *)addr;
      return;
    case PTY_f64:
      res.x.f64 = *(double *)addr;
      return;
    case PTY_a64:
      res.x.a64 = *(uint8 **)addr;
      return;
    case PTY_agg:
      // Aggregate: keep a pointer to the bytes, not a copy.
      res.x.a64 = addr;
      res.aggSize = aggSize; // agg size
      return;
    default:
      MASSERT(false, "mload ptyp %d NYI", ptyp);
      break;
  }
}

// Store `val` to memory at `addr` as primitive type `ptyp`.
// `toVarArgStack` selects the calling-convention rule for PTY_agg values being
// pushed as variadic arguments (large aggregates go by pointer, small ones by copy).
void mstore(uint8* addr, PrimType ptyp, MValue& val, bool toVarArgStack) {
  // Integer<->integer stores may narrow/widen freely; for anything else the
  // types must match, except a64/u64 which are interchangeable 64-bit words.
  // (&& binds tighter than ||, so the pairs below are grouped as intended.)
  if (!IsPrimitiveInteger(ptyp) || !IsPrimitiveInteger(val.ptyp)) {
    MASSERT(ptyp == val.ptyp ||
            ptyp == PTY_a64 && val.ptyp == PTY_u64 ||
            ptyp == PTY_u64 && val.ptyp == PTY_a64,
            "mstore type mismatch: %d and %d", ptyp, val.ptyp);
  }
  switch (ptyp) {
    case PTY_i8:
      *(int8 *)addr = val.x.i8;
      return;
    case PTY_i16:
      *(int16 *)addr = val.x.i16;
      return;
    case PTY_i32:
      *(int32 *)addr = val.x.i32;
      return;
    case PTY_i64:
      *(int64 *)addr = val.x.i64;
      return;
    case PTY_u8:
      *(uint8 *)addr = val.x.u8;
      return;
    case PTY_u16:
      *(uint16 *)addr = val.x.u16;
      return;
    case PTY_u32:
      *(uint32 *)addr = val.x.u32;
      return;
    case PTY_u64:
      *(uint64 *)addr = val.x.u64;
      return;
    case PTY_f32:
      *(float *)addr = val.x.f32;
      return;
    case PTY_f64:
      *(double *)addr = val.x.f64;
      return;
    case PTY_a64:
      *(uint8 **)addr = val.x.a64;
      return;
    case PTY_agg:
      if (toVarArgStack) {
        // va-arg aggregates > 16 bytes are passed by pointer; smaller ones by value copy.
        if (val.aggSize > maplebe::k16ByteSize) {
          *(uint8 **)addr = val.x.a64;
        } else {
          memcpy_s(addr, val.aggSize, val.x.a64, val.aggSize);
        }
      } else {
        // val holds aggr data (regassign agg of <= 16 bytes to %%retval0) instead of ptr to aggr data
        MASSERT(val.aggSize <= 16, "mstore agg > 16");
        memcpy_s(addr, val.aggSize, &(val.x.u64), val.aggSize);
      }
      return;
    default:
      MASSERT(false, "mstore ptyp %d NYI", ptyp);
      break;
  }
}

} // namespace maple
diff --git a/src/MapleEng/lmbc/src/mfunction.cpp b/src/MapleEng/lmbc/src/mfunction.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9d274c2b3d4da923d27ede78a4bee84ef5fc4044
--- /dev/null
+++ b/src/MapleEng/lmbc/src/mfunction.cpp
@@ -0,0 +1,340 @@
/*
 * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved.
 *
 * OpenArkCompiler is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *
 * http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
 * FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PSL v2 for more details.
+ */ +#include +#include "mfunction.h" +#include "mprimtype.h" +#include "massert.h" + +namespace maple { + +MFunction::MFunction(LmbcFunc *funcInfo, + MFunction *funcCaller, + uint8 *autoVars, + MValue *pregs, + MValue *formalvars) : info(funcInfo), + caller(funcCaller), + frame(autoVars), + pRegs(pregs), + formalVars(formalvars), + callArgs(nullptr), + aggrArgsBuf(nullptr) { + numCallArgs = 0; + nextStmt = info->mirFunc->GetBody(); + fp = (uint8*)frame + info->frameSize; + allocaOffset = 0; + allocaMem = nullptr; +} + +MFunction::~MFunction() { } + +uint8 *MFunction::Alloca(uint32 sz) { + if (allocaOffset + sz > ALLOCA_MEMMAX) { + return nullptr; + } + uint8 *ptr = allocaMem + allocaOffset; + allocaOffset += sz; + return ptr; +} + +uint8 *MFunction::GetFormalVarAddr(StIdx stidx) { + auto it = info->stidx2Parm.find(stidx.FullIdx()); + if (it == info->stidx2Parm.end()) { + return nullptr; + } + if (it->second->ptyp == PTY_agg) { + MASSERT(caller->aggrArgsBuf, "aggrArgsBuf not init"); + return caller->aggrArgsBuf + it->second->storeIdx; + } + return((uint8*)&formalVars[it->second->storeIdx].x); +} + +bool IsExtFunc(PUIdx puIdx, LmbcMod& module) { + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + if (func->IsExtern() || // ext func with proto + func->GetAttr(FUNCATTR_implicit) || // ext func with no proto + !func->GetBody() || // func with no body + (func->IsWeak() && module.FindExtFunc(puIdx))) { + return true; + } + return false; +} + +// Return size (roundup to 8 byte multiples) of aggregate returned by an iread. 
+size_t GetIReadAggrSize(BaseNode* expr) { + MASSERT(expr->op == OP_iread && expr->ptyp == PTY_agg, "iread on non PTY_agg type"); + IreadNode *iread = static_cast(expr); + TyIdx ptrTyIdx = iread->GetTyIdx(); + MIRPtrType *ptrType = + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptrTyIdx)); + MIRType *aggType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptrType->GetPointedTyIdx()); + FieldID fd = iread->GetFieldID(); + size_t sz = aggType->GetSize(); + if (fd != 0) { + MIRStructType *structType = static_cast(aggType); + MIRType *fdType = structType->GetFieldType(fd); + uint32 fdOffset = structType->GetBitOffsetFromBaseAddr(fd); + sz = fdType->GetSize(); + (void)fdOffset; + } + return ((sz + maplebe::k8ByteSize-1) >> maplebe::k8BitShift) << maplebe::k8BitShift; +} + +// Get total buffer size needed for all arguments of type PTY_agg in a call. +// This includes all type PTY_agg formal args and va-args (if any). +size_t GetAggCallArgsSize(LmbcFunc *callee, CallNode *call) { + size_t totalAggCallArgsSize = callee->formalsAggSize; + if (callee->isVarArgs) { + for (int i = callee->formalsNum; i < call->NumOpnds(); i++) { // PTY_agg va-args + if (call->Opnd(i)->ptyp == PTY_agg) { + MASSERT(call->Opnd(i)->op == OP_iread, "agg var arg unexpected op"); + totalAggCallArgsSize += GetIReadAggrSize(call->Opnd(i)); + } + } + } + return totalAggCallArgsSize; +} + +// Caller passes arguments to callees using MValues which is a union of Maple +// prim types. For PTY_agg, MValue holds ptr to the aggregate data. +// Memory allocation scenarios for call arguments: +// 1. Non-varidiac callee +// - Caller alloca array of MValue to pass call operands to callee +// - If any call arg is PTY_agg +// - Caller alloca agg buffer to hold data for all args of type PTY_agg +// - MValues for all args of type PTY_agg points to offsets in agg buffer +// 2. 
Varidiac callee +// - Caller alloca array of MValue to pass call operands including va-arg ones to callee +// - If any call arg is PTY_agg +// - Caller alloca agg buffer to hold data for all args (formals+va_args) of type PTY_agg +// - MValues of all args of type PTY_agg points to offsets in agg buffer +// - Caller alloca arg stack per AARCH64 ABI for va-args and copy va-args to this arg stack +void MFunction::CallMapleFuncDirect(CallNode *call) { + LmbcFunc *callee = info->lmbcMod->LkupLmbcFunc(call->GetPUIdx()); + if (!callee->formalsNum) { // ignore args if callee takes no params + InvokeFunc(callee, this); + return; + } + // alloca stack space for aggr args before evaluating operands + size_t totalAggCallArgsSize = GetAggCallArgsSize(callee, call); + uint8 buf[totalAggCallArgsSize]; + aggrArgsBuf = buf; // stack alloc for aggr args + for (int i = 0, sz = 0, offset = callee->formalsAggSize; i < call->NumOpnds(); i++) { + // a non-aggregate arg + if (call->Opnd(i)->ptyp != PTY_agg) { + callArgs[i] = EvalExpr(*this, call->Opnd(i)); + continue; + } + // an aggregate formal arg + if (i < callee->formalsNum) { + callArgs[i] = EvalExpr(*this, call->Opnd(i), callee->pos2Parm[i]); + continue; + } + // an aggregate var-arg + sz = GetIReadAggrSize(call->Opnd(i)); + ParmInf parmInf(PTY_agg, sz, false, offset); + offset += sz; + callArgs[i] = EvalExpr(*this, call->Opnd(i), &parmInf); + } + if (callee->isVarArgs) { + CallVaArgFunc(call->NumOpnds(), callee); + } else { + InvokeFunc(callee, this); + } +} + +void MFunction::CallMapleFuncIndirect(IcallNode *icall, LmbcFunc *callInfo) { + if (!callInfo->formalsNum) { // ignore caller args if callee has no formals + InvokeFunc(callInfo, this); + return; + } + + // Set up call args - skip over 1st arg, which is addr of func to call + uint8 buf[callInfo->formalsAggSize]; + aggrArgsBuf = buf; + for (int i = 0; i < icall->NumOpnds() - 1; i++) { + callArgs[i] = (icall->Opnd(i+1)->ptyp == PTY_agg) ? 
+ EvalExpr(*this, icall->Opnd(i+1), callInfo->pos2Parm[i]) : + EvalExpr(*this, icall->Opnd(i+1)); + } + if (callInfo->isVarArgs) { + CallVaArgFunc(icall->NumOpnds()-1, callInfo); + } else { + InvokeFunc(callInfo, this); + } +} + +// Maple front end generates Maple IR in varidiac functions to +// use target ABI va_list to access va-args. For the caller, this +// function sets up va-args in a stack buffer which is passed to +// the variadic callee in va_list fields (stack field in aarch64 +// valist, or overflow_arg_area field in x86_64 valist): +// - Calc size of stack to emulate va-args stack in ABI +// - Alloca va-args stack and copy args to the stack +// - For vaArg up to 16 bytes, copy data directly to vaArg stack +// - For vaArg larger than 16 bytes, put pointer to data in vaArg stack +// - On a va_start intrinsic, the va-args stack addr in caller.vaArgs +// is saved in corresponding vaList field for va-arg access by callee. +void MFunction::CallVaArgFunc(int numArgs, LmbcFunc *callInfo) { + uint32 vArgsSz = 0; + for (int i = callInfo->formalsNum; i < numArgs; ++i) { + if (callArgs[i].ptyp != PTY_agg || callArgs[i].aggSize > maplebe::k16ByteSize) { + vArgsSz += maplebe::k8ByteSize; + } else { + vArgsSz += callArgs[i].aggSize; + } + } + vaArgsSize = vArgsSz; + uint8 buf[vaArgsSize]; + vaArgs = buf; + for (int i = callInfo->formalsNum, offset = 0; i < numArgs; ++i) { + mstore(vaArgs + offset, callArgs[i].ptyp, callArgs[i], true); + if (callArgs[i].ptyp != PTY_agg || callArgs[i].aggSize > maplebe::k16ByteSize) { + offset += maplebe::k8ByteSize; + } else { + offset += callArgs[i].aggSize; + } + } + InvokeFunc(callInfo, this); +} + +void MFunction::CallExtFuncDirect(CallNode* call) { + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(call->GetPUIdx()); + MapleVector &formalDefVec = func->GetFormalDefVec(); + FuncAddr& faddr = *info->lmbcMod->GetFuncAddr(call->GetPUIdx()); + ffi_fp_t fp = (ffi_fp_t)(faddr.funcPtr.nativeFunc); + MASSERT(fp, 
"External function not found"); + + for (int i = formalDefVec.size(); i < call->NumOpnds(); i++) { + if (call->Opnd(i)->ptyp == PTY_agg) { + MASSERT(false, "extern func: va-arg of agg type NYI"); + } + } + // alloca stack space for aggr args before evaluating operands + uint8 buf[faddr.formalsAggSize]; + aggrArgsBuf = buf; + for (int i = 0, offset = 0; i < call->NumOpnds(); i++) { + // a non-aggregate arg + if (call->Opnd(i)->ptyp != PTY_agg) { + callArgs[i] = EvalExpr(*this, call->Opnd(i)); + continue; + } + // an aggregate formal arg + if (i < formalDefVec.size()) { + MIRType* ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(formalDefVec[i].formalTyIdx); + MASSERT(ty->GetPrimType() == PTY_agg, "expects formal arg of agg type"); + ParmInf parmInf(PTY_agg, ty->GetSize(), false, offset); + offset += ty->GetSize(); + callArgs[i] = EvalExpr(*this, call->Opnd(i), &parmInf); + continue; + } + } + CallWithFFI(func->GetReturnType()->GetPrimType(), fp); +} + +void MFunction::CallExtFuncIndirect(IcallNode *icallproto, void* fp) { + for (int i = 0; i < icallproto->NumOpnds() - 1; i++) { + callArgs[i]= EvalExpr(*this, icallproto->Opnd(i+1)); + } + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallproto->GetRetTyIdx()); + MIRFuncType *fProto = static_cast(type); + MIRType *fRetType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fProto->GetRetTyIdx()); + CallWithFFI(fRetType->GetPrimType(), (ffi_fp_t)fp); +} + +void MFunction::CallIntrinsic(IntrinsiccallNode &intrn) { + MIRIntrinsicID callId = intrn.GetIntrinsic(); + switch (callId) { + case INTRN_C_va_start: { + MValue addrofAP = EvalExpr(*this, intrn.Opnd(0)); + // setup VaList for Aarch64 + VaList *vaList = (maple::VaList*)addrofAP.x.a64; + vaList->gr_offs = 0; // indicate args are on vaList stack + vaList->stack = caller->vaArgs; + break; + } + default: + MASSERT(false, "CallIntrinsic %d\n NYI", callId); + break; + } +} + +// Maple PrimType to libffi type conversion table +static ffi_type 
ffi_type_table[] = { + ffi_type_void, // kPtyInvalid +#define EXPANDFFI1(x) x, +#define EXPANDFFI2(x) EXPANDFFI1(x) +#define PRIMTYPE(P) EXPANDFFI2(FFITYPE_##P) +#define LOAD_ALGO_PRIMARY_TYPE +#include "prim_types.def" +#undef PRIMTYPE + ffi_type_void // kPtyDerived +}; + +// FFI type def for Arm64 VaList struct fields +ffi_type *vaListObjAarch64 [] = { + ffi_type_table + PTY_ptr, + ffi_type_table + PTY_ptr, + ffi_type_table + PTY_ptr, + ffi_type_table + PTY_i32, + ffi_type_table + PTY_i32, + nullptr +}; + +// FFI type def for X86_64 VaList struct fields +ffi_type *vaListObjX86_64 [] = { + ffi_type_table + PTY_u32, + ffi_type_table + PTY_u32, + ffi_type_table + PTY_ptr, + ffi_type_table + PTY_ptr, + nullptr +}; + +// currently only support ARM64 va_list +ffi_type vaList_ffi_type = { 0, 0, FFI_TYPE_STRUCT, vaListObjAarch64 }; + +// Setup array of call args and their types then call libffi interface +// to invoked external function. Note that for varidiac external funcs, +// PTY_agg type is used to detect a pass by value va_list arg, and used +// to setup its type for ffi call - this works for now because pass by +// value struct args (except va_list) is not supported yet when calling +// external varidiac functions - a different way to detect pass by value +// va_list arg will be needed when pass by value struct for external +// varidiac funcs is supported. 
+void MFunction::CallWithFFI(PrimType ret_ptyp, ffi_fp_t fp) { + ffi_cif cif; + ffi_type ffi_ret_type = ffi_type_table[ret_ptyp]; + ffi_type* arg_types[numCallArgs]; + void* args[numCallArgs]; + + // gather args and arg types + for (int i = 0; i < numCallArgs; ++i) { + args[i] = &callArgs[i].x; + if (callArgs[i].ptyp == PTY_agg) { + arg_types[i] = &vaList_ffi_type; + } else { + arg_types[i] = ffi_type_table + callArgs[i].ptyp; + } + } + + ffi_status status = ffi_prep_cif(&cif, FFI_DEFAULT_ABI, numCallArgs, &ffi_ret_type, arg_types); + if (status == FFI_OK) { + ffi_call(&cif, fp, &retVal0.x, args); + retVal0.ptyp = ret_ptyp; + } else { + MIR_FATAL("Failed to call method at %p", (void *)fp); + } +} + +} // namespace maple diff --git a/src/MapleEng/lmbc/src/mplsh.cpp b/src/MapleEng/lmbc/src/mplsh.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9cbf9812dc8b61872a5caee845fc0347c9d08f42 --- /dev/null +++ b/src/MapleEng/lmbc/src/mplsh.cpp @@ -0,0 +1,126 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include "massert.h" +#include "lmbc_eng.h" +#include "eng_shim.h" + +namespace maple { + +void *LmbcMod::FindExtFunc(PUIdx puidx) { + void* fp = extFuncMap[puidx]; + if (fp) { + return fp; + } + std::string fname = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puidx)->GetName(); + for (auto it : libHandles) { + fp = dlsym(it, fname.c_str()); + if (fp) { + break; + } + } + MASSERT(fp, "dlsym symbol not found: %s", fname.c_str()); + extFuncMap[puidx] = fp; + return(fp); +} + +void *LmbcMod::FindExtSym(StIdx stidx) { + void* var = extSymMap[stidx.FullIdx()]; + if (var) { + return var; + } + MIRSymbol* sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(stidx.Idx()); + if (sym) { + for (auto it : libHandles) { + var = dlsym(it, sym->GetName().c_str()); + if (var) { + break; + } + } + MASSERT(var, "dlsym ExtSym not found: %s", sym->GetName().c_str()); + extSymMap[stidx.FullIdx()] = var; + } + MASSERT(sym, "Unable to find symbol"); + return(var); +} + +maple::MIRModule *LmbcMod::Import(std::string path) { + maple::MIRModule* mod = new maple::MIRModule(path.c_str()); + mod->SetSrcLang(kSrcLangC); + std::string::size_type lastdot = mod->GetFileName().find_last_of("."); + bool islmbc = lastdot != std::string::npos && mod->GetFileName().compare(lastdot, 5, ".lmbc\0") == 0; + if (!islmbc) { + ERR(kLncErr, "Input must be .lmbc file: %s", path.c_str()); + delete mod; + return nullptr; + } + + BinaryMplImport binMplt(*mod); + binMplt.SetImported(false); + std::string modid = mod->GetFileName(); + if (!binMplt.Import(modid, true)) { + ERR(kLncErr, "mplsh-lmbc: cannot open .lmbc file: %s", modid.c_str()); + delete mod; + return nullptr; + } + return mod; +} + +// C runtime libs to preload. +// Add to list as needed or change to read list dynamically at runtime. 
+std::vector preLoadLibs = { + LIBC_SO, + LIBM_SO +}; + +void LmbcMod::LoadDefLibs() { + for (auto it : preLoadLibs) { + void *handle = dlopen(it.c_str(), RTLD_NOW | RTLD_GLOBAL | RTLD_NODELETE); + MASSERT(handle, "dlopen %s failed", it.c_str()); + libHandles.push_back(handle); + } +} + +LmbcMod::LmbcMod(std::string path) : lmbcPath(path) { + LoadDefLibs(); + mirMod = Import(path); + // In Lmbc GlobalMemSize is the mem segment size for un-init + // PU static variables, and is referenced through %%GP register. + unInitPUStaticsSize = mirMod->GetGlobalMemSize(); +} + +int RunLmbc(int argc, char** argv) { + int rc = 1; + const int skipArgsNum = 1; + LmbcMod* mod = new LmbcMod(argv[skipArgsNum]); + MASSERT(mod, "Create Lmbc module failed"); + MASSERT(mod->mirMod, "Import Lmbc module failed"); + mod->InitModule(); + if (mod->mainFn) { + rc = MplEngShim(mod->mainFn, argc-skipArgsNum, argv+skipArgsNum); + } + return rc; +} + +} // namespace maple + +int main(int argc, char **argv) { + if (argc == 1) { + std::string path(argv[0]); + (void)MIR_PRINTF("usage: %s .lmbc\n", path.substr(path.find_last_of("/\\") + 1).c_str()); + exit(1); + } + return maple::RunLmbc(argc, argv); +} diff --git a/src/MapleEng/lmbc/test/c2lmbc.sh b/src/MapleEng/lmbc/test/c2lmbc.sh new file mode 100755 index 0000000000000000000000000000000000000000..5c0e8d9ffe888085b450f70265b435f0a6630086 --- /dev/null +++ b/src/MapleEng/lmbc/test/c2lmbc.sh @@ -0,0 +1,58 @@ +#!/bin/bash +# Copyright (C) [2022] Futurewei Technologies, Inc. All rights reverved. +# +# Licensed under the Mulan Permissive Software License v2 +# You can use this software according to the terms and conditions of the MulanPSL - 2.0. +# You may obtain a copy of MulanPSL - 2.0 at: +# +# https://opensource.org/licenses/MulanPSL-2.0 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. 
+# See the MulanPSL - 2.0 for more details. +# +set -x +[[ -z "${MAPLE_ROOT}" ]] && { echo "Please source OpenArkCompiler environment"; exit 1; } + +if [ ! $# -le 2 ] || [ $# -eq 0 ]; then + echo "usage: ./c2lmbc.sh .c [clang2mpl|hir2mpl]" + exit +fi + +if [ $# -le 1 ]; then + FE=clang2mpl +else + FE=$2 +fi + +# hir2mpl flags +CLANGBIN=$MAPLE_ROOT/output/tools/bin +LINARO=$MAPLE_ROOT/output/tools/gcc-linaro-7.5.0 +ISYSTEM_FLAGS="-isystem $MAPLE_ROOT/output/aarch64-clang-release/lib/include -isystem $LINARO/aarch64-linux-gnu/libc/usr/include -isystem $LINARO/lib/gcc/aarch64-linux-gnu/7.5.0/include" +CLANGFE_FLAGS="-emit-ast --target=aarch64 -U __SIZEOF_INT128__ $ISYSTEM_FLAGS" +CLANGFE_FLAGS="$CLANGFE_FLAGS -o ${file%.c}.ast" +# clang2mpl flags +FE_FLAG="--ascii --simple-short-circuit --improved-issimple" +CLANG_FLAGS="--target=aarch64-linux-elf -Wno-return-type -U__SIZEOF_INT128__ -O3 -Wno-implicit -save-temps -fno-builtin-printf -fno-common -falign-functions=4" +# executables +CLANG=$CLANGBIN/clang +HIR2MPL=$MAPLE_EXECUTE_BIN/hir2mpl +CLANG2MPL=$MAPLE_EXECUTE_BIN/clang2mpl +MAPLE=$MAPLE_EXECUTE_BIN/maple +IRBUILD=$MAPLE_EXECUTE_BIN/irbuild +MPLSH=$MAPLE_EXECUTE_BIN/mplsh-lmbc + +file=$(basename -- "$1") +file="${file%.*}" + +if [[ $FE == "hir2mpl" ]]; then + $CLANG $CLANGFE_FLAGS -o ${file%.c}.ast $file.c || exit 1 + $HIR2MPL ${file%.c}.ast -enable-variable-array -o ${file%.c}.mpl || exit 1 +else + $CLANG2MPL $FE_FLAG $file.c -- $CLANG_FLAGS || exit 1 +fi + +$MAPLE --run=me --option="-O2 --no-lfo --no-mergestmts --skip-phases=slp" --genmempl --genlmbc $file.mpl +$IRBUILD $file.lmbc +mv comb.me.mpl $file.me.mpl