diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
index 0da590fac958dabf75157a41a0aab4de109ee117..8b2820a18794a833e1ecbc4aebcdb43f7e284b46 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
@@ -1068,7 +1068,7 @@ void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) {
   ASSERT(stmt.GetRHS()->op == OP_regread, "SelectAggDassign: NYI");
   bool isRet = false;
   if (lhsType->GetKind() == kTypeStruct || lhsType->GetKind() == kTypeUnion) {
-    RegreadNode *rhsregread = static_cast<RegreadNode *>(stmt.GetRHS());
+    RegreadNode *rhsregread = static_cast<RegreadNode*>(stmt.GetRHS());
     PregIdx pregIdx = rhsregread->GetRegIdx();
     if (IsSpecialPseudoRegister(pregIdx)) {
       if ((-pregIdx) == kSregRetval0) {
@@ -1170,13 +1170,13 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) {
   int32 lhsOffset = 0;
   MIRType *stmtType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(stmt.GetTyIdx());
   MIRSymbol *addrSym = nullptr;
-  MIRPtrType *lhsPointerType;
+  MIRPtrType *lhsPointerType = nullptr;
   if (stmtType->GetPrimType() == PTY_agg) {
     /* Move into regs */
-    AddrofNode &addrofnode = static_cast<AddrofNode &>(stmt.GetAddrExprBase());
+    AddrofNode &addrofnode = static_cast<AddrofNode&>(stmt.GetAddrExprBase());
     addrSym = mirModule.CurFunction()->GetLocalOrGlobalSymbol(addrofnode.GetStIdx());
     MIRType *addrty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(addrSym->GetTyIdx());
-    lhsPointerType = static_cast<MIRPtrType *>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(addrty->GetTypeIndex()));
+    lhsPointerType = static_cast<MIRPtrType*>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(addrty->GetTypeIndex()));
   } else {
     lhsPointerType = static_cast<MIRPtrType*>(stmtType);
   }
@@ -1224,9 +1224,9 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) {
     rhsOffset = GetBecommon().GetFieldOffset(*structType, rhsDread->GetFieldID()).first;
   }
   if (stmtType->GetPrimType() == PTY_agg) {
-    // generate move to regs.
+    /* generate move to regs. */
     CHECK_FATAL(lhsSize <= k16ByteSize, "SelectAggIassign: illegal struct size");
-    // aggregates are 8 byte aligned.
+    /* aggregates are 8 byte aligned. */
     Operand *rhsmemopnd = nullptr;
     RegOperand *result[kTwoRegister]; /* maximum 16 bytes, 2 registers */
     bool parmCopy = IsParamStructCopy(*rhsSymbol);
@@ -1242,15 +1242,15 @@
       MOperator mop1 = PickLdInsn(loadSize * kBitsPerByte, PTY_u32);
       Insn &ld = GetCG()->BuildInstruction<AArch64Insn>(mop1, *(result[i]), *rhsmemopnd);
       GetCurBB()->AppendInsn(ld);
-    } 
+    }
     for (uint32 i = 0; i < numRegs; i++) {
       AArch64reg preg = (i == 0 ? R0 : R1);
       RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt);
-      MOperator mop2 = (loadSize == k4ByteSize) ? MOP_wmovrr: MOP_xmovrr;
+      MOperator mop2 = (loadSize == k4ByteSize) ? MOP_wmovrr : MOP_xmovrr;
       Insn &mov = GetCG()->BuildInstruction<AArch64Insn>(mop2, dest, *(result[i]));
       GetCurBB()->AppendInsn(mov);
     }
-    // Create artificial dependency to extend the live range
+    /* Create artificial dependency to extend the live range */
     for (uint32 i = 0; i < numRegs; i++) {
       AArch64reg preg = (i == 0 ? R0 : R1);
       RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt);
@@ -1323,7 +1323,7 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) {
     isRefField = GetBecommon().IsRefField(*rhsStructType, rhsIread->GetFieldID());
   }
   if (stmtType->GetPrimType() == PTY_agg) {
-    // generate move to regs.
+    /* generate move to regs. */
     CHECK_FATAL(lhsSize <= k16ByteSize, "SelectAggIassign: illegal struct size");
     RegOperand *result[kTwoRegister]; /* maximum 16 bytes, 2 registers */
     uint32 loadSize = (lhsSize <= k4ByteSize) ? k4ByteSize : k8ByteSize;
@@ -1344,7 +1344,7 @@
       Insn &mov = GetCG()->BuildInstruction<AArch64Insn>(MOP_xmovrr, dest, *(result[i]));
       GetCurBB()->AppendInsn(mov);
     }
-    // Create artificial dependency to extend the live range
+    /* Create artificial dependency to extend the live range */
     for (uint32 i = 0; i < numRegs; i++) {
       AArch64reg preg = (i == 0 ? R0 : R1);
       RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt);
@@ -1435,7 +1435,7 @@ Operand *AArch64CGFunc::SelectDread(const BaseNode &parent, DreadNode &expr) {
       dataSize = GetPrimTypeBitSize(expr.GetPrimType());
     }
   }
-  MemOperand *memOpnd;
+  MemOperand *memOpnd = nullptr;
   if (aggSize > k8ByteSize) {
     if (parent.op == OP_eval) {
       if (symbol->GetAttr(ATTR_volatile)) {
@@ -1444,12 +1444,12 @@
         uint32 numLoads = RoundUp(aggSize, k64BitSize) / k64BitSize;
         for (uint32 o = 0; o < numLoads; ++o) {
           if (parmCopy) {
-            memOpnd = &LoadStructCopyBase(*symbol, offset + o * kSizeOfPtr , kSizeOfPtr);
+            memOpnd = &LoadStructCopyBase(*symbol, offset + o * kSizeOfPtr, kSizeOfPtr);
           } else {
             memOpnd = &GetOrCreateMemOpnd(*symbol, offset + o * kSizeOfPtr, kSizeOfPtr);
           }
-          if (IsImmediateOffsetOutOfRange(*static_cast<AArch64MemOperand *>(memOpnd), kSizeOfPtr)) {
-            memOpnd = &SplitOffsetWithAddInstruction(*static_cast<AArch64MemOperand *>(memOpnd), kSizeOfPtr);
+          if (IsImmediateOffsetOutOfRange(*static_cast<AArch64MemOperand*>(memOpnd), kSizeOfPtr)) {
+            memOpnd = &SplitOffsetWithAddInstruction(*static_cast<AArch64MemOperand*>(memOpnd), kSizeOfPtr);
           }
           SelectCopy(dest, PTY_u64, *memOpnd, PTY_u64);
         }
@@ -1457,7 +1457,7 @@
         /* No side-effects. No need to generate anything for eval. */
       }
     } else {
-      CHECK_FATAL(0,"SelectDread: Illegal agg size");
+      CHECK_FATAL(0, "SelectDread: Illegal agg size");
    }
  }
  if (parmCopy) {
@@ -1714,7 +1714,7 @@ Operand *AArch64CGFunc::SelectIread(const BaseNode &parent, IreadNode &expr) {
      /* regsize is correct */
    } else {
      uint32 sz = GetBecommon().GetTypeSize(pointedType->GetTypeIndex().GetIdx());
-      regSize = (sz <= 4) ? k4ByteSize : k8ByteSize;
+      regSize = (sz <= k4ByteSize) ? k4ByteSize : k8ByteSize;
    }
  } else if (regSize < k4ByteSize) {
    regSize = k4ByteSize; /* 32-bit */
@@ -2057,8 +2057,9 @@ void AArch64CGFunc::SelectCondGoto(LabelOperand &targetOpnd, Opcode jmpOp, Opcod
  MOperator mOp = is64Bits ? MOP_xcmprr : MOP_wcmprr;

  if (isImm) {
-    /* Special cases, i.e., comparing with zero */
-    /* Do not perform optimization for C, unlike Java which has no unsigned int. */
+    /* Special cases, i.e., comparing with zero
+     * Do not perform optimization for C, unlike Java which has no unsigned int.
+     */
    if (static_cast<ImmOperand*>(opnd1)->IsZero() && (Globals::GetInstance()->GetOptimLevel() > 0) &&
        ((mirModule.GetSrcLang() != kSrcLangC) || ((primType != PTY_u64) && (primType != PTY_u32)))) {
      bool finish = GenerateCompareWithZeroInstruction(jmpOp, cmpOp, is64Bits, targetOpnd, *opnd0);
@@ -3715,10 +3716,10 @@ void AArch64CGFunc::SelectCvtInt2Int(const BaseNode *parent, Operand *&resOpnd,
  } else {
    /* Unsigned */
    if (is64Bit) {
-      if (fsize == 8) {
+      if (fsize == k8BitSize) {
        ImmOperand &immOpnd = CreateImmOperand(0xff, k64BitSize, false);
        GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(MOP_xandrri13, *resOpnd, *opnd0, immOpnd));
-      } else if (fsize == 16) {
+      } else if (fsize == k16BitSize) {
        ImmOperand &immOpnd = CreateImmOperand(0xffff, k64BitSize, false);
        GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(MOP_xandrri13, *resOpnd, *opnd0, immOpnd));
      } else {
@@ -3770,7 +3771,7 @@
  } else {
    /* same size, so resOpnd can be set */
    if ((mirModule.GetSrcLang() == kSrcLangJava) || (IsSignedInteger(fromType) == IsSignedInteger(toType)) ||
-        (GetPrimTypeSize(toType) > 4)) {
+        (GetPrimTypeSize(toType) > k4BitSize)) {
      AArch64RegOperand *reg = static_cast<AArch64RegOperand*>(resOpnd);
      reg->SetRegisterNumber(static_cast<AArch64RegOperand*>(opnd0)->GetRegisterNumber());
    } else if (IsUnsignedInteger(toType)) {
@@ -3786,25 +3787,25 @@
          mop = MOP_xuxtw64;
          break;
        default:
-          CHECK_FATAL(0,"Unhandled unsigned convert");
+          CHECK_FATAL(0, "Unhandled unsigned convert");
      }
      GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(mop, *resOpnd, *opnd0));
    } else {
-      // signed target
+      /* signed target */
      uint32 size = GetPrimTypeSize(toType);
      MOperator mop;
      switch (toType) {
        case PTY_i8:
-          mop = (size > 4) ? MOP_xsxtb64 : MOP_xsxtb32;
+          mop = (size > k4BitSize) ? MOP_xsxtb64 : MOP_xsxtb32;
          break;
        case PTY_i16:
-          mop = (size > 4) ? MOP_xsxth64 : MOP_xsxth32;
+          mop = (size > k4BitSize) ? MOP_xsxth64 : MOP_xsxth32;
          break;
        case PTY_i32:
          mop = MOP_xsxtw64;
          break;
        default:
-          CHECK_FATAL(0,"Unhandled unsigned convert");
+          CHECK_FATAL(0, "Unhandled unsigned convert");
      }
      GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(mop, *resOpnd, *opnd0));
    }
diff --git a/src/mapleall/maple_be/src/cg/cgfunc.cpp b/src/mapleall/maple_be/src/cg/cgfunc.cpp
index 3cf6279e5191a4786e8aedcbaf35ea7b4fd0b4b0..d8c4f8548be69d6d969809eed756df8e275adab7 100644
--- a/src/mapleall/maple_be/src/cg/cgfunc.cpp
+++ b/src/mapleall/maple_be/src/cg/cgfunc.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved
  *
  * OpenArkCompiler is licensed under Mulan PSL v2.
  * You can use this software according to the terms and conditions of the Mulan PSL v2.
diff --git a/src/mapleall/maple_ir/include/opcode_info.h b/src/mapleall/maple_ir/include/opcode_info.h
index 6b4758b9f460403a2b5ab432f4cc9c5e4c9b8194..baa66e4fc60104c735200306c159a54e866e40ea 100644
--- a/src/mapleall/maple_ir/include/opcode_info.h
+++ b/src/mapleall/maple_ir/include/opcode_info.h
@@ -108,6 +108,13 @@ class OpcodeTable {
    return table[o].flag & OPCODEISCALLASSIGNED;
  }

+  bool IsICall(Opcode o) const {
+    ASSERT(o < OP_last, "invalid opcode");
+    return o == OP_icall || o == OP_icallassigned ||
+           o == OP_virtualicall || o == OP_virtualicallassigned ||
+           o == OP_interfaceicall || o == OP_interfaceicallassigned;
+  }
+
  bool NotPure(Opcode o) const {
    ASSERT(o < OP_last, "invalid opcode");
    return table[o].flag & OPCODENOTPURE;
diff --git a/src/mapleall/maple_me/src/alias_class.cpp b/src/mapleall/maple_me/src/alias_class.cpp
index 2364c8ed3a92fd908c6c449535ec41eeb5d49e39..57b0eac6c683ab52500bb05cb58e49610c6d72e0 100644
--- a/src/mapleall/maple_me/src/alias_class.cpp
+++ b/src/mapleall/maple_me/src/alias_class.cpp
@@ -237,30 +237,34 @@ void AliasClass::ApplyUnionForCopies(StmtNode &stmt) {
      auto &call = static_cast<CallNode&>(stmt);
      ASSERT(call.GetPUIdx() < GlobalTables::GetFunctionTable().GetFuncTable().size(),
             "index out of range in AliasClass::ApplyUnionForCopies");
-      SetPtrOpndsNextLevNADS(0, static_cast<uint32>(call.NumOpnds()), call.GetNopnd(), false);
+      if (CallHasSideEffect(call)) {
+        SetPtrOpndsNextLevNADS(0, static_cast<uint32>(call.NumOpnds()), call.GetNopnd(),
+                               CallHasNoPrivateDefEffect(call));
+      }
      break;
    }
-    case OP_icall:
-    case OP_icallassigned:
    case OP_virtualcall:
-    case OP_virtualicall:
    case OP_superclasscall:
    case OP_interfacecall:
-    case OP_interfaceicall:
    case OP_customcall:
    case OP_polymorphiccall:
    case OP_virtualcallassigned:
-    case OP_virtualicallassigned:
    case OP_superclasscallassigned:
    case OP_interfacecallassigned:
-    case OP_interfaceicallassigned:
    case OP_customcallassigned:
    case OP_polymorphiccallassigned: {
-      auto &call = static_cast<NaryStmtNode&>(stmt);
-      if (CallHasSideEffect(call)) {
-        SetPtrOpndsNextLevNADS(1, static_cast<uint32>(call.NumOpnds()),
-                               call.GetNopnd(), CallHasNoPrivateDefEffect(call));
-      }
+      auto &call = static_cast<NaryStmtNode&>(stmt);
+      SetPtrOpndsNextLevNADS(0, static_cast<uint32>(call.NumOpnds()), call.GetNopnd(), false);
+      break;
+    }
+    case OP_icall:
+    case OP_icallassigned:
+    case OP_virtualicall:
+    case OP_interfaceicall:
+    case OP_virtualicallassigned:
+    case OP_interfaceicallassigned: {
+      auto &call = static_cast<NaryStmtNode&>(stmt);
+      SetPtrOpndsNextLevNADS(1, static_cast<uint32>(call.NumOpnds()), call.GetNopnd(), false);
      break;
    }
    case OP_intrinsiccall:
@@ -915,8 +919,9 @@ void AliasClass::CollectMayDefForMustDefs(const StmtNode &stmt, std::set<Origina
 }

 void AliasClass::CollectMayUseForCallOpnd(const StmtNode &stmt, std::set<OriginalSt*> &mayUseOsts) {
-  for (size_t i = 0; i < stmt.NumOpnds(); ++i) {
-    BaseNode *expr = stmt.Opnd(i);
+  size_t opndId = kOpcodeInfo.IsICall(stmt.GetOpCode()) ? 1 : 0;
+  for (; opndId < stmt.NumOpnds(); ++opndId) {
+    BaseNode *expr = stmt.Opnd(opndId);
    if (!IsPotentialAddress(expr->GetPrimType())) {
      continue;
    }
@@ -935,7 +940,7 @@ void AliasClass::CollectMayUseForCallOpnd(const StmtNode &stmt, std::set<Origina
    }
    if (aliasElem->GetOriginalSt().IsFinal()) {
      // only final fields pointed to by the first opnd(this) are considered.
-      if (i != 0) {
+      if (opndId != 0) {
        continue;
      }

@@ -1083,7 +1088,12 @@ void AliasClass::GenericInsertMayDefUse(StmtNode &stmt, BBId bbID) {
      InsertMayUseAll(stmt);
      return;
    }
-    case OP_callassigned:
+    case OP_call:
+    case OP_callassigned: {
+      InsertMayDefUseCall(stmt, CallHasSideEffect(static_cast<CallNode&>(stmt)),
+                          CallHasNoPrivateDefEffect(static_cast<CallNode&>(stmt)));
+      return;
+    }
    case OP_virtualcallassigned:
    case OP_virtualicallassigned:
    case OP_superclasscallassigned:
@@ -1092,7 +1102,6 @@
    case OP_customcallassigned:
    case OP_polymorphiccallassigned:
    case OP_icallassigned:
-    case OP_call:
    case OP_virtualcall:
    case OP_virtualicall:
    case OP_superclasscall:
@@ -1101,8 +1110,7 @@
    case OP_customcall:
    case OP_polymorphiccall:
    case OP_icall: {
-      InsertMayDefUseCall(stmt, CallHasSideEffect(static_cast<CallNode&>(stmt)),
-                          CallHasNoPrivateDefEffect(static_cast<CallNode&>(stmt)));
+      InsertMayDefUseCall(stmt, true, false);
      return;
    }
    case OP_intrinsiccallwithtype: {
diff --git a/src/mrt/deplibs/libmplandroid.so b/src/mrt/deplibs/libmplandroid.so
index e180f06caba33b87daa42be5aa1484d2396ed480..b0aed0c0c9bf00aa3565fa06eda50fafaeffa58e 100755
Binary files a/src/mrt/deplibs/libmplandroid.so and b/src/mrt/deplibs/libmplandroid.so differ