From d6bdf65c8501d0e282834975a2fcf162c86fe262 Mon Sep 17 00:00:00 2001
From: binaryfz
Date: Thu, 25 Feb 2021 16:55:08 +0800
Subject: [PATCH] [sync after gerrit test]PR383 PR386

---
 .../include/cg/aarch64/aarch64_cgfunc.h       |   2 +-
 src/mapleall/maple_be/include/cg/cgfunc.h     |   2 +-
 src/mapleall/maple_be/include/cg/insn.h       |  16 ++
 .../src/cg/aarch64/aarch64_cgfunc.cpp         | 239 +++++++++++++++++-
 .../maple_be/src/cg/aarch64/aarch64_peep.cpp  |   3 +
 .../src/cg/aarch64/aarch64_reaching.cpp       |  11 +
 src/mapleall/maple_be/src/cg/cgfunc.cpp       |   2 +-
 src/mapleall/maple_ir/include/opcode_info.h   |   7 +
 src/mapleall/maple_me/src/alias_class.cpp     |  46 ++--
 src/mrt/deplibs/libmplandroid.so              | Bin 2975136 -> 2975136 bytes
 10 files changed, 293 insertions(+), 35 deletions(-)

diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h
index 84aca10943..9740e68bf3 100644
--- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h
+++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h
@@ -109,7 +109,7 @@ class AArch64CGFunc : public CGFunc {
   void SelectComment(CommentNode &comment) override;
 
   void HandleCatch() override;
-  Operand *SelectDread(AddrofNode &expr) override;
+  Operand *SelectDread(const BaseNode &parent, AddrofNode &expr) override;
   RegOperand *SelectRegread(RegreadNode &expr) override;
 
   void SelectAddrof(Operand &result, StImmOperand &stImm);
diff --git a/src/mapleall/maple_be/include/cg/cgfunc.h b/src/mapleall/maple_be/include/cg/cgfunc.h
index 00853febc8..f9f6479af9 100644
--- a/src/mapleall/maple_be/include/cg/cgfunc.h
+++ b/src/mapleall/maple_be/include/cg/cgfunc.h
@@ -174,7 +174,7 @@ class CGFunc {
   virtual void HandleCatch() = 0;
 
   /* select expr */
-  virtual Operand *SelectDread(AddrofNode &expr) = 0;
+  virtual Operand *SelectDread(const BaseNode &parent, AddrofNode &expr) = 0;
   virtual RegOperand *SelectRegread(RegreadNode &expr) = 0;
   virtual Operand *SelectAddrof(AddrofNode &expr) = 0;
   virtual Operand &SelectAddrofFunc(AddroffuncNode &expr) = 0;
diff --git a/src/mapleall/maple_be/include/cg/insn.h b/src/mapleall/maple_be/include/cg/insn.h
index b654ea5fe6..84f5dbd163 100644
--- a/src/mapleall/maple_be/include/cg/insn.h
+++ b/src/mapleall/maple_be/include/cg/insn.h
@@ -532,6 +532,21 @@ class Insn {
     return doNotRemove;
   }
 
+  void SetIsCallReturnUnsigned(bool unSigned) {
+    ASSERT(IsCall(), "Insn should be a call.");
+    this->isCallReturnUnsigned = unSigned;
+  }
+
+  bool GetIsCallReturnUnsigned() const {
+    ASSERT(IsCall(), "Insn should be a call.");
+    return isCallReturnUnsigned;
+  }
+
+  bool GetIsCallReturnSigned() const {
+    ASSERT(IsCall(), "Insn should be a call.");
+    return (isCallReturnUnsigned == false);
+  }
+
   void SetRetType(RetType retType) {
     this->retType = retType;
   }
@@ -599,6 +614,7 @@ class Insn {
   uint32 id = 0;
   bool isThrow = false;
   bool doNotRemove = false;  /* caller reg cross call */
+  bool isCallReturnUnsigned = false;  /* for call insn only. false: signed, true: unsigned */
   RetType retType = kRegNull;  /* if this insn is call, it represent the return register type R0/V0 */
   uint32 retSize = 0;  /* Byte size of the return value if insn is a call. */
   /* record the stack cleared by MCC_ClearLocalStackRef or MCC_DecRefResetPair */
diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
index 4385425271..8b2820a187 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
@@ -1004,8 +1004,7 @@ void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) {
                                                        result, lhsMemOpnd));
       lhsSizeCovered += newAlignUsed;
     }
-  } else {  /* rhs is iread */
-    ASSERT(stmt.GetRHS()->GetOpCode() == OP_iread, "SelectAggDassign: NYI");
+  } else if (stmt.GetRHS()->GetOpCode() == OP_iread) {
     IreadNode *rhsIread = static_cast<IreadNode*>(stmt.GetRHS());
     RegOperand *addrOpnd = static_cast<RegOperand*>(HandleExpr(*rhsIread, *rhsIread->Opnd(0)));
     addrOpnd = &LoadIntoRegister(*addrOpnd, rhsIread->Opnd(0)->GetPrimType());
@@ -1065,6 +1064,30 @@ void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) {
           GetCG()->BuildInstruction<AArch64Insn>(PickStInsn(newAlignUsed * k8BitSize, PTY_u32), result, lhsMemOpnd));
       lhsSizeCovered += newAlignUsed;
     }
+  } else {
+    ASSERT(stmt.GetRHS()->op == OP_regread, "SelectAggDassign: NYI");
+    bool isRet = false;
+    if (lhsType->GetKind() == kTypeStruct || lhsType->GetKind() == kTypeUnion) {
+      RegreadNode *rhsregread = static_cast<RegreadNode*>(stmt.GetRHS());
+      PregIdx pregIdx = rhsregread->GetRegIdx();
+      if (IsSpecialPseudoRegister(pregIdx)) {
+        if ((-pregIdx) == kSregRetval0) {
+          CHECK_FATAL(lhsSize <= k16ByteSize, "SelectAggDassign: Incorrect agg size");
+          RegOperand &parm1 = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt);
+          Operand &memopnd1 = GetOrCreateMemOpnd(*lhsSymbol, 0, k64BitSize);
+          MOperator mop1 = PickStInsn(k64BitSize, PTY_u64);
+          GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(mop1, parm1, memopnd1));
+          if (lhsSize > k8ByteSize) {
+            RegOperand &parm2 = GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, kRegTyInt);
+            Operand &memopnd2 = GetOrCreateMemOpnd(*lhsSymbol, k8ByteSize, k64BitSize);
+            MOperator mop2 = PickStInsn(k64BitSize, PTY_u64);
+            GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(mop2, parm2, memopnd2));
+          }
+          isRet = true;
+        }
+      }
+    }
+    CHECK_FATAL(isRet, "SelectAggDassign: NYI");
   }
 }
 
@@ -1145,8 +1168,18 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) {
   ASSERT(stmt.Opnd(0) != nullptr, "null ptr check");
   Operand &lhsAddrOpnd = LoadIntoRegister(AddrOpnd, stmt.Opnd(0)->GetPrimType());
   int32 lhsOffset = 0;
-  MIRPtrType *lhsPointerType =
-      static_cast<MIRPtrType*>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(stmt.GetTyIdx()));
+  MIRType *stmtType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(stmt.GetTyIdx());
+  MIRSymbol *addrSym = nullptr;
+  MIRPtrType *lhsPointerType = nullptr;
+  if (stmtType->GetPrimType() == PTY_agg) {
+    /* Move into regs */
+    AddrofNode &addrofnode = static_cast<AddrofNode&>(stmt.GetAddrExprBase());
+    addrSym = mirModule.CurFunction()->GetLocalOrGlobalSymbol(addrofnode.GetStIdx());
+    MIRType *addrty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(addrSym->GetTyIdx());
+    lhsPointerType = static_cast<MIRPtrType*>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(addrty->GetTypeIndex()));
+  } else {
+    lhsPointerType = static_cast<MIRPtrType*>(stmtType);
+  }
   MIRType *lhsType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(lhsPointerType->GetPointedTyIdx());
   if (stmt.GetFieldID() != 0) {
     MIRStructType *structType = static_cast<MIRStructType*>(lhsType);
@@ -1190,6 +1223,42 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) {
       rhsType = structType->GetFieldType(rhsDread->GetFieldID());
       rhsOffset = GetBecommon().GetFieldOffset(*structType, rhsDread->GetFieldID()).first;
     }
+    if (stmtType->GetPrimType() == PTY_agg) {
+      /* generate move to regs. */
+      CHECK_FATAL(lhsSize <= k16ByteSize, "SelectAggIassign: illegal struct size");
+      /* aggregates are 8 byte aligned. */
+      Operand *rhsmemopnd = nullptr;
+      RegOperand *result[kTwoRegister];  /* maximum 16 bytes, 2 registers */
+      bool parmCopy = IsParamStructCopy(*rhsSymbol);
+      uint32 loadSize = (lhsSize <= k4ByteSize) ? k4ByteSize : k8ByteSize;
+      uint32 numRegs = (lhsSize <= k8ByteSize) ? kOneRegister : kTwoRegister;
+      for (uint32 i = 0; i < numRegs; i++) {
+        if (parmCopy) {
+          rhsmemopnd = &LoadStructCopyBase(*rhsSymbol, rhsOffset + i * k8ByteSize, loadSize * kBitsPerByte);
+        } else {
+          rhsmemopnd = &GetOrCreateMemOpnd(*rhsSymbol, rhsOffset + i * k8ByteSize, loadSize * kBitsPerByte);
+        }
+        result[i] = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, loadSize));
+        MOperator mop1 = PickLdInsn(loadSize * kBitsPerByte, PTY_u32);
+        Insn &ld = GetCG()->BuildInstruction<AArch64Insn>(mop1, *(result[i]), *rhsmemopnd);
+        GetCurBB()->AppendInsn(ld);
+      }
+      for (uint32 i = 0; i < numRegs; i++) {
+        AArch64reg preg = (i == 0 ? R0 : R1);
+        RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt);
+        MOperator mop2 = (loadSize == k4ByteSize) ? MOP_wmovrr : MOP_xmovrr;
+        Insn &mov = GetCG()->BuildInstruction<AArch64Insn>(mop2, dest, *(result[i]));
+        GetCurBB()->AppendInsn(mov);
+      }
+      /* Create artificial dependency to extend the live range */
+      for (uint32 i = 0; i < numRegs; i++) {
+        AArch64reg preg = (i == 0 ? R0 : R1);
+        RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt);
+        Insn &pseudo = cg->BuildInstruction<AArch64Insn>(MOP_pseudo_ret_int, dest);
+        GetCurBB()->AppendInsn(pseudo);
+      }
+      return;
+    }
     rhsAlign = GetBecommon().GetTypeAlign(rhsType->GetTypeIndex());
     alignUsed = std::min(lhsAlign, rhsAlign);
     ASSERT(alignUsed != 0, "expect non-zero");
@@ -1253,6 +1322,37 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) {
       rhsOffset = GetBecommon().GetFieldOffset(*rhsStructType, rhsIread->GetFieldID()).first;
       isRefField = GetBecommon().IsRefField(*rhsStructType, rhsIread->GetFieldID());
     }
+    if (stmtType->GetPrimType() == PTY_agg) {
+      /* generate move to regs. */
+      CHECK_FATAL(lhsSize <= k16ByteSize, "SelectAggIassign: illegal struct size");
+      RegOperand *result[kTwoRegister];  /* maximum 16 bytes, 2 registers */
+      uint32 loadSize = (lhsSize <= k4ByteSize) ? k4ByteSize : k8ByteSize;
+      uint32 numRegs = (lhsSize <= k8ByteSize) ? kOneRegister : kTwoRegister;
+      for (uint32 i = 0; i < numRegs; i++) {
+        AArch64OfstOperand *rhsOffOpnd = &GetOrCreateOfstOpnd(rhsOffset + i * loadSize, loadSize * kBitsPerByte);
+        Operand &rhsmemopnd =
+            GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, loadSize, rhsAddrOpnd, nullptr, rhsOffOpnd, nullptr);
+        result[i] = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, loadSize));
+        MOperator mop1 = PickLdInsn(loadSize * kBitsPerByte, PTY_u32);
+        Insn &ld = GetCG()->BuildInstruction<AArch64Insn>(mop1, *(result[i]), rhsmemopnd);
+        ld.MarkAsAccessRefField(isRefField);
+        GetCurBB()->AppendInsn(ld);
+      }
+      for (uint32 i = 0; i < numRegs; i++) {
+        AArch64reg preg = (i == 0 ? R0 : R1);
+        RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt);
+        Insn &mov = GetCG()->BuildInstruction<AArch64Insn>(MOP_xmovrr, dest, *(result[i]));
+        GetCurBB()->AppendInsn(mov);
+      }
+      /* Create artificial dependency to extend the live range */
+      for (uint32 i = 0; i < numRegs; i++) {
+        AArch64reg preg = (i == 0 ? R0 : R1);
+        RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt);
+        Insn &pseudo = cg->BuildInstruction<AArch64Insn>(MOP_pseudo_ret_int, dest);
+        GetCurBB()->AppendInsn(pseudo);
+      }
+      return;
+    }
     rhsAlign = GetBecommon().GetTypeAlign(rhsType->GetTypeIndex());
     alignUsed = std::min(lhsAlign, rhsAlign);
     ASSERT(alignUsed != 0, "expect non-zero");
@@ -1304,7 +1404,7 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) {
     }
   }
 }
-Operand *AArch64CGFunc::SelectDread(DreadNode &expr) {
+Operand *AArch64CGFunc::SelectDread(const BaseNode &parent, DreadNode &expr) {
   MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(expr.GetStIdx());
   if (symbol->IsEhIndex()) {
     MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_i32);
@@ -1325,8 +1425,41 @@
     parmCopy = IsParamStructCopy(*symbol);
   }
-  CHECK_FATAL(symType != PTY_agg, "dread type error");
-  uint32 dataSize = GetPrimTypeSize(symType) * kBitsPerByte;
+  uint32 dataSize = GetPrimTypeBitSize(symType);
+  uint32 aggSize = 0;
+  if (symType == PTY_agg) {
+    if (expr.GetPrimType() == PTY_agg) {
+      aggSize = GetBecommon().GetTypeSize(symbol->GetType()->GetTypeIndex().GetIdx());
+      dataSize = k64BitSize;
+    } else {
+      dataSize = GetPrimTypeBitSize(expr.GetPrimType());
+    }
+  }
   MemOperand *memOpnd = nullptr;
+  if (aggSize > k8ByteSize) {
+    if (parent.op == OP_eval) {
+      if (symbol->GetAttr(ATTR_volatile)) {
+        /* Need to generate loads for the upper parts of the struct. */
+        Operand &dest = AArch64RegOperand::GetZeroRegister(k64BitSize);
+        uint32 numLoads = RoundUp(aggSize, k64BitSize) / k64BitSize;
+        for (uint32 o = 0; o < numLoads; ++o) {
+          if (parmCopy) {
+            memOpnd = &LoadStructCopyBase(*symbol, offset + o * kSizeOfPtr, kSizeOfPtr);
+          } else {
+            memOpnd = &GetOrCreateMemOpnd(*symbol, offset + o * kSizeOfPtr, kSizeOfPtr);
+          }
+          if (IsImmediateOffsetOutOfRange(*static_cast<AArch64MemOperand*>(memOpnd), kSizeOfPtr)) {
+            memOpnd = &SplitOffsetWithAddInstruction(*static_cast<AArch64MemOperand*>(memOpnd), kSizeOfPtr);
+          }
+          SelectCopy(dest, PTY_u64, *memOpnd, PTY_u64);
+        }
+      } else {
+        /* No side-effects. No need to generate anything for eval. */
+      }
+    } else {
+      CHECK_FATAL(0, "SelectDread: Illegal agg size");
+    }
+  }
   if (parmCopy) {
     memOpnd = &LoadStructCopyBase(*symbol, offset, dataSize);
   } else {
@@ -1575,7 +1708,15 @@ Operand *AArch64CGFunc::SelectIread(const BaseNode &parent, IreadNode &expr) {
 
   RegType regType = GetRegTyFromPrimTy(expr.GetPrimType());
   uint32 regSize = GetPrimTypeSize(expr.GetPrimType());
-  if (regSize < k4ByteSize) {
+  if (expr.GetFieldID() == 0 && pointedType->GetPrimType() == PTY_agg) {
+    /* Maple IR can pass a small struct to be loaded into a single register. */
+    if (regType == kRegTyFloat) {
+      /* regsize is correct */
+    } else {
+      uint32 sz = GetBecommon().GetTypeSize(pointedType->GetTypeIndex().GetIdx());
+      regSize = (sz <= k4ByteSize) ? k4ByteSize : k8ByteSize;
+    }
+  } else if (regSize < k4ByteSize) {
     regSize = k4ByteSize;  /* 32-bit */
   }
   regno_t vRegNO;
@@ -1607,6 +1748,27 @@ Operand *AArch64CGFunc::SelectIread(const BaseNode &parent, IreadNode &expr) {
     } else {
       bitSize = GetPrimTypeBitSize(destType);
     }
+    if (regType == kRegTyFloat) {
+      destType = expr.GetPrimType();
+      bitSize = GetPrimTypeBitSize(destType);
+    } else if (destType == PTY_agg) {
+      switch (bitSize) {
+        case k8BitSize:
+          destType = PTY_u8;
+          break;
+        case k16BitSize:
+          destType = PTY_u16;
+          break;
+        case k32BitSize:
+          destType = PTY_u32;
+          break;
+        case k64BitSize:
+          destType = PTY_u64;
+          break;
+        default:
+          CHECK_FATAL(false, "SelectIread: aggregate of wrong size");
+      }
+    }
   }
 
   MemOperand *memOpnd = &CreateMemOpnd(destType, expr, *expr.Opnd(0), offset, memOrd);
@@ -1895,8 +2057,11 @@ void AArch64CGFunc::SelectCondGoto(LabelOperand &targetOpnd, Opcode jmpOp, Opcod
   MOperator mOp = is64Bits ? MOP_xcmprr : MOP_wcmprr;
 
   if (isImm) {
-    /* Special cases, i.e., comparing with zero */
-    if (static_cast<ImmOperand*>(opnd1)->IsZero() && (Globals::GetInstance()->GetOptimLevel() > 0)) {
+    /* Special cases, i.e., comparing with zero
+     * Do not perform optimization for C, unlike Java which has no unsigned int.
+     */
+    if (static_cast<ImmOperand*>(opnd1)->IsZero() && (Globals::GetInstance()->GetOptimLevel() > 0) &&
+        ((mirModule.GetSrcLang() != kSrcLangC) || ((primType != PTY_u64) && (primType != PTY_u32)))) {
       bool finish = GenerateCompareWithZeroInstruction(jmpOp, cmpOp, is64Bits, targetOpnd, *opnd0);
       if (finish) {
         return;
@@ -3551,7 +3716,15 @@ void AArch64CGFunc::SelectCvtInt2Int(const BaseNode *parent, Operand *&resOpnd,
     } else {
       /* Unsigned */
       if (is64Bit) {
-        GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(MOP_xuxtw64, *resOpnd, *opnd0));
+        if (fsize == k8BitSize) {
+          ImmOperand &immOpnd = CreateImmOperand(0xff, k64BitSize, false);
+          GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(MOP_xandrri13, *resOpnd, *opnd0, immOpnd));
+        } else if (fsize == k16BitSize) {
+          ImmOperand &immOpnd = CreateImmOperand(0xffff, k64BitSize, false);
+          GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(MOP_xandrri13, *resOpnd, *opnd0, immOpnd));
+        } else {
+          GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(MOP_xuxtw64, *resOpnd, *opnd0));
+        }
       } else {
         ASSERT(((fsize == k8BitSize) || (fsize == k16BitSize)), "incorrect from size");
         if (fsize == k8BitSize) {
@@ -3597,8 +3770,45 @@ void AArch64CGFunc::SelectCvtInt2Int(const BaseNode *parent, Operand *&resOpnd,
     }
   } else {
     /* same size, so resOpnd can be set */
-    AArch64RegOperand *reg = static_cast<AArch64RegOperand*>(resOpnd);
-    reg->SetRegisterNumber(static_cast<AArch64RegOperand*>(opnd0)->GetRegisterNumber());
+    if ((mirModule.GetSrcLang() == kSrcLangJava) || (IsSignedInteger(fromType) == IsSignedInteger(toType)) ||
+        (GetPrimTypeSize(toType) > k4BitSize)) {
+      AArch64RegOperand *reg = static_cast<AArch64RegOperand*>(resOpnd);
+      reg->SetRegisterNumber(static_cast<AArch64RegOperand*>(opnd0)->GetRegisterNumber());
+    } else if (IsUnsignedInteger(toType)) {
+      MOperator mop;
+      switch (toType) {
+        case PTY_u8:
+          mop = MOP_xuxtb32;
+          break;
+        case PTY_u16:
+          mop = MOP_xuxth32;
+          break;
+        case PTY_u32:
+          mop = MOP_xuxtw64;
+          break;
+        default:
+          CHECK_FATAL(0, "Unhandled unsigned convert");
+      }
+      GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(mop, *resOpnd, *opnd0));
+    } else {
+      /* signed target */
+      uint32 size = GetPrimTypeSize(toType);
+      MOperator mop;
+      switch (toType) {
+        case PTY_i8:
+          mop = (size > k4BitSize) ? MOP_xsxtb64 : MOP_xsxtb32;
+          break;
+        case PTY_i16:
+          mop = (size > k4BitSize) ? MOP_xsxth64 : MOP_xsxth32;
+          break;
+        case PTY_i32:
+          mop = MOP_xsxtw64;
+          break;
+        default:
+          CHECK_FATAL(0, "Unhandled signed convert");
+      }
+      GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(mop, *resOpnd, *opnd0));
+    }
   }
 #endif
 }
@@ -5419,6 +5629,7 @@ void AArch64CGFunc::SelectCall(CallNode &callNode) {
   GetCurBB()->SetHasCall();
   if (retType != nullptr) {
     callInsn.SetRetSize(retType->GetSize());
+    callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType()));
   }
 
   GetFunction().SetHasCall();
@@ -5453,6 +5664,7 @@ void AArch64CGFunc::SelectIcall(IcallNode &icallNode, Operand &srcOpnd) {
   MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallNode.GetRetTyIdx());
   if (retType != nullptr) {
     callInsn.SetRetSize(retType->GetSize());
+    callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType()));
   }
 
   GetCurBB()->AppendInsn(callInsn);
@@ -5698,7 +5910,7 @@ MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(const MIRSymbol &symbol, int32 off
     SelectAddrof(stAddrOpnd, stOpnd);
     /* AArch64MemOperand::AddrMode_B_OI */
     return *memPool->New<AArch64MemOperand>(AArch64MemOperand::kAddrModeBOi, size, stAddrOpnd,
-                                            nullptr, &GetOrCreateOfstOpnd(offset, k32BitSize), &symbol);
+                                            nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), &symbol);
   } else if ((storageClass == kScPstatic) || (storageClass == kScFstatic)) {
     if (symbol.GetSKind() == kStConst) {
       ASSERT(offset == 0, "offset should be 0 for constant literals");
@@ -5947,6 +6159,7 @@ void AArch64CGFunc::SelectLibCall(const std::string &funcName, std::vector<Oper
   MIRType *callRetType = GlobalTables::GetTypeTable().GetTypeTable().at(static_cast<int>(retPrimType));
   if (callRetType != nullptr) {
     callInsn.SetRetSize(callRetType->GetSize());
+    callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(callRetType->GetPrimType()));
   }
   GetFunction().SetHasCall();
   /* get return value */
diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp
index abdfa9181f..8f7dc6f51c 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp
@@ -558,6 +558,9 @@ void EliminateSpecifcUXTAArch64::Run(BB &bb, Insn &insn) {
       !prevInsn->IsMachineInstruction()) {
     return;
   }
+  if (cgFunc.GetMirModule().GetSrcLang() == kSrcLangC && prevInsn->IsCall() && prevInsn->GetIsCallReturnSigned()) {
+    return;
+  }
   if (thisMop == MOP_xuxtb32) {
     if (prevInsn->GetMachineOpcode() == MOP_xmovri32 || prevInsn->GetMachineOpcode() == MOP_xmovri64) {
       auto &dstMovOpnd = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp
index 14a7893e3c..4fd2291665 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp
@@ -38,6 +38,10 @@ void AArch64ReachingDefinition::InitStartGen() {
     }
 
     uint64 symSize = cgFunc->GetBecommon().GetTypeSize(type->GetTypeIndex());
+    if ((cgFunc->GetMirModule().GetSrcLang() == kSrcLangC) && (symSize > k8ByteSize)) {
+      /* For C structure passing in one or two registers. */
+      symSize = k8ByteSize;
+    }
     RegType regType = (pLoc.reg0 < V0) ? kRegTyInt : kRegTyFloat;
     uint32 srcBitSize = ((symSize < k4ByteSize) ? k4ByteSize : symSize) * kBitsPerByte;
@@ -63,6 +67,13 @@ void AArch64ReachingDefinition::InitStartGen() {
     bb->InsertInsnBegin(pseudoInsn);
     pseudoInsns.emplace_back(&pseudoInsn);
 
+    if (pLoc.reg1) {
+      regOpnd = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(pLoc.reg1, srcBitSize, regType);
+      Insn &pseudoInsn1 = cgFunc->GetCG()->BuildInstruction<AArch64Insn>(mOp, regOpnd);
+      bb->InsertInsnBegin(pseudoInsn1);
+      pseudoInsns.emplace_back(&pseudoInsn1);
+    }
+
     {
       /*
        * define memory address since store param may be transfered to stp and which with the short offset range.
diff --git a/src/mapleall/maple_be/src/cg/cgfunc.cpp b/src/mapleall/maple_be/src/cg/cgfunc.cpp
index f7302199b8..d8c4f8548b 100644
--- a/src/mapleall/maple_be/src/cg/cgfunc.cpp
+++ b/src/mapleall/maple_be/src/cg/cgfunc.cpp
@@ -30,7 +30,7 @@ using namespace maple;
 Operand *HandleDread(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) {
   (void)parent;
   auto &dreadNode = static_cast<AddrofNode&>(expr);
-  return cgFunc.SelectDread(dreadNode);
+  return cgFunc.SelectDread(parent, dreadNode);
 }
 
 Operand *HandleRegread(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) {
diff --git a/src/mapleall/maple_ir/include/opcode_info.h b/src/mapleall/maple_ir/include/opcode_info.h
index 6b4758b9f4..baa66e4fc6 100644
--- a/src/mapleall/maple_ir/include/opcode_info.h
+++ b/src/mapleall/maple_ir/include/opcode_info.h
@@ -108,6 +108,13 @@ class OpcodeTable {
     return table[o].flag & OPCODEISCALLASSIGNED;
   }
 
+  bool IsICall(Opcode o) const {
+    ASSERT(o < OP_last, "invalid opcode");
+    return o == OP_icall || o == OP_icallassigned ||
+           o == OP_virtualicall || o == OP_virtualicallassigned ||
+           o == OP_interfaceicall || o == OP_interfaceicallassigned;
+  }
+
   bool NotPure(Opcode o) const {
     ASSERT(o < OP_last, "invalid opcode");
     return table[o].flag & OPCODENOTPURE;
diff --git a/src/mapleall/maple_me/src/alias_class.cpp b/src/mapleall/maple_me/src/alias_class.cpp
index 2364c8ed3a..57b0eac6c6 100644
--- a/src/mapleall/maple_me/src/alias_class.cpp
+++ b/src/mapleall/maple_me/src/alias_class.cpp
@@ -237,30 +237,34 @@ void AliasClass::ApplyUnionForCopies(StmtNode &stmt) {
       auto &call = static_cast<CallNode&>(stmt);
       ASSERT(call.GetPUIdx() < GlobalTables::GetFunctionTable().GetFuncTable().size(),
              "index out of range in AliasClass::ApplyUnionForCopies");
-      SetPtrOpndsNextLevNADS(0, static_cast<uint32>(call.NumOpnds()), call.GetNopnd(), false);
+      if (CallHasSideEffect(call)) {
+        SetPtrOpndsNextLevNADS(0, static_cast<uint32>(call.NumOpnds()), call.GetNopnd(),
+                               CallHasNoPrivateDefEffect(call));
+      }
       break;
     }
-    case OP_icall:
-    case OP_icallassigned:
     case OP_virtualcall:
-    case OP_virtualicall:
     case OP_superclasscall:
     case OP_interfacecall:
-    case OP_interfaceicall:
     case OP_customcall:
     case OP_polymorphiccall:
     case OP_virtualcallassigned:
-    case OP_virtualicallassigned:
     case OP_superclasscallassigned:
     case OP_interfacecallassigned:
-    case OP_interfaceicallassigned:
     case OP_customcallassigned:
     case OP_polymorphiccallassigned: {
-      auto &call = static_cast<NaryStmtNode&>(stmt);
-      if (CallHasSideEffect(call)) {
-        SetPtrOpndsNextLevNADS(1, static_cast<uint32>(call.NumOpnds()),
-                               call.GetNopnd(), CallHasNoPrivateDefEffect(call));
-      }
+      auto &call = static_cast<NaryStmtNode&>(stmt);
+      SetPtrOpndsNextLevNADS(0, static_cast<uint32>(call.NumOpnds()), call.GetNopnd(), false);
+      break;
+    }
+    case OP_icall:
+    case OP_icallassigned:
+    case OP_virtualicall:
+    case OP_interfaceicall:
+    case OP_virtualicallassigned:
+    case OP_interfaceicallassigned: {
+      auto &call = static_cast<NaryStmtNode&>(stmt);
+      SetPtrOpndsNextLevNADS(1, static_cast<uint32>(call.NumOpnds()), call.GetNopnd(), false);
       break;
     }
     case OP_intrinsiccall:
@@ -915,8 +919,9 @@ void AliasClass::CollectMayDefForMustDefs(const StmtNode &stmt, std::set<Origina
 
 void AliasClass::CollectMayUseForCallOpnd(const StmtNode &stmt, std::set<OriginalSt*> &mayUseOsts) {
-  for (size_t i = 0; i < stmt.NumOpnds(); ++i) {
-    BaseNode *expr = stmt.Opnd(i);
+  size_t opndId = kOpcodeInfo.IsICall(stmt.GetOpCode()) ? 1 : 0;
+  for (; opndId < stmt.NumOpnds(); ++opndId) {
+    BaseNode *expr = stmt.Opnd(opndId);
     if (!IsPotentialAddress(expr->GetPrimType())) {
       continue;
     }
@@ -935,7 +940,7 @@ void AliasClass::CollectMayUseForCallOpnd(const StmtNode &stmt, std::set<Origina
     if (aInfo.ae->GetOriginalSt().IsFinal()) {
       // only final fields pointed to by the first opnd(this) are considered.
-      if (i != 0) {
+      if (opndId != 0) {
         continue;
       }
@@ -1083,7 +1088,12 @@ void AliasClass::GenericInsertMayDefUse(StmtNode &stmt, BBId bbID) {
       InsertMayUseAll(stmt);
       return;
     }
-    case OP_callassigned:
+    case OP_call:
+    case OP_callassigned: {
+      InsertMayDefUseCall(stmt, CallHasSideEffect(static_cast<CallNode&>(stmt)),
+                          CallHasNoPrivateDefEffect(static_cast<CallNode&>(stmt)));
+      return;
+    }
     case OP_virtualcallassigned:
     case OP_virtualicallassigned:
     case OP_superclasscallassigned:
     case OP_interfacecallassigned:
     case OP_interfaceicallassigned:
     case OP_customcallassigned:
     case OP_polymorphiccallassigned:
     case OP_icallassigned:
-    case OP_call:
     case OP_virtualcall:
     case OP_virtualicall:
     case OP_superclasscall:
     case OP_interfacecall:
     case OP_interfaceicall:
     case OP_customcall:
     case OP_polymorphiccall:
     case OP_icall: {
-      InsertMayDefUseCall(stmt, CallHasSideEffect(static_cast<CallNode&>(stmt)),
-                          CallHasNoPrivateDefEffect(static_cast<CallNode&>(stmt)));
+      InsertMayDefUseCall(stmt, true, false);
       return;
     }
     case OP_intrinsiccallwithtype: {
diff --git a/src/mrt/deplibs/libmplandroid.so b/src/mrt/deplibs/libmplandroid.so
index e180f06caba33b87daa42be5aa1484d2396ed480..b0aed0c0c9bf00aa3565fa06eda50fafaeffa58e 100755
GIT binary patch
delta 206
[base85-encoded binary delta data omitted]

--
Gitee