diff --git a/src/mapleall/maple_be/include/cg/insn.h b/src/mapleall/maple_be/include/cg/insn.h
index b654ea5fe64f1e14cfba698af4a5b9f8dd371267..84f5dbd163237641efe40c1d4c5748798f111841 100644
--- a/src/mapleall/maple_be/include/cg/insn.h
+++ b/src/mapleall/maple_be/include/cg/insn.h
@@ -532,6 +532,21 @@ class Insn {
     return doNotRemove;
   }
 
+  void SetIsCallReturnUnsigned(bool unSigned) {
+    ASSERT(IsCall(), "Insn should be a call.");
+    this->isCallReturnUnsigned = unSigned;
+  }
+
+  bool GetIsCallReturnUnsigned() const {
+    ASSERT(IsCall(), "Insn should be a call.");
+    return isCallReturnUnsigned;
+  }
+
+  bool GetIsCallReturnSigned() const {
+    ASSERT(IsCall(), "Insn should be a call.");
+    return !isCallReturnUnsigned;
+  }
+
   void SetRetType(RetType retType) {
     this->retType = retType;
   }
@@ -599,6 +614,7 @@ class Insn {
   uint32 id = 0;
   bool isThrow = false;
   bool doNotRemove = false;           /* caller reg cross call */
+  bool isCallReturnUnsigned = false;  /* for call insns only. false: signed, true: unsigned */
   RetType retType = kRegNull;         /* if this insn is call, it represent the return register type R0/V0 */
   uint32 retSize = 0;                 /* Byte size of the return value if insn is a call. */
   /* record the stack cleared by MCC_ClearLocalStackRef or MCC_DecRefResetPair */
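The new `isCallReturnUnsigned` flag carries front-end type information down to machine-level passes that otherwise only see operands (see the `aarch64_peep.cpp` hunk further below). As a minimal standalone sketch of the property being tracked — not Maple code, and `widenReturn` is a hypothetical helper — under the AAPCS64 a callee returning a type narrower than 32 bits may leave the upper bits of `w0`/`x0` unspecified, so the caller owns the widening, and the correct widening depends on the source-level signedness:

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical helper: models how a caller must widen a narrow return value
// sitting in x0, given the byte size and signedness recorded on the call insn.
uint64_t widenReturn(uint64_t rawX0, unsigned retByteSize, bool retIsUnsigned) {
  uint64_t mask = (retByteSize >= 8) ? ~0ULL : ((1ULL << (retByteSize * 8)) - 1);
  uint64_t value = rawX0 & mask;  // uxtb/uxth-style widening keeps the low bits only
  if (!retIsUnsigned && retByteSize < 8) {
    uint64_t signBit = 1ULL << (retByteSize * 8 - 1);
    value = (value ^ signBit) - signBit;  // sxtb/sxth-style widening replicates the sign bit
  }
  return value;
}

int main() {
  // A call returning (signed char)-1 may leave 0xff...ff in x0: signed widening keeps -1.
  assert(widenReturn(~0ULL, 1, false) == ~0ULL);
  // The same raw bits for an unsigned char return must widen to 255.
  assert(widenReturn(~0ULL, 1, true) == 0xffULL);
  return 0;
}
```

Recording the signedness on the call `Insn` itself keeps this distinction available to passes such as the peephole optimizer, long after the MIR types are out of reach.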
diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
index 5489cd883290dfca792fc9fa90f4263a265ef697..3bda6dc3b9e4d85f73715ea9001d34a50e6eacd4 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
@@ -1004,8 +1004,7 @@ void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) {
                                                                 result, lhsMemOpnd));
       lhsSizeCovered += newAlignUsed;
     }
-  } else { /* rhs is iread */
-    ASSERT(stmt.GetRHS()->GetOpCode() == OP_iread, "SelectAggDassign: NYI");
+  } else if (stmt.GetRHS()->GetOpCode() == OP_iread) {
     IreadNode *rhsIread = static_cast<IreadNode*>(stmt.GetRHS());
     RegOperand *addrOpnd = static_cast<RegOperand*>(HandleExpr(*rhsIread, *rhsIread->Opnd(0)));
     addrOpnd = &LoadIntoRegister(*addrOpnd, rhsIread->Opnd(0)->GetPrimType());
@@ -1065,6 +1064,30 @@ void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) {
           GetCG()->BuildInstruction<AArch64Insn>(PickStInsn(newAlignUsed * k8BitSize, PTY_u32), result, lhsMemOpnd));
       lhsSizeCovered += newAlignUsed;
     }
+  } else {
+    ASSERT(stmt.GetRHS()->GetOpCode() == OP_regread, "SelectAggDassign: NYI");
+    bool isRet = false;
+    if (lhsType->GetKind() == kTypeStruct || lhsType->GetKind() == kTypeUnion) {
+      RegreadNode *rhsregread = static_cast<RegreadNode*>(stmt.GetRHS());
+      PregIdx pregIdx = rhsregread->GetRegIdx();
+      if (IsSpecialPseudoRegister(pregIdx)) {
+        if ((-pregIdx) == kSregRetval0) {
+          CHECK_FATAL(lhsSize <= k16ByteSize, "SelectAggDassign: Incorrect agg size");
+          RegOperand &parm1 = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt);
+          Operand &memopnd1 = GetOrCreateMemOpnd(*lhsSymbol, 0, k64BitSize);
+          MOperator mop1 = PickStInsn(k64BitSize, PTY_u64);
+          GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(mop1, parm1, memopnd1));
+          if (lhsSize > k8ByteSize) {
+            RegOperand &parm2 = GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, kRegTyInt);
+            Operand &memopnd2 = GetOrCreateMemOpnd(*lhsSymbol, k8ByteSize, k64BitSize);
+            MOperator mop2 = PickStInsn(k64BitSize, PTY_u64);
+            GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(mop2, parm2, memopnd2));
+          }
+          isRet = true;
+        }
+      }
+    }
+    CHECK_FATAL(isRet, "SelectAggDassign: NYI");
   }
 }
 
@@ -1145,8 +1168,18 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) {
   ASSERT(stmt.Opnd(0) != nullptr, "null ptr check");
   Operand &lhsAddrOpnd = LoadIntoRegister(AddrOpnd, stmt.Opnd(0)->GetPrimType());
   int32 lhsOffset = 0;
-  MIRPtrType *lhsPointerType =
-      static_cast<MIRPtrType*>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(stmt.GetTyIdx()));
+  MIRType *stmtType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(stmt.GetTyIdx());
+  MIRSymbol *addrSym = nullptr;
+  MIRPtrType *lhsPointerType = nullptr;
+  if (stmtType->GetPrimType() == PTY_agg) {
+    /* Move into regs */
+    AddrofNode &addrofnode = static_cast<AddrofNode&>(stmt.GetAddrExprBase());
+    addrSym = mirModule.CurFunction()->GetLocalOrGlobalSymbol(addrofnode.GetStIdx());
+    MIRType *addrty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(addrSym->GetTyIdx());
+    lhsPointerType = static_cast<MIRPtrType*>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(addrty->GetTypeIndex()));
+  } else {
+    lhsPointerType = static_cast<MIRPtrType*>(stmtType);
+  }
   MIRType *lhsType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(lhsPointerType->GetPointedTyIdx());
   if (stmt.GetFieldID() != 0) {
     MIRStructType *structType = static_cast<MIRStructType*>(lhsType);
@@ -1190,6 +1223,42 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) {
       rhsType = structType->GetFieldType(rhsDread->GetFieldID());
       rhsOffset = GetBecommon().GetFieldOffset(*structType, rhsDread->GetFieldID()).first;
     }
+    if (stmtType->GetPrimType() == PTY_agg) {
+      // generate move to regs.
+      CHECK_FATAL(lhsSize <= k16ByteSize, "SelectAggIassign: illegal struct size");
+      // aggregates are 8 byte aligned.
+      Operand *rhsmemopnd = nullptr;
+      RegOperand *result[kTwoRegister];  /* maximum 16 bytes, 2 registers */
+      bool parmCopy = IsParamStructCopy(*rhsSymbol);
+      uint32 loadSize = (lhsSize <= k4ByteSize) ? k4ByteSize : k8ByteSize;
+      uint32 numRegs = (lhsSize <= k8ByteSize) ? kOneRegister : kTwoRegister;
+      for (uint32 i = 0; i < numRegs; i++) {
+        if (parmCopy) {
+          rhsmemopnd = &LoadStructCopyBase(*rhsSymbol, rhsOffset + i * k8ByteSize, loadSize * kBitsPerByte);
+        } else {
+          rhsmemopnd = &GetOrCreateMemOpnd(*rhsSymbol, rhsOffset + i * k8ByteSize, loadSize * kBitsPerByte);
+        }
+        result[i] = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, loadSize));
+        MOperator mop1 = PickLdInsn(loadSize * kBitsPerByte, PTY_u32);
+        Insn &ld = GetCG()->BuildInstruction<AArch64Insn>(mop1, *(result[i]), *rhsmemopnd);
+        GetCurBB()->AppendInsn(ld);
+      }
+      for (uint32 i = 0; i < numRegs; i++) {
+        AArch64reg preg = (i == 0 ? R0 : R1);
+        RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt);
+        MOperator mop2 = (loadSize == k4ByteSize) ? MOP_wmovrr : MOP_xmovrr;
+        Insn &mov = GetCG()->BuildInstruction<AArch64Insn>(mop2, dest, *(result[i]));
+        GetCurBB()->AppendInsn(mov);
+      }
+      // Create artificial dependency to extend the live range
+      for (uint32 i = 0; i < numRegs; i++) {
+        AArch64reg preg = (i == 0 ? R0 : R1);
+        RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt);
+        Insn &pseudo = cg->BuildInstruction<AArch64Insn>(MOP_pseudo_ret_int, dest);
+        GetCurBB()->AppendInsn(pseudo);
+      }
+      return;
+    }
     rhsAlign = GetBecommon().GetTypeAlign(rhsType->GetTypeIndex());
     alignUsed = std::min(lhsAlign, rhsAlign);
     ASSERT(alignUsed != 0, "expect non-zero");
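The `SelectAggDassign` and `SelectAggIassign` hunks above implement small-aggregate returns in registers: the callee side moves the struct into R0/R1, and a caller assigning the special `retval0` pseudo-register stores R0 (plus R1 for 9–16 byte aggregates) back to the local symbol. A standalone sketch of the convention — not Maple code; it assumes the AAPCS64 rule that aggregates of at most 16 bytes travel in `x0`/`x1`, and `TwoRegs`, `aggToRegs`, and `regsToAgg` are hypothetical names:

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

struct TwoRegs {
  uint64_t x0;
  uint64_t x1;  // only meaningful when the aggregate is larger than 8 bytes
};

// Callee side: "move into regs", as in the SelectAggIassign hunks.
template <typename Agg>
TwoRegs aggToRegs(const Agg &a) {
  static_assert(sizeof(Agg) <= 16, "aggregate must fit in x0/x1");
  TwoRegs r{0, 0};
  std::memcpy(&r, &a, sizeof(Agg));
  return r;
}

// Caller side: store x0 (and x1 if needed) back to memory, as in SelectAggDassign.
template <typename Agg>
Agg regsToAgg(const TwoRegs &r) {
  Agg a{};
  std::memcpy(&a, &r, sizeof(Agg));
  return a;
}

int main() {
  struct S { int32_t a; int32_t b; int64_t c; };  // 16 bytes: needs both registers
  S s{1, 2, 3};
  S t = regsToAgg<S>(aggToRegs(s));
  assert(t.a == 1 && t.b == 2 && t.c == 3);
  return 0;
}
```

The `MOP_pseudo_ret_int` instructions appended after the moves act as artificial uses of R0/R1, keeping the moves alive through later passes until the actual return.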
@@ -1253,6 +1322,37 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) {
       rhsOffset = GetBecommon().GetFieldOffset(*rhsStructType, rhsIread->GetFieldID()).first;
       isRefField = GetBecommon().IsRefField(*rhsStructType, rhsIread->GetFieldID());
     }
+    if (stmtType->GetPrimType() == PTY_agg) {
+      // generate move to regs.
+      CHECK_FATAL(lhsSize <= k16ByteSize, "SelectAggIassign: illegal struct size");
+      RegOperand *result[kTwoRegister];  /* maximum 16 bytes, 2 registers */
+      uint32 loadSize = (lhsSize <= k4ByteSize) ? k4ByteSize : k8ByteSize;
+      uint32 numRegs = (lhsSize <= k8ByteSize) ? kOneRegister : kTwoRegister;
+      for (uint32 i = 0; i < numRegs; i++) {
+        AArch64OfstOperand *rhsOffOpnd = &GetOrCreateOfstOpnd(rhsOffset + i * loadSize, loadSize * kBitsPerByte);
+        Operand &rhsmemopnd =
+            GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, loadSize, rhsAddrOpnd, nullptr, rhsOffOpnd, nullptr);
+        result[i] = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, loadSize));
+        MOperator mop1 = PickLdInsn(loadSize * kBitsPerByte, PTY_u32);
+        Insn &ld = GetCG()->BuildInstruction<AArch64Insn>(mop1, *(result[i]), rhsmemopnd);
+        ld.MarkAsAccessRefField(isRefField);
+        GetCurBB()->AppendInsn(ld);
+      }
+      for (uint32 i = 0; i < numRegs; i++) {
+        AArch64reg preg = (i == 0 ? R0 : R1);
+        RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt);
+        Insn &mov = GetCG()->BuildInstruction<AArch64Insn>(MOP_xmovrr, dest, *(result[i]));
+        GetCurBB()->AppendInsn(mov);
+      }
+      // Create artificial dependency to extend the live range
+      for (uint32 i = 0; i < numRegs; i++) {
+        AArch64reg preg = (i == 0 ? R0 : R1);
+        RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt);
+        Insn &pseudo = cg->BuildInstruction<AArch64Insn>(MOP_pseudo_ret_int, dest);
+        GetCurBB()->AppendInsn(pseudo);
+      }
+      return;
+    }
     rhsAlign = GetBecommon().GetTypeAlign(rhsType->GetTypeIndex());
     alignUsed = std::min(lhsAlign, rhsAlign);
     ASSERT(alignUsed != 0, "expect non-zero");
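The next hunk guards the compare-with-zero shortcut in `SelectCondGoto`. The motivation, as far as it can be inferred from the added condition: the shortcut can lower `x >= 0` / `x < 0` into a sign-bit test (`tbz`/`tbnz` on bit 63), which is only equivalent to a *signed* comparison; for C's `PTY_u32`/`PTY_u64` the top bit carries magnitude, so the rewrite would change the branch. Java never reaches this case because it has no unsigned integers. A toy demonstration with a hypothetical `geZeroViaSignBit` helper:

```cpp
#include <cassert>
#include <cstdint>

// What a tbz-on-bit-63 "x >= 0" shortcut actually computes.
bool geZeroViaSignBit(uint64_t x) { return (x >> 63) == 0; }

int main() {
  uint64_t big = 0x8000000000000000ULL;                // top bit set
  bool signedGeZero = static_cast<int64_t>(big) >= 0;  // false: -2^63 < 0
  bool unsignedGeZero = true;                          // u64 >= 0 is trivially true
  assert(geZeroViaSignBit(big) == signedGeZero);       // matches the signed compare...
  assert(geZeroViaSignBit(big) != unsignedGeZero);     // ...but not the unsigned one
  return 0;
}
```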
@@ -1896,7 +1996,9 @@ void AArch64CGFunc::SelectCondGoto(LabelOperand &targetOpnd, Opcode jmpOp, Opcod
 
   if (isImm) {
     /* Special cases, i.e., comparing with zero */
-    if (static_cast<ImmOperand*>(opnd1)->IsZero() && (Globals::GetInstance()->GetOptimLevel() > 0)) {
+    /* Do not perform this optimization for unsigned types in C; Java has no unsigned integers. */
+    if (static_cast<ImmOperand*>(opnd1)->IsZero() && (Globals::GetInstance()->GetOptimLevel() > 0) &&
+        ((mirModule.GetSrcLang() != kSrcLangC) || ((primType != PTY_u64) && (primType != PTY_u32)))) {
       bool finish = GenerateCompareWithZeroInstruction(jmpOp, cmpOp, is64Bits, targetOpnd, *opnd0);
       if (finish) {
         return;
@@ -3551,7 +3653,15 @@ void AArch64CGFunc::SelectCvtInt2Int(const BaseNode *parent, Operand *&resOpnd,
     } else {
       /* Unsigned */
       if (is64Bit) {
-        GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(MOP_xuxtw64, *resOpnd, *opnd0));
+        if (fsize == k8BitSize) {
+          ImmOperand &immOpnd = CreateImmOperand(0xff, k64BitSize, false);
+          GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(MOP_xandrri13, *resOpnd, *opnd0, immOpnd));
+        } else if (fsize == k16BitSize) {
+          ImmOperand &immOpnd = CreateImmOperand(0xffff, k64BitSize, false);
+          GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(MOP_xandrri13, *resOpnd, *opnd0, immOpnd));
+        } else {
+          GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(MOP_xuxtw64, *resOpnd, *opnd0));
+        }
       } else {
         ASSERT(((fsize == k8BitSize) || (fsize == k16BitSize)), "incorrect from size");
         if (fsize == k8BitSize) {
@@ -3597,8 +3707,45 @@ void AArch64CGFunc::SelectCvtInt2Int(const BaseNode *parent, Operand *&resOpnd,
     }
   } else {
     /* same size, so resOpnd can be set */
-    AArch64RegOperand *reg = static_cast<AArch64RegOperand*>(resOpnd);
-    reg->SetRegisterNumber(static_cast<RegOperand*>(opnd0)->GetRegisterNumber());
+    if ((mirModule.GetSrcLang() == kSrcLangJava) || (IsSignedInteger(fromType) == IsSignedInteger(toType)) ||
+        (GetPrimTypeSize(toType) > k4ByteSize)) {
+      AArch64RegOperand *reg = static_cast<AArch64RegOperand*>(resOpnd);
+      reg->SetRegisterNumber(static_cast<RegOperand*>(opnd0)->GetRegisterNumber());
+    } else if (IsUnsignedInteger(toType)) {
+      MOperator mop;
+      switch (toType) {
+        case PTY_u8:
+          mop = MOP_xuxtb32;
+          break;
+        case PTY_u16:
+          mop = MOP_xuxth32;
+          break;
+        case PTY_u32:
+          mop = MOP_xuxtw64;
+          break;
+        default:
+          CHECK_FATAL(false, "unhandled unsigned convert");
+      }
+      GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(mop, *resOpnd, *opnd0));
+    } else {
+      // signed target
+      uint32 size = GetPrimTypeSize(toType);
+      MOperator mop;
+      switch (toType) {
+        case PTY_i8:
+          mop = (size > k4ByteSize) ? MOP_xsxtb64 : MOP_xsxtb32;
+          break;
+        case PTY_i16:
+          mop = (size > k4ByteSize) ? MOP_xsxth64 : MOP_xsxth32;
+          break;
+        case PTY_i32:
+          mop = MOP_xsxtw64;
+          break;
+        default:
+          CHECK_FATAL(false, "unhandled signed convert");
+      }
+      GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(mop, *resOpnd, *opnd0));
+    }
   }
 #endif
 }
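On the first `SelectCvtInt2Int` hunk above: `uxtw` (MOP_xuxtw64) only clears bits [63:32], so using it to widen `u8`/`u16` to 64 bits preserves whatever stale bits sit in [31:8] — for example a sign-extended value left by an earlier operation. The AND with an immediate mask (MOP_xandrri13) zeroes the full 64-bit register instead. A small sketch with hypothetical helpers `viaUxtw`/`viaAndMask`:

```cpp
#include <cassert>
#include <cstdint>

uint64_t viaUxtw(uint64_t x) { return x & 0xffffffffULL; }  // uxtw: clears bits 63..32 only
uint64_t viaAndMask(uint64_t x) { return x & 0xffULL; }     // and x, x, #0xff

int main() {
  uint64_t reg = ~0ULL;                   // e.g. a sign-extended (i8)-1 filling the register
  assert(viaAndMask(reg) == 0xffULL);     // correct u8 -> u64 widening
  assert(viaUxtw(reg) == 0xffffffffULL);  // uxtw alone leaves bits 31..8 set
  return 0;
}
```

The same-size hunk applies the matching rule in the other direction: when only signedness changes (e.g. `i8` to `u8` in C), the register contents must be re-normalized with a uxt*/sxt*, whereas Java (and conversions to types wider than 32 bits) can simply reuse the register.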
@@ -5318,6 +5465,7 @@ void AArch64CGFunc::SelectCall(CallNode &callNode) {
   GetCurBB()->SetHasCall();
   if (retType != nullptr) {
     callInsn.SetRetSize(retType->GetSize());
+    callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType()));
   }
 
   GetFunction().SetHasCall();
@@ -5352,6 +5500,7 @@ void AArch64CGFunc::SelectIcall(IcallNode &icallNode, Operand &srcOpnd) {
   MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallNode.GetRetTyIdx());
   if (retType != nullptr) {
     callInsn.SetRetSize(retType->GetSize());
+    callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType()));
   }
 
   GetCurBB()->AppendInsn(callInsn);
@@ -5597,7 +5746,7 @@ MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(const MIRSymbol &symbol, int32 off
     SelectAddrof(stAddrOpnd, stOpnd);
     /* AArch64MemOperand::AddrMode_B_OI */
     return *memPool->New<AArch64MemOperand>(AArch64MemOperand::kAddrModeBOi, size, stAddrOpnd,
-                                            nullptr, &GetOrCreateOfstOpnd(offset, k32BitSize), &symbol);
+                                            nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), &symbol);
   } else if ((storageClass == kScPstatic) || (storageClass == kScFstatic)) {
     if (symbol.GetSKind() == kStConst) {
       ASSERT(offset == 0, "offset should be 0 for constant literals");
@@ -5846,6 +5995,7 @@ void AArch64CGFunc::SelectLibCall(const std::string &funcName, std::vector<Oper
   MIRType *callRetType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(retPrimType));
   if (callRetType != nullptr) {
     callInsn.SetRetSize(callRetType->GetSize());
+    callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(callRetType->GetPrimType()));
   }
   GetFunction().SetHasCall();
   /* get return value */
diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp
index abdfa9181ffaa568af43152965ec12e2fae1c50c..8f7dc6f51cce07f4396ed11fe0eebeb17b666a73 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp
@@ -558,6 +558,9 @@ void EliminateSpecifcUXTAArch64::Run(BB &bb, Insn &insn) {
       !prevInsn->IsMachineInstruction()) {
     return;
   }
+  if (cgFunc.GetMirModule().GetSrcLang() == kSrcLangC && prevInsn->IsCall() && prevInsn->GetIsCallReturnSigned()) {
+    return;
+  }
   if (thisMop == MOP_xuxtb32) {
     if (prevInsn->GetMachineOpcode() == MOP_xmovri32 || prevInsn->GetMachineOpcode() == MOP_xmovri64) {
       auto &dstMovOpnd = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
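On the `EliminateSpecifcUXT` guard above: the pass drops a `uxtb32`/`uxth32` whose input was defined by the immediately preceding instruction, on the assumption that the definition already produced a zero-extended value. For a C call whose return type is a narrow *signed* integer that assumption does not hold — the register may legitimately hold a sign-extended value — so the extension must be kept, which is what the new `GetIsCallReturnSigned()` check enforces. A toy model (the `calleeReturningSignedChar` function is hypothetical):

```cpp
#include <cassert>
#include <cstdint>

int8_t calleeReturningSignedChar() { return -1; }  // w0 may hold 0xffffffff after the call

int main() {
  uint32_t w0 = static_cast<uint32_t>(static_cast<int32_t>(calleeReturningSignedChar()));
  uint32_t withUxtb = w0 & 0xffu;  // keeping the uxtb yields 0xff
  uint32_t elided = w0;            // removing it leaves 0xffffffff
  assert(withUxtb == 0xffu);
  assert(elided != withUxtb);      // eliminating the extension would change the result
  return 0;
}
```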
diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp
index 14a7893e3c8920df628bc7cdd15191d51341a86b..4fd22916659e87e3eeec3c6642ff48a07618c4b4 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp
@@ -38,6 +38,10 @@ void AArch64ReachingDefinition::InitStartGen() {
     }
 
     uint64 symSize = cgFunc->GetBecommon().GetTypeSize(type->GetTypeIndex());
+    if ((cgFunc->GetMirModule().GetSrcLang() == kSrcLangC) && (symSize > k8ByteSize)) {
+      /* For C, a struct is passed in one or two registers, each at most 8 bytes. */
+      symSize = k8ByteSize;
+    }
     RegType regType = (pLoc.reg0 < V0) ? kRegTyInt : kRegTyFloat;
     uint32 srcBitSize = ((symSize < k4ByteSize) ? k4ByteSize : symSize) * kBitsPerByte;
@@ -63,6 +67,13 @@ void AArch64ReachingDefinition::InitStartGen() {
     bb->InsertInsnBegin(pseudoInsn);
     pseudoInsns.emplace_back(&pseudoInsn);
 
+    if (pLoc.reg1) {
+      RegOperand &regOpnd1 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(pLoc.reg1, srcBitSize, regType);
+      Insn &pseudoInsn1 = cgFunc->GetCG()->BuildInstruction<AArch64Insn>(mOp, regOpnd1);
+      bb->InsertInsnBegin(pseudoInsn1);
+      pseudoInsns.emplace_back(&pseudoInsn1);
+    }
+
     {
       /*
        * define memory address since store param may be transfered to stp and which with the short offset range.
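The reaching-definition changes mirror the calling convention used in the rest of this patch: a 9–16 byte C aggregate parameter arrives in a register pair, so the entry block must record pseudo definitions for both `pLoc.reg0` and `pLoc.reg1`, each at most 8 bytes wide (hence the clamp of `symSize` to `k8ByteSize`). A sketch of that location logic under the same AAPCS64 assumptions; `ParamLoc` and `locateAggregate` are hypothetical names:

```cpp
#include <cassert>
#include <cstdint>

struct ParamLoc {
  int reg0;  // -1 means "not in a register"
  int reg1;
};

// Assign registers to an aggregate parameter of the given size, starting at
// the next free integer register (x0..x7 in the real ABI).
ParamLoc locateAggregate(uint64_t byteSize, int nextIntReg) {
  ParamLoc loc{-1, -1};
  if (byteSize <= 8) {
    loc.reg0 = nextIntReg;        // fits in a single register
  } else if (byteSize <= 16) {
    loc.reg0 = nextIntReg;        // register pair, like pLoc.reg0/pLoc.reg1
    loc.reg1 = nextIntReg + 1;
  }                               // larger aggregates are passed by reference
  return loc;
}

int main() {
  ParamLoc p = locateAggregate(16, 0);  // e.g. struct { long a, b; } in x0/x1
  assert(p.reg0 == 0 && p.reg1 == 1);
  assert(locateAggregate(8, 0).reg1 == -1);
  return 0;
}
```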