From ab6a447eac5d2bad592d96f5de442efaad128321 Mon Sep 17 00:00:00 2001
From: William Chen
Date: Wed, 17 Feb 2021 14:29:36 -0800
Subject: [PATCH 1/9] Support PTY_agg in SelectAggDassign and SelectAggIassign

---
 .../src/cg/aarch64/aarch64_cgfunc.cpp         | 104 +++++++++++++++++-
 1 file changed, 100 insertions(+), 4 deletions(-)

diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
index 5489cd8832..78773f16e1 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
@@ -1004,8 +1004,7 @@ void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) {
           result, lhsMemOpnd));
       lhsSizeCovered += newAlignUsed;
     }
-  } else {  /* rhs is iread */
-    ASSERT(stmt.GetRHS()->GetOpCode() == OP_iread, "SelectAggDassign: NYI");
+  } else if (stmt.GetRHS()->GetOpCode() == OP_iread) {
     IreadNode *rhsIread = static_cast<IreadNode*>(stmt.GetRHS());
     RegOperand *addrOpnd = static_cast<RegOperand*>(HandleExpr(*rhsIread, *rhsIread->Opnd(0)));
     addrOpnd = &LoadIntoRegister(*addrOpnd, rhsIread->Opnd(0)->GetPrimType());
@@ -1065,6 +1064,28 @@ void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) {
           GetCG()->BuildInstruction<AArch64Insn>(PickStInsn(newAlignUsed * k8BitSize, PTY_u32), result, lhsMemOpnd));
       lhsSizeCovered += newAlignUsed;
     }
+  } else {
+    ASSERT(stmt.GetRHS()->op == OP_regread, "SelectAggDassign: NYI");
+    bool isRet = false;
+    if (lhsType->GetKind() == kTypeStruct || lhsType->GetKind() == kTypeUnion) {
+      RegreadNode *rhsregread = static_cast<RegreadNode*>(stmt.GetRHS());
+      PregIdx pregIdx = rhsregread->GetRegIdx();
+      if (IsSpecialPseudoRegister(pregIdx)) {
+        if ((-pregIdx) == kSregRetval0) {
+          CHECK_FATAL(lhsSize <= k16ByteSize, "SelectAggDassign: Incorrect agg size");
+          RegOperand &parm1 = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt);
+          Operand &memopnd1 = GetOrCreateMemOpnd(*lhsSymbol, 0, k64BitSize);
+          GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(PickStInsn(k64BitSize, PTY_u64), parm1, memopnd1));
+          if (lhsSize > k8ByteSize) {
+            RegOperand &parm2 = GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, kRegTyInt);
+            Operand &memopnd2 = GetOrCreateMemOpnd(*lhsSymbol, k8ByteSize, k64BitSize);
+            GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(PickStInsn(k64BitSize, PTY_u64), parm2, memopnd2));
+          }
+          isRet = true;
+        }
+      }
+    }
+    CHECK_FATAL(isRet, "SelectAggDassign: NYI");
   }
 }
 
@@ -1145,8 +1166,18 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) {
   ASSERT(stmt.Opnd(0) != nullptr, "null ptr check");
   Operand &lhsAddrOpnd = LoadIntoRegister(AddrOpnd, stmt.Opnd(0)->GetPrimType());
   int32 lhsOffset = 0;
-  MIRPtrType *lhsPointerType =
-      static_cast<MIRPtrType*>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(stmt.GetTyIdx()));
+  MIRType *stmtType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(stmt.GetTyIdx());
+  MIRSymbol *addrSym = nullptr;
+  MIRPtrType *lhsPointerType;
+  if (stmtType->GetPrimType() == PTY_agg) {
+    /* Move into regs */
+    AddrofNode &addrofnode = static_cast<AddrofNode&>(stmt.GetAddrExprBase());
+    addrSym = mirModule.CurFunction()->GetLocalOrGlobalSymbol(addrofnode.GetStIdx());
+    MIRType *addrty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(addrSym->GetTyIdx());
+    lhsPointerType = static_cast<MIRPtrType*>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(addrty->GetTypeIndex()));
+  } else {
+    lhsPointerType = static_cast<MIRPtrType*>(stmtType);
+  }
   MIRType *lhsType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(lhsPointerType->GetPointedTyIdx());
   if (stmt.GetFieldID() != 0) {
     MIRStructType *structType = static_cast<MIRStructType*>(lhsType);
@@ -1190,6 +1221,41 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) {
       rhsType = structType->GetFieldType(rhsDread->GetFieldID());
       rhsOffset = GetBecommon().GetFieldOffset(*structType, rhsDread->GetFieldID()).first;
     }
+    if (stmtType->GetPrimType() == PTY_agg) {
+      // generate move to regs.
+      CHECK_FATAL(lhsSize <= k16ByteSize, "SelectAggIassign: illegal struct size");
+      // aggregates are 8 byte aligned.
+      Operand *rhsmemopnd = nullptr;
+      RegOperand *result[kTwoRegister];  /* maximum 16 bytes, 2 registers */
+      bool parmCopy = IsParamStructCopy(*rhsSymbol);
+      uint32 loadSize = (lhsSize <= k4ByteSize) ? k4ByteSize : k8ByteSize;
+      uint32 numRegs = (lhsSize <= k8ByteSize) ? kOneRegister : kTwoRegister;
+      for (uint32 i = 0; i < numRegs; i++) {
+        if (parmCopy) {
+          rhsmemopnd = &LoadStructCopyBase(*rhsSymbol, rhsOffset + i * 8, loadSize * kBitsPerByte);
+        } else {
+          rhsmemopnd = &GetOrCreateMemOpnd(*rhsSymbol, rhsOffset + i * 8, loadSize * kBitsPerByte);
+        }
+        result[i] = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, loadSize));
+        Insn &ld = GetCG()->BuildInstruction<AArch64Insn>(PickLdInsn(loadSize * kBitsPerByte, PTY_u32), *(result[i]), *rhsmemopnd);
+        GetCurBB()->AppendInsn(ld);
+      }
+      for (uint32 i = 0; i < numRegs; i++) {
+        AArch64reg preg = (i == 0 ? R0 : R1);
+        RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt);
+        MOperator mop = (loadSize == 4) ? MOP_wmovrr: MOP_xmovrr;
+        Insn &mov = GetCG()->BuildInstruction<AArch64Insn>(mop, dest, *(result[i]));
+        GetCurBB()->AppendInsn(mov);
+      }
+      // Create artificial dependency to extend the live range
+      for (uint32 i = 0; i < numRegs; i++) {
+        AArch64reg preg = (i == 0 ? R0 : R1);
+        RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt);
+        Insn &pseudo = cg->BuildInstruction<AArch64Insn>(MOP_pseudo_ret_int, dest);
+        GetCurBB()->AppendInsn(pseudo);
+      }
+      return;
+    }
     rhsAlign = GetBecommon().GetTypeAlign(rhsType->GetTypeIndex());
     alignUsed = std::min(lhsAlign, rhsAlign);
     ASSERT(alignUsed != 0, "expect non-zero");
@@ -1253,6 +1319,36 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) {
       rhsOffset = GetBecommon().GetFieldOffset(*rhsStructType, rhsIread->GetFieldID()).first;
       isRefField = GetBecommon().IsRefField(*rhsStructType, rhsIread->GetFieldID());
     }
+    if (stmtType->GetPrimType() == PTY_agg) {
+      // generate move to regs.
+      CHECK_FATAL(lhsSize <= k16ByteSize, "SelectAggIassign: illegal struct size");
+      RegOperand *result[kTwoRegister];  /* maximum 16 bytes, 2 registers */
+      uint32 loadSize = (lhsSize <= k4ByteSize) ? k4ByteSize : k8ByteSize;
+      uint32 numRegs = (lhsSize <= k8ByteSize) ? kOneRegister : kTwoRegister;
+      for (uint32 i = 0; i < numRegs; i++) {
+        AArch64OfstOperand *rhsOffOpnd = &GetOrCreateOfstOpnd(rhsOffset + i * loadSize, loadSize * kBitsPerByte);
+        Operand &rhsmemopnd =
+            GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, loadSize, rhsAddrOpnd, nullptr, rhsOffOpnd, nullptr);
+        result[i] = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, loadSize));
+        Insn &ld = GetCG()->BuildInstruction<AArch64Insn>(PickLdInsn(loadSize * kBitsPerByte, PTY_u32), *(result[i]), rhsmemopnd);
+        ld.MarkAsAccessRefField(isRefField);
+        GetCurBB()->AppendInsn(ld);
+      }
+      for (uint32 i = 0; i < numRegs; i++) {
+        AArch64reg preg = (i == 0 ? R0 : R1);
+        RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt);
+        Insn &mov = GetCG()->BuildInstruction<AArch64Insn>(MOP_xmovrr, dest, *(result[i]));
+        GetCurBB()->AppendInsn(mov);
+      }
+      // Create artificial dependency to extend the live range
+      for (uint32 i = 0; i < numRegs; i++) {
+        AArch64reg preg = (i == 0 ? R0 : R1);
+        RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt);
+        Insn &pseudo = cg->BuildInstruction<AArch64Insn>(MOP_pseudo_ret_int, dest);
+        GetCurBB()->AppendInsn(pseudo);
+      }
+      return;
+    }
     rhsAlign = GetBecommon().GetTypeAlign(rhsType->GetTypeIndex());
     alignUsed = std::min(lhsAlign, rhsAlign);
     ASSERT(alignUsed != 0, "expect non-zero");
-- 
Gitee

From 9177f7888a6cbdf7b492eef328fa28acb19185c5 Mon Sep 17 00:00:00 2001
From: William Chen
Date: Thu, 18 Feb 2021 09:02:51 -0800
Subject: [PATCH 2/9] Fix format of previous push. 120 line length and magic numbers.

---
 .../src/cg/aarch64/aarch64_cgfunc.cpp         | 20 +++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
index 78773f16e1..e9c3318e41 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
@@ -1075,11 +1075,13 @@ void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) {
           CHECK_FATAL(lhsSize <= k16ByteSize, "SelectAggDassign: Incorrect agg size");
           RegOperand &parm1 = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt);
           Operand &memopnd1 = GetOrCreateMemOpnd(*lhsSymbol, 0, k64BitSize);
-          GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(PickStInsn(k64BitSize, PTY_u64), parm1, memopnd1));
+          MOperator mop1 = PickStInsn(k64BitSize, PTY_u64);
+          GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(mop1, parm1, memopnd1));
           if (lhsSize > k8ByteSize) {
             RegOperand &parm2 = GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, kRegTyInt);
             Operand &memopnd2 = GetOrCreateMemOpnd(*lhsSymbol, k8ByteSize, k64BitSize);
-            GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(PickStInsn(k64BitSize, PTY_u64), parm2, memopnd2));
+            MOperator mop2 = PickStInsn(k64BitSize, PTY_u64);
+            GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(mop2, parm2, memopnd2));
           }
           isRet = true;
         }
@@ -1232,19 +1234,20 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) {
       uint32 numRegs = (lhsSize <= k8ByteSize) ? kOneRegister : kTwoRegister;
       for (uint32 i = 0; i < numRegs; i++) {
         if (parmCopy) {
-          rhsmemopnd = &LoadStructCopyBase(*rhsSymbol, rhsOffset + i * 8, loadSize * kBitsPerByte);
+          rhsmemopnd = &LoadStructCopyBase(*rhsSymbol, rhsOffset + i * k8ByteSize, loadSize * kBitsPerByte);
         } else {
-          rhsmemopnd = &GetOrCreateMemOpnd(*rhsSymbol, rhsOffset + i * 8, loadSize * kBitsPerByte);
+          rhsmemopnd = &GetOrCreateMemOpnd(*rhsSymbol, rhsOffset + i * k8ByteSize, loadSize * kBitsPerByte);
         }
         result[i] = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, loadSize));
-        Insn &ld = GetCG()->BuildInstruction<AArch64Insn>(PickLdInsn(loadSize * kBitsPerByte, PTY_u32), *(result[i]), *rhsmemopnd);
+        MOperator mop1 = PickLdInsn(loadSize * kBitsPerByte, PTY_u32);
+        Insn &ld = GetCG()->BuildInstruction<AArch64Insn>(mop1, *(result[i]), *rhsmemopnd);
         GetCurBB()->AppendInsn(ld);
       }
       for (uint32 i = 0; i < numRegs; i++) {
         AArch64reg preg = (i == 0 ? R0 : R1);
         RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt);
-        MOperator mop = (loadSize == 4) ? MOP_wmovrr: MOP_xmovrr;
-        Insn &mov = GetCG()->BuildInstruction<AArch64Insn>(mop, dest, *(result[i]));
+        MOperator mop2 = (loadSize == k4ByteSize) ? MOP_wmovrr: MOP_xmovrr;
+        Insn &mov = GetCG()->BuildInstruction<AArch64Insn>(mop2, dest, *(result[i]));
         GetCurBB()->AppendInsn(mov);
       }
       // Create artificial dependency to extend the live range
@@ -1330,7 +1333,8 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) {
         Operand &rhsmemopnd =
             GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, loadSize, rhsAddrOpnd, nullptr, rhsOffOpnd, nullptr);
         result[i] = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, loadSize));
-        Insn &ld = GetCG()->BuildInstruction<AArch64Insn>(PickLdInsn(loadSize * kBitsPerByte, PTY_u32), *(result[i]), rhsmemopnd);
+        MOperator mop1 = PickLdInsn(loadSize * kBitsPerByte, PTY_u32);
+        Insn &ld = GetCG()->BuildInstruction<AArch64Insn>(mop1, *(result[i]), rhsmemopnd);
         ld.MarkAsAccessRefField(isRefField);
         GetCurBB()->AppendInsn(ld);
       }
-- 
Gitee

From b71089980d9e9f709df2e3d1e3c7166db25c6d20 Mon Sep 17 00:00:00 2001
From: William Chen
Date: Thu, 18 Feb 2021 12:48:16 -0800
Subject: [PATCH 3/9] For C do not optimize for unsigned compare to 0.

---
 src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
index e9c3318e41..4149e991c1 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
@@ -1996,7 +1996,9 @@ void AArch64CGFunc::SelectCondGoto(LabelOperand &targetOpnd, Opcode jmpOp, Opcod
 
   if (isImm) {
     /* Special cases, i.e., comparing with zero */
-    if (static_cast<ImmOperand*>(opnd1)->IsZero() && (Globals::GetInstance()->GetOptimLevel() > 0)) {
+    /* Do not perform optimization for C, unlike Java which has no unsigned int. */
+    if (static_cast<ImmOperand*>(opnd1)->IsZero() && (Globals::GetInstance()->GetOptimLevel() > 0) &&
+        ((mirModule.GetSrcLang() != kSrcLangC) || ((primType != PTY_u64) && (primType != PTY_u32)))) {
       bool finish = GenerateCompareWithZeroInstruction(jmpOp, cmpOp, is64Bits, targetOpnd, *opnd0);
       if (finish) {
         return;
-- 
Gitee

From d96954d2a8069a003d30de3c13140232201abb18 Mon Sep 17 00:00:00 2001
From: William Chen
Date: Thu, 18 Feb 2021 12:48:57 -0800
Subject: [PATCH 4/9] Remove offset from GetOrCreateMemOpnd as adrp of symbol has the offset

---
 src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
index 4149e991c1..18a569d322 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
@@ -5699,7 +5699,7 @@ MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(const MIRSymbol &symbol, int32 off
     SelectAddrof(stAddrOpnd, stOpnd);
     /* AArch64MemOperand::AddrMode_B_OI */
     return *memPool->New<AArch64MemOperand>(AArch64MemOperand::kAddrModeBOi, size, stAddrOpnd,
-                                            nullptr, &GetOrCreateOfstOpnd(offset, k32BitSize), &symbol);
+                                            nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), &symbol);
   } else if ((storageClass == kScPstatic) || (storageClass == kScFstatic)) {
     if (symbol.GetSKind() == kStConst) {
       ASSERT(offset == 0, "offset should be 0 for constant literals");
-- 
Gitee

From 1446774291d365796918c8132bdd9510c548548c Mon Sep 17 00:00:00 2001
From: William Chen
Date: Thu, 18 Feb 2021 15:44:39 -0800
Subject: [PATCH 5/9] Create zero ext for unsigned return value.

If value being returned is unsigned and the variable being returned is not
unsigned, create a zero extension.
---
 .../src/cg/aarch64/aarch64_cgfunc.cpp         | 41 ++++++++++++++++++-
 1 file changed, 39 insertions(+), 2 deletions(-)

diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
index 18a569d322..4e5910fa63 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
@@ -3699,8 +3699,45 @@ void AArch64CGFunc::SelectCvtInt2Int(const BaseNode *parent, Operand *&resOpnd,
     }
   } else {
     /* same size, so resOpnd can be set */
-    AArch64RegOperand *reg = static_cast<AArch64RegOperand*>(resOpnd);
-    reg->SetRegisterNumber(static_cast<AArch64RegOperand*>(opnd0)->GetRegisterNumber());
+    if ((mirModule.GetSrcLang() == kSrcLangJava) || (IsSignedInteger(fromType) == IsSignedInteger(toType)) ||
+        (GetPrimTypeSize(toType) > 4)) {
+      AArch64RegOperand *reg = static_cast<AArch64RegOperand*>(resOpnd);
+      reg->SetRegisterNumber(static_cast<AArch64RegOperand*>(opnd0)->GetRegisterNumber());
+    } else if (IsUnsignedInteger(toType)) {
+      MOperator mop;
+      switch (toType) {
+        case PTY_u8:
+          mop = MOP_xuxtb32;
+          break;
+        case PTY_u16:
+          mop = MOP_xuxth32;
+          break;
+        case PTY_u32:
+          mop = MOP_xuxtw64;
+          break;
+        default:
+          CHECK_FATAL(0,"Unhandled unsigned convert");
+      }
+      GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(mop, *resOpnd, *opnd0));
+    } else {
+      // signed target
+      uint32 size = GetPrimTypeSize(toType);
+      MOperator mop;
+      switch (toType) {
+        case PTY_i8:
+          mop = (size > 4) ? MOP_xsxtb64 : MOP_xsxtb32;
+          break;
+        case PTY_i16:
+          mop = (size > 4) ? MOP_xsxth64 : MOP_xsxth32;
+          break;
+        case PTY_i32:
+          mop = MOP_xsxtw64;
+          break;
+        default:
+          CHECK_FATAL(0,"Unhandled unsigned convert");
+      }
+      GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(mop, *resOpnd, *opnd0));
+    }
   }
 #endif
 }
-- 
Gitee

From 4344ebc0f5a99634325c935aa764cd7bc7f64426 Mon Sep 17 00:00:00 2001
From: William Chen
Date: Fri, 19 Feb 2021 09:02:46 -0800
Subject: [PATCH 6/9] Do not remove zero ext for signed call ret value in peep

---
 src/mapleall/maple_be/include/cg/insn.h          | 16 ++++++++++++++++
 .../maple_be/src/cg/aarch64/aarch64_cgfunc.cpp   |  3 +++
 .../maple_be/src/cg/aarch64/aarch64_peep.cpp     |  2 +-
 3 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/src/mapleall/maple_be/include/cg/insn.h b/src/mapleall/maple_be/include/cg/insn.h
index b654ea5fe6..84f5dbd163 100644
--- a/src/mapleall/maple_be/include/cg/insn.h
+++ b/src/mapleall/maple_be/include/cg/insn.h
@@ -532,6 +532,21 @@ class Insn {
     return doNotRemove;
   }
 
+  void SetIsCallReturnUnsigned(bool unSigned) {
+    ASSERT(IsCall(), "Insn should be a call.");
+    this->isCallReturnUnsigned = unSigned;
+  }
+
+  bool GetIsCallReturnUnsigned() const {
+    ASSERT(IsCall(), "Insn should be a call.");
+    return isCallReturnUnsigned;
+  }
+
+  bool GetIsCallReturnSigned() const {
+    ASSERT(IsCall(), "Insn should be a call.");
+    return (isCallReturnUnsigned == false);
+  }
+
   void SetRetType(RetType retType) {
     this->retType = retType;
   }
@@ -599,6 +614,7 @@ class Insn {
   uint32 id = 0;
   bool isThrow = false;
   bool doNotRemove = false;  /* caller reg cross call */
+  bool isCallReturnUnsigned = false;  /* for call insn only. false: signed, true: unsigned */
   RetType retType = kRegNull;  /* if this insn is call, it represent the return register type R0/V0 */
   uint32 retSize = 0;  /* Byte size of the return value if insn is a call. */
   /* record the stack cleared by MCC_ClearLocalStackRef or MCC_DecRefResetPair */
diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
index 4e5910fa63..ad19041e69 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
@@ -5457,6 +5457,7 @@ void AArch64CGFunc::SelectCall(CallNode &callNode) {
   GetCurBB()->SetHasCall();
   if (retType != nullptr) {
     callInsn.SetRetSize(retType->GetSize());
+    callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType()));
   }
 
   GetFunction().SetHasCall();
@@ -5491,6 +5492,7 @@ void AArch64CGFunc::SelectIcall(IcallNode &icallNode, Operand &srcOpnd) {
   MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallNode.GetRetTyIdx());
   if (retType != nullptr) {
     callInsn.SetRetSize(retType->GetSize());
+    callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType()));
   }
 
   GetCurBB()->AppendInsn(callInsn);
@@ -5985,6 +5987,7 @@ void AArch64CGFunc::SelectLibCall(const std::string &funcName, std::vector(retPrimType));
   if (callRetType != nullptr) {
     callInsn.SetRetSize(callRetType->GetSize());
+    callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(callRetType->GetPrimType()));
   }
   GetFunction().SetHasCall();
   /* get return value */
diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp
index abdfa9181f..93f916cf38 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp
@@ -537,7 +537,7 @@ void EliminateSpecifcSXTAArch64::Run(BB &bb, Insn &insn) {
 void EliminateSpecifcUXTAArch64::Run(BB &bb, Insn &insn) {
   MOperator thisMop = insn.GetMachineOpcode();
   Insn *prevInsn = insn.GetPreviousMachineInsn();
-  if (prevInsn == nullptr) {
+  if ((prevInsn == nullptr) || (prevInsn->IsCall() && prevInsn->GetIsCallReturnSigned())) {
     return;
   }
   auto &regOpnd0 = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
-- 
Gitee

From 9d0fec470f70793315bcc5b39e0e06d301043683 Mon Sep 17 00:00:00 2001
From: William Chen
Date: Fri, 19 Feb 2021 09:45:58 -0800
Subject: [PATCH 7/9] Fix previous push with C qualifier.
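
The signed-call-return check added in the previous patch sat in the initial
nullptr guard and applied to every source language; it now runs after the
machine-instruction checks and only when the source language is C. A minimal
C sketch of the pattern whose zero extension must survive the peephole
(illustrative only; the function names below are made up, not taken from this
series):

    signed char f(void);

    unsigned int g(void) {
      /* AAPCS64 leaves the upper bits of w0 unspecified for a signed char
         return, so the uxtb emitted after "bl f" must not be removed */
      return (unsigned char)f();
    }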
---
 src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp
index 93f916cf38..8f7dc6f51c 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp
@@ -537,7 +537,7 @@ void EliminateSpecifcSXTAArch64::Run(BB &bb, Insn &insn) {
 void EliminateSpecifcUXTAArch64::Run(BB &bb, Insn &insn) {
   MOperator thisMop = insn.GetMachineOpcode();
   Insn *prevInsn = insn.GetPreviousMachineInsn();
-  if ((prevInsn == nullptr) || (prevInsn->IsCall() && prevInsn->GetIsCallReturnSigned())) {
+  if (prevInsn == nullptr) {
     return;
   }
   auto &regOpnd0 = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
@@ -558,6 +558,9 @@ void EliminateSpecifcUXTAArch64::Run(BB &bb, Insn &insn) {
       !prevInsn->IsMachineInstruction()) {
     return;
   }
+  if (cgFunc.GetMirModule().GetSrcLang() == kSrcLangC && prevInsn->IsCall() && prevInsn->GetIsCallReturnSigned()) {
+    return;
+  }
   if (thisMop == MOP_xuxtb32) {
     if (prevInsn->GetMachineOpcode() == MOP_xmovri32 || prevInsn->GetMachineOpcode() == MOP_xmovri64) {
       auto &dstMovOpnd = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
-- 
Gitee

From 0c4d15324ee331a5425ef889933d6f6976ccb551 Mon Sep 17 00:00:00 2001
From: William Chen
Date: Fri, 19 Feb 2021 13:14:38 -0800
Subject: [PATCH 8/9] Support agg param in reaching definition init

---
 .../maple_be/src/cg/aarch64/aarch64_reaching.cpp | 11 +++++++++
 1 file changed, 11 insertions(+)

diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp
index 14a7893e3c..4fd2291665 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp
@@ -38,6 +38,10 @@ void AArch64ReachingDefinition::InitStartGen() {
     }
 
     uint64 symSize = cgFunc->GetBecommon().GetTypeSize(type->GetTypeIndex());
+    if ((cgFunc->GetMirModule().GetSrcLang() == kSrcLangC) && (symSize > k8ByteSize)) {
+      /* For C structure passing in one or two registers. */
+      symSize = k8ByteSize;
+    }
     RegType regType = (pLoc.reg0 < V0) ? kRegTyInt : kRegTyFloat;
     uint32 srcBitSize = ((symSize < k4ByteSize) ? k4ByteSize : symSize) * kBitsPerByte;
 
@@ -63,6 +67,13 @@ void AArch64ReachingDefinition::InitStartGen() {
     bb->InsertInsnBegin(pseudoInsn);
     pseudoInsns.emplace_back(&pseudoInsn);
 
+    if (pLoc.reg1) {
+      regOpnd = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(pLoc.reg1, srcBitSize, regType);
+      Insn &pseudoInsn1 = cgFunc->GetCG()->BuildInstruction<AArch64Insn>(mOp, regOpnd);
+      bb->InsertInsnBegin(pseudoInsn1);
+      pseudoInsns.emplace_back(&pseudoInsn1);
+    }
+
     {
       /*
       * define memory address since store param may be transfered to stp and which with the short offset range.
-- 
Gitee
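
A minimal C sketch of the aggregate-parameter case handled by patch 8/9 above
(illustrative only; the type and function names are made up): a 16-byte struct
parameter arrives in two integer registers under the AAPCS64, so InitStartGen
must also record an entry-block definition for pLoc.reg1, not just pLoc.reg0.

    /* 16-byte aggregate: expected in x0 (first) and x1 (second) on entry */
    struct Pair {
      long first;
      long second;
    };

    long sum(struct Pair p) {
      return p.first + p.second;  /* both x0 and x1 are live-in definitions */
    }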

From 53a2e2d1ec9a3dcfd68aae07002673b069cb483c Mon Sep 17 00:00:00 2001
From: William Chen
Date: Fri, 19 Feb 2021 14:16:50 -0800
Subject: [PATCH 9/9] fix unsigned zero extension for SelectCvtInt2Int

---
 .../maple_be/src/cg/aarch64/aarch64_cgfunc.cpp | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
index ad19041e69..3bda6dc3b9 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
@@ -3653,7 +3653,15 @@ void AArch64CGFunc::SelectCvtInt2Int(const BaseNode *parent, Operand *&resOpnd,
     } else {
       /* Unsigned */
       if (is64Bit) {
-        GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(MOP_xuxtw64, *resOpnd, *opnd0));
+        if (fsize == 8) {
+          ImmOperand &immOpnd = CreateImmOperand(0xff, k64BitSize, false);
+          GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(MOP_xandrri13, *resOpnd, *opnd0, immOpnd));
+        } else if (fsize == 16) {
+          ImmOperand &immOpnd = CreateImmOperand(0xffff, k64BitSize, false);
+          GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(MOP_xandrri13, *resOpnd, *opnd0, immOpnd));
+        } else {
+          GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(MOP_xuxtw64, *resOpnd, *opnd0));
+        }
       } else {
         ASSERT(((fsize == k8BitSize) || (fsize == k16BitSize)), "incorrect from size");
         if (fsize == k8BitSize) {
-- 
Gitee
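
A minimal C sketch of the conversion corrected by patch 9/9 (illustrative
only; the function name is made up): for a u8 or u16 source the unsigned
64-bit widening now becomes an AND with 0xff or 0xffff, because uxtw only
zero-extends the low 32 bits and would keep whatever happens to sit in bits
8..31 of the source register.

    /* u8 -> u64: expect "and x0, x0, #0xff" rather than a bare uxtw */
    unsigned long long widen(unsigned char c) {
      return (unsigned long long)c;
    }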