diff --git a/src/bin/maple b/src/bin/maple
index 50b615d6ebe872217a2af097a657cd35279f6d67..0d9457b4cd195e0e00c9e40fedf56b59d7b4b205 100755
Binary files a/src/bin/maple and b/src/bin/maple differ
diff --git a/src/maple_be/include/cg/aarch64/aarch64_color_ra.h b/src/maple_be/include/cg/aarch64/aarch64_color_ra.h
index e2574adc28f5b2b4e3aeb037aade738fde99a3d0..74203b1f297abe5ad411ebcaf1755238d1b164ee 100644
--- a/src/maple_be/include/cg/aarch64/aarch64_color_ra.h
+++ b/src/maple_be/include/cg/aarch64/aarch64_color_ra.h
@@ -72,7 +72,7 @@ inline bool FindIn(const MapleList<T> &list, const T &item) {
 inline bool IsBitArrElemSet(const uint64 *vec, const uint32 num) {
   size_t index = num / kU64;
   uint64 bit = num % kU64;
-  return vec[index] & (1UL << bit);
+  return vec[index] & (1ULL << bit);
 }
 
 inline bool IsBBsetOverlap(const uint64 *vec1, const uint64 *vec2, uint32 bbBuckets) {
@@ -283,7 +283,7 @@ class LiveRange {
   void SetMemberBitArrElem(uint32 bbID) {
     uint32 index = bbID / kU64;
     uint64 bit = bbID % kU64;
-    uint64 mask = 1UL << bit;
+    uint64 mask = 1ULL << bit;
     if ((GetBBMemberElem(index) & mask) == 0) {
       IncNumBBMembers();
       SetBBMemberElem(index, GetBBMemberElem(index) | mask);
@@ -293,7 +293,7 @@ class LiveRange {
   void UnsetMemberBitArrElem(uint32 bbID) {
     uint32 index = bbID / kU64;
     uint64 bit = bbID % kU64;
-    uint64 mask = 1UL << bit;
+    uint64 mask = 1ULL << bit;
    if ((GetBBMemberElem(index) & mask) != 0) {
       DecNumBBMembers();
       SetBBMemberElem(index, GetBBMemberElem(index) & (~mask));
@@ -303,7 +303,7 @@ class LiveRange {
   void SetConflictBitArrElem(regno_t regNO) {
     uint32 index = regNO / kU64;
     uint64 bit = regNO % kU64;
-    uint64 mask = 1UL << bit;
+    uint64 mask = 1ULL << bit;
     if ((GetBBConflictElem(index) & mask) == 0) {
       IncNumBBConflicts();
       SetBBConflictElem(index, GetBBConflictElem(index) | mask);
@@ -313,7 +313,7 @@ class LiveRange {
   void UnsetConflictBitArrElem(regno_t regNO) {
     uint32 index = regNO / kU64;
     uint64 bit = regNO % kU64;
-    uint64 mask = 1UL << bit;
+    uint64 mask = 1ULL << bit;
     if ((GetBBConflictElem(index) & mask) != 0) {
       DecNumBBConflicts();
       SetBBConflictElem(index, GetBBConflictElem(index) & (~mask));
@@ -852,13 +852,13 @@ class LocalRegAllocator {
   void SetPregUsed(regno_t regNO, bool isInt) {
     uint64 mask = 0;
     if (isInt) {
-      mask = 1UL << (regNO - R0);
+      mask = 1ULL << (regNO - R0);
       if ((intPregUsed & mask) == 0) {
         ++numIntPregUsed;
         intPregUsed |= mask;
       }
     } else {
-      mask = 1UL << (regNO - V0);
+      mask = 1ULL << (regNO - V0);
       if ((fpPregUsed & mask) == 0) {
         ++numFpPregUsed;
         fpPregUsed |= mask;
@@ -894,26 +894,26 @@ class LocalRegAllocator {
 
   void SetPregs(regno_t regNO, bool isInt) {
     if (isInt) {
-      intPregs |= 1UL << (regNO - RegBaseUpdate(true));
+      intPregs |= 1ULL << (regNO - RegBaseUpdate(true));
     } else {
-      fpPregs |= 1UL << (regNO - RegBaseUpdate(false));
+      fpPregs |= 1ULL << (regNO - RegBaseUpdate(false));
     }
   }
 
   void ClearPregs(regno_t regNO, bool isInt) {
     if (isInt) {
-      intPregs &= ~(1UL << (regNO - RegBaseUpdate(true)));
+      intPregs &= ~(1ULL << (regNO - RegBaseUpdate(true)));
     } else {
-      fpPregs &= ~(1UL << (regNO - RegBaseUpdate(false)));
+      fpPregs &= ~(1ULL << (regNO - RegBaseUpdate(false)));
     }
   }
 
   bool IsPregAvailable(regno_t regNO, bool isInt) const {
     bool isAvailable;
     if (isInt) {
-      isAvailable = intPregs & (1UL << (regNO - RegBaseUpdate(true)));
+      isAvailable = intPregs & (1ULL << (regNO - RegBaseUpdate(true)));
     } else {
-      isAvailable = fpPregs & (1UL << (regNO - RegBaseUpdate(false)));
+      isAvailable = fpPregs & (1ULL << (regNO - RegBaseUpdate(false)));
     }
     return isAvailable;
   }
@@ -922,8 +922,8 @@ class LocalRegAllocator {
                  const MapleSet<uint32> &fpSpillRegSet) {
     uint32 intBase = R0;
     uint32 fpBase = V0;
-    intPregs = (1UL << (intMax + 1)) - 1;
-    fpPregs = (1UL << (((fpMax + 1) + fpBase) - RegBaseUpdate(false))) - 1;
+    intPregs = (1ULL << (intMax + 1)) - 1;
+    fpPregs = (1ULL << (((fpMax + 1) + fpBase) - RegBaseUpdate(false))) - 1;
     for (uint32 regNO : intSpillRegSet) {
       ClearPregs(regNO + intBase, true);
     }
@@ -934,8 +934,8 @@ class LocalRegAllocator {
       ClearPregs(RYP, true);
     }
 #ifdef RESERVED_REGS
-    intPregs &= ~(1UL << R16);
-    intPregs &= ~(1UL << R17);
+    intPregs &= ~(1ULL << R16);
+    intPregs &= ~(1ULL << R17);
 #endif  /* RESERVED_REGS */
   }
 
@@ -997,7 +997,7 @@ class LocalRegAllocator {
   void SetBitArrElement(uint64 *vec, regno_t regNO) {
     uint32 index = regNO / kU64;
     uint64 bit = regNO % kU64;
-    vec[index] |= 1UL << bit;
+    vec[index] |= 1ULL << bit;
   }
 
   /* The following local vars keeps track of allocation information in bb. */
diff --git a/src/maple_be/include/cg/operand.h b/src/maple_be/include/cg/operand.h
index f839383b9e03562a3b94c21a2ddacfc6fbc3073e..4e425757fa8a12ad49cf8908e25bb71e1c96a902 100644
--- a/src/maple_be/include/cg/operand.h
+++ b/src/maple_be/include/cg/operand.h
@@ -416,7 +416,7 @@ class ImmOperand : public Operand {
   }
 
   void BitwiseNegate() {
-    value = ~(static_cast<uint64>(value)) & ((1UL << size) - 1UL);
+    value = ~(static_cast<uint64>(value)) & ((1ULL << size) - 1UL);
   }
 
   void DivideByPow2(int32 shift) {
@@ -424,7 +424,7 @@ class ImmOperand : public Operand {
   }
 
   void ModuloByPow2(int32 shift) {
-    value = (static_cast<uint64>(value)) & ((1UL << shift) - 1UL);
+    value = (static_cast<uint64>(value)) & ((1ULL << shift) - 1UL);
   }
 
   bool IsAllOnes() const {
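Note on the `1UL` to `1ULL` conversions above: `unsigned long` is only guaranteed to be 32 bits wide (and is 32 bits on ILP32 and Windows LLP64 targets), so `1UL << bit` is undefined behavior whenever `bit` can reach 32 or more, as it can for these 64-bit bit-array words and register masks. `1ULL` forces the shift to happen in a 64-bit type. A minimal standalone sketch of the pattern (assumed names, not the Maple sources):

```cpp
#include <cassert>
#include <cstdint>

// Mirrors IsBitArrElemSet: each uint64 bucket holds kU64 = 64 flags.
// With a 32-bit unsigned long, `1UL << bit` would be UB for bit >= 32
// and the test would silently read the wrong flag.
constexpr uint32_t kU64 = 64;

inline bool IsBitSet(const uint64_t *vec, uint32_t num) {
  size_t index = num / kU64;
  uint64_t bit = num % kU64;
  return (vec[index] & (1ULL << bit)) != 0;
}

int main() {
  uint64_t vec[2] = {0, 0};
  vec[0] |= 1ULL << 40;  // well-defined for any bit in [0, 63]
  assert(IsBitSet(vec, 40) && !IsBitSet(vec, 41));
  return 0;
}
```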
diff --git a/src/maple_be/src/cg/aarch64/aarch64_color_ra.cpp b/src/maple_be/src/cg/aarch64/aarch64_color_ra.cpp
index f819bf7484707d949893ccda4accdcc2546485df..f363eac3857aac82951ebf8948e3f5e3d6871a9d 100644
--- a/src/maple_be/src/cg/aarch64/aarch64_color_ra.cpp
+++ b/src/maple_be/src/cg/aarch64/aarch64_color_ra.cpp
@@ -75,7 +75,7 @@ template <typename Func>
 void GraphColorRegAllocator::ForEachBBArrElem(const uint64 *vec, Func functor) const {
   for (uint32 iBBArrElem = 0; iBBArrElem < bbBuckets; ++iBBArrElem) {
     for (uint32 bBBArrElem = 0; bBBArrElem < kU64; ++bBBArrElem) {
-      if ((vec[iBBArrElem] & (1UL << bBBArrElem)) != 0) {
+      if ((vec[iBBArrElem] & (1ULL << bBBArrElem)) != 0) {
         functor(iBBArrElem * kU64 + bBBArrElem);
       }
     }
@@ -86,7 +86,7 @@ template <typename Func>
 void GraphColorRegAllocator::ForEachBBArrElemWithInterrupt(const uint64 *vec, Func functor) const {
   for (uint32 iBBArrElem = 0; iBBArrElem < bbBuckets; ++iBBArrElem) {
     for (uint32 bBBArrElem = 0; bBBArrElem < kU64; ++bBBArrElem) {
-      if ((vec[iBBArrElem] & (1UL << bBBArrElem)) != 0) {
+      if ((vec[iBBArrElem] & (1ULL << bBBArrElem)) != 0) {
         if (functor(iBBArrElem * kU64 + bBBArrElem)) {
           return;
         }
@@ -99,7 +99,7 @@ template <typename Func>
 void GraphColorRegAllocator::ForEachRegArrElem(const uint64 *vec, Func functor) const {
   for (uint32 iBBArrElem = 0; iBBArrElem < regBuckets; ++iBBArrElem) {
     for (uint32 bBBArrElem = 0; bBBArrElem < kU64; ++bBBArrElem) {
-      if ((vec[iBBArrElem] & (1UL << bBBArrElem)) != 0) {
+      if ((vec[iBBArrElem] & (1ULL << bBBArrElem)) != 0) {
         functor(iBBArrElem * kU64 + bBBArrElem);
       }
     }
@@ -127,7 +127,7 @@ void GraphColorRegAllocator::PrintLiveRangeConflicts(const LiveRange &lr) const
   for (uint32 i = 0; i < regBuckets; ++i) {
     uint64 chunk = lr.GetBBConflictElem(i);
     for (uint64 bit = 0; bit < kU64; ++bit) {
-      if (chunk & (1UL << bit)) {
+      if (chunk & (1ULL << bit)) {
         regno_t newNO = i * kU64 + bit;
         LogInfo::MapleLogger() << newNO << ",";
       }
@@ -454,10 +454,10 @@ bool GraphColorRegAllocator::CreateLiveRangeHandleLocal(regno_t regNO, BB &bb, b
     if (isDef) {
       /* movk is handled by different id for use/def in the same insn. */
       lraInfo->SetDefCntElem(regNO, lraInfo->GetDefCntElem(regNO) + 1);
-      lraInfo->SetLocalPregMask(lraInfo->GetLocalPregMask() | (1UL << regNO));
+      lraInfo->SetLocalPregMask(lraInfo->GetLocalPregMask() | (1ULL << regNO));
     } else {
       lraInfo->SetUseCntElem(regNO, lraInfo->GetUseCntElem(regNO) + 1);
-      lraInfo->SetLocalPregMask(lraInfo->GetLocalPregMask() | (1UL << regNO));
+      lraInfo->SetLocalPregMask(lraInfo->GetLocalPregMask() | (1ULL << regNO));
     }
     /* lr info is useful for lra, so continue lr info */
     return false;
@@ -990,7 +990,7 @@ bool GraphColorRegAllocator::CheckOverlap(uint64 val, uint32 &lastBitSet, uint32
     return false;
   }
   for (uint32 x = 0; x < kU64; ++x) {
-    if ((val & (1UL << x)) != 0) {
+    if ((val & (1ULL << x)) != 0) {
       ++overlapNum;
       lastBitSet = i * kU64 + x;
       if (overlapNum > 1) {
@@ -2080,7 +2080,7 @@ void GraphColorRegAllocator::HandleLocalRaDebug(regno_t regNO, const LocalRegAll
   regno_t base = isInt ? R0 : V0;
 
   for (uint32 i = 0; i < RZR; ++i) {
-    if ((regUsed & (1UL << i)) != 0) {
+    if ((regUsed & (1ULL << i)) != 0) {
       LogInfo::MapleLogger() << " " << (i + base);
     }
   }
@@ -2088,7 +2088,7 @@ void GraphColorRegAllocator::HandleLocalRaDebug(regno_t regNO, const LocalRegAll
   LogInfo::MapleLogger() << "\tregs:";
   uint64 regs = localRa.GetPregs(isInt);
   for (uint32 regnoInLoop = 0; regnoInLoop < RZR; ++regnoInLoop) {
-    if ((regs & (1UL << regnoInLoop)) != 0) {
+    if ((regs & (1ULL << regnoInLoop)) != 0) {
       LogInfo::MapleLogger() << " " << (regnoInLoop + base);
     }
   }
@@ -2357,7 +2357,7 @@ MemOperand *GraphColorRegAllocator::GetConsistentReuseMem(const uint64 *conflict
   regno_t regNO;
   for (uint32 i = 0; i < regBuckets; ++i) {
     for (uint32 b = 0; b < kU64; ++b) {
-      if ((conflict[i] & (1UL << b)) != 0) {
+      if ((conflict[i] & (1ULL << b)) != 0) {
         continue;
       }
       regNO = i * kU64 + b;
@@ -2386,7 +2386,7 @@ MemOperand *GraphColorRegAllocator::GetCommonReuseMem(const uint64 *conflict, co
   regno_t regNO;
   for (uint32 i = 0; i < regBuckets; ++i) {
     for (uint32 b = 0; b < kU64; ++b) {
-      if ((conflict[i] & (1UL << b)) != 0) {
+      if ((conflict[i] & (1ULL << b)) != 0) {
         continue;
       }
       regNO = i * kU64 + b;
@@ -2633,17 +2633,17 @@ bool GraphColorRegAllocator::SetAvailableSpillReg(std::set<regno_t> &cannotUseRe
 
   for (const auto &it : callerRegSet) {
     regno_t spillReg = it + base;
-    if (cannotUseReg.find(spillReg) == cannotUseReg.end() && (usedRegMask & (1UL << (spillReg - pregInterval))) == 0) {
+    if (cannotUseReg.find(spillReg) == cannotUseReg.end() && (usedRegMask & (1ULL << (spillReg - pregInterval))) == 0) {
       lr.SetAssignedRegNO(spillReg);
-      usedRegMask |= 1UL << (spillReg - pregInterval);
+      usedRegMask |= 1ULL << (spillReg - pregInterval);
       return true;
     }
   }
   for (const auto &it : calleeRegSet) {
     regno_t spillReg = it + base;
-    if (cannotUseReg.find(spillReg) == cannotUseReg.end() && (usedRegMask & (1UL << (spillReg - pregInterval))) == 0) {
+    if (cannotUseReg.find(spillReg) == cannotUseReg.end() && (usedRegMask & (1ULL << (spillReg - pregInterval))) == 0) {
       lr.SetAssignedRegNO(spillReg);
-      usedRegMask |= 1UL << (spillReg - pregInterval);
+      usedRegMask |= 1ULL << (spillReg - pregInterval);
       return true;
     }
   }
@@ -2712,8 +2712,8 @@ regno_t GraphColorRegAllocator::PickRegForSpill(uint64 &usedRegMask, RegType reg
     if (spillReg >= k64BitSize) {
       spillReg = k64BitSize - 1;
     }
-    if ((usedRegMask & (1UL << (spillReg - pregInterval))) == 0) {
-      usedRegMask |= (1UL << (spillReg - pregInterval));
+    if ((usedRegMask & (1ULL << (spillReg - pregInterval))) == 0) {
+      usedRegMask |= (1ULL << (spillReg - pregInterval));
       needSpillLr = true;
       return spillReg;
     }
@@ -2929,13 +2929,13 @@ void GraphColorRegAllocator::MarkUsedRegs(Operand &opnd, BBAssignInfo *bbInfo, u
   LiveRange *lr = lrVec[vregNO];
   if (lr != nullptr) {
     if (lr->GetAssignedRegNO() != 0) {
-      usedRegMask |= (1UL << (lr->GetAssignedRegNO() - pregInterval));
+      usedRegMask |= (1ULL << (lr->GetAssignedRegNO() - pregInterval));
     }
     if (lr->GetSplitLr() && lr->GetSplitLr()->GetAssignedRegNO()) {
-      usedRegMask |= (1UL << (lr->GetSplitLr()->GetAssignedRegNO() - pregInterval));
+      usedRegMask |= (1ULL << (lr->GetSplitLr()->GetAssignedRegNO() - pregInterval));
     }
   } else if (bbInfo != nullptr && bbInfo->HasRegMap(vregNO)) {
-    usedRegMask |= (1UL << (bbInfo->GetRegMapElem(vregNO) - pregInterval));
+    usedRegMask |= (1ULL << (bbInfo->GetRegMapElem(vregNO) - pregInterval));
   }
 }
diff --git a/src/maple_be/src/cg/aarch64/aarch64_immediate.cpp b/src/maple_be/src/cg/aarch64/aarch64_immediate.cpp
index 391f32cea3a0b884fb63671d75b08635b80f3dad..a4ae2e75e151f6cb81f47beaf69dadee799f3485 100644
--- a/src/maple_be/src/cg/aarch64/aarch64_immediate.cpp
+++ b/src/maple_be/src/cg/aarch64/aarch64_immediate.cpp
@@ -48,7 +48,7 @@ bool IsBitmaskImmediate(uint64 val, uint32 bitLen) {
   }
 
   if (bitLen == k32BitSize) {
-    val = (val << k32BitSize) | (val & ((1UL << k32BitSize) - 1));
+    val = (val << k32BitSize) | (val & ((1ULL << k32BitSize) - 1));
   }
 
   /* get the least significant bit set and add it to 'val' */
@@ -71,7 +71,7 @@ bool IsBitmaskImmediate(uint64 val, uint32 bitLen) {
   }
 
   int32 logDiff = __builtin_ctzll(diff);
-  int64 pattern = val & ((1UL << static_cast<uint32>(diff)) - 1);
+  int64 pattern = val & ((1ULL << static_cast<uint32>(diff)) - 1);
   return val == pattern * bitmaskImmMultTable[kMaxBitTableSize - logDiff];
 }
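Context for the `IsBitmaskImmediate` hunks: AArch64 logical instructions encode immediates as a rotated run of ones replicated across the register, and for 32-bit operations the function first replicates the value into both halves of a 64-bit word before running the pattern check. The low-half mask in that step is exactly where a 32-bit `1UL` would overflow. A standalone illustration (hypothetical helper, not the Maple implementation):

```cpp
#include <cassert>
#include <cstdint>

// Replicates a 32-bit immediate into both halves of a 64-bit word, as
// IsBitmaskImmediate does for bitLen == 32. With a 32-bit unsigned long,
// `(1UL << 32) - 1` is UB rather than the intended 0xFFFFFFFF mask.
inline uint64_t ReplicateTo64(uint64_t val) {
  return (val << 32) | (val & ((1ULL << 32) - 1));
}

int main() {
  assert(ReplicateTo64(0x0000FFFFu) == 0x0000FFFF0000FFFFULL);
  return 0;
}
```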
diff --git a/src/maple_ir/include/mir_builder.h b/src/maple_ir/include/mir_builder.h
index a54a66b35b0435c18d41746ab77dc353d71ca7ef..ec1d39c01f94762d269cc77905ba412fb548a6b9 100644
--- a/src/maple_ir/include/mir_builder.h
+++ b/src/maple_ir/include/mir_builder.h
@@ -240,6 +240,7 @@ class MIRBuilder {
   }
 
   IcallNode *CreateStmtIcall(const MapleVector<BaseNode*> &args);
+  IcallNode *CreateStmtIcallAssigned(const MapleVector<BaseNode*> &args, const MIRSymbol &ret);
   // For Call, VirtualCall, SuperclassCall, InterfaceCall
   IntrinsiccallNode *CreateStmtIntrinsicCall(MIRIntrinsicID idx, const MapleVector<BaseNode*> &arguments,
                                              TyIdx tyIdx = TyIdx());
diff --git a/src/maple_ir/src/bin_mpl_export.cpp b/src/maple_ir/src/bin_mpl_export.cpp
index c91ac7032079b75d764000e9e0e43bc79fabe3a4..204325a220608ed25268c10d75ca35c0e7bcedc5 100644
--- a/src/maple_ir/src/bin_mpl_export.cpp
+++ b/src/maple_ir/src/bin_mpl_export.cpp
@@ -676,7 +676,7 @@ void BinaryMplExport::WriteTypeField(uint64 contentIdx) {
 }
 
-void BinaryMplExport::WriteContentField(int fieldNum, size_t *fieldStartP) {
+void BinaryMplExport::WriteContentField(int fieldNum, uint64 *fieldStartP) {
   WriteNum(kBinContentStart);
   size_t totalSizeIdx = buf.size();
   ExpandFourBuffSize();  // total size of this field to ~BIN_SYM_START
diff --git a/src/maple_ir/src/mir_builder.cpp b/src/maple_ir/src/mir_builder.cpp
index 9282d1dd5dabdc6ce599d46f61a0f1b606de2269..abfe704983e992520f52cc71658ff79e3c64012f 100644
--- a/src/maple_ir/src/mir_builder.cpp
+++ b/src/maple_ir/src/mir_builder.cpp
@@ -765,6 +765,23 @@ IcallNode *MIRBuilder::CreateStmtIcall(const MapleVector<BaseNode*> &args) {
   return stmt;
 }
 
+IcallNode *MIRBuilder::CreateStmtIcallAssigned(const MapleVector<BaseNode*> &args, const MIRSymbol &ret) {
+  auto *stmt = GetCurrentFuncCodeMp()->New<IcallNode>(*GetCurrentFuncCodeMpAllocator(), OP_icallassigned);
+  CallReturnVector nrets(GetCurrentFuncCodeMpAllocator()->Adapter());
+  CHECK_FATAL((ret.GetStorageClass() == kScAuto || ret.GetStorageClass() == kScFormal ||
+               ret.GetStorageClass() == kScExtern || ret.GetStorageClass() == kScGlobal),
+              "unknown classtype! check it!");
+  nrets.push_back(CallReturnPair(ret.GetStIdx(), RegFieldPair(0, 0)));
+  stmt->SetNumOpnds(args.size());
+  stmt->GetNopnd().resize(stmt->GetNumOpnds());
+  stmt->SetReturnVec(nrets);
+  for (size_t i = 0; i < stmt->GetNopndSize(); ++i) {
+    stmt->SetNOpndAt(i, args.at(i));
+  }
+  stmt->SetRetTyIdx(ret.GetTyIdx());
+  return stmt;
+}
+
 IntrinsiccallNode *MIRBuilder::CreateStmtIntrinsicCall(MIRIntrinsicID idx, const MapleVector<BaseNode*> &arguments,
                                                        TyIdx tyIdx) {
   auto *stmt = GetCurrentFuncCodeMp()->New<IntrinsiccallNode>(
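The new `CreateStmtIcallAssigned` packages the boilerplate of an `OP_icallassigned` node: operand 0 is the target address, the remaining operands are the arguments, and the single return value lands in a symbol (which, per the `CHECK_FATAL`, must be auto, formal, extern, or global). A sketch of a call site (hypothetical helper; `funcPtr`, `arg0`, and `retVar` stand for nodes and a symbol the caller already built):

```cpp
#include "mir_builder.h"

// Emit `retVar = (*funcPtr)(arg0)` into func's body via the new API.
void EmitIndirectCall(maple::MIRBuilder &builder, maple::MIRFunction &func,
                      maple::BaseNode &funcPtr, maple::BaseNode &arg0,
                      const maple::MIRSymbol &retVar) {
  maple::MapleVector<maple::BaseNode*> opnds(
      builder.GetCurrentFuncCodeMpAllocator()->Adapter());
  opnds.push_back(&funcPtr);  // operand 0: the function address
  opnds.push_back(&arg0);     // operands 1..n: actual arguments
  maple::IcallNode *icall = builder.CreateStmtIcallAssigned(opnds, retVar);
  func.GetBody()->AddStatement(icall);
}
```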
diff --git a/src/maple_ir/src/mir_nodes.cpp b/src/maple_ir/src/mir_nodes.cpp
index 5024f2872ceaebe46514c5e2139813d06bcd3e88..9c161321b2c3bbc13062ed7b2978a3fd9be4db98 100644
--- a/src/maple_ir/src/mir_nodes.cpp
+++ b/src/maple_ir/src/mir_nodes.cpp
@@ -245,7 +245,6 @@ void RetypeNode::Dump(int32 indent) const {
     ty->Dump(indent + 1);
   }
   DumpOpnd(*theMIRModule, indent);
-  LogInfo::MapleLogger() << "\n";
 }
 
 void ExtractbitsNode::Dump(int32 indent) const {
diff --git a/src/maple_ir/src/option.cpp b/src/maple_ir/src/option.cpp
index 73253f4a1812590a3db2032e3479fc34bf751194..bf80e8d756441c5dd662732b2970ee9d9b5bc278 100644
--- a/src/maple_ir/src/option.cpp
+++ b/src/maple_ir/src/option.cpp
@@ -99,6 +99,7 @@ enum OptionIndex {
   kMapleLinker,
   kMplnkDumpMuid,
   kEmitVtableImpl,
+  kCheckArrayStore,
 };
 
 const Descriptor kUsage[] = {
@@ -173,6 +174,8 @@ const Descriptor kUsage[] = {
 #if MIR_JAVA
   { kSkipVirtual, 0, "", "skipvirtual", kBuildTypeAll, kArgCheckPolicyNone, "  --skipvirtual" },
 #endif
+  { kCheckArrayStore, 0, "", "check-array-store", kBuildTypeAll, kArgCheckPolicyNone,
+    "  --check-array-store        Check array store[default off]" },
   { 0, 0, nullptr, nullptr, kBuildTypeAll, kArgCheckPolicyNone, nullptr }
 };
@@ -288,6 +291,9 @@ bool Options::ParseOptions(int argc, char **argv, std::string &fileName) const {
         Options::skipVirtualMethod = true;
         break;
 #endif
+      case kCheckArrayStore:
+        Options::checkArrayStore = true;
+        break;
       default:
         result = false;
         ASSERT(false, "unhandled case in Options");
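The new option is a plain on/off switch wired through `Options::checkArrayStore`; per the usage text above it defaults to off. Assuming the `maple` driver shipped in src/bin consumes these flags, it would be enabled as, e.g. (file name and other options are placeholders):

```
maple --check-array-store <other options> input.mpl
```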
diff --git a/src/maple_ir/src/parser.cpp b/src/maple_ir/src/parser.cpp
index 8e8d5adc2583d317900d186f7bc33270c1ec253a..e727391c1af3ce486659c5cd74debe7a8de03d5b 100644
--- a/src/maple_ir/src/parser.cpp
+++ b/src/maple_ir/src/parser.cpp
@@ -1563,6 +1563,10 @@ bool MIRParser::ParseJavaClassInterface(MIRSymbol &symbol, bool isClass) {
     return false;
   }
   symbol.SetNameStrIdx(lexer.GetName());
+  if (!GlobalTables::GetGsymTable().AddToStringSymbolMap(symbol)) {
+    Error("duplicate symbol name used in javainterface at ");
+    return false;
+  }
   lexer.NextToken();
   TyIdx tyidx(0);
   if (!ParseType(tyidx)) {
@@ -2390,14 +2394,7 @@ bool MIRParser::ParseMIRForClass() {
   ASSERT(st != nullptr, "st nullptr check");
   st->SetStorageClass(kScInvalid);
   st->SetSKind(kStJavaClass);
-  if (!ParseJavaClassInterface(*st, true)) {
-    return false;
-  }
-  if (!GlobalTables::GetGsymTable().AddToStringSymbolMap(*st)) {
-    Error("duplicate symbol name used in javaclass at ");
-    return false;
-  }
-  return true;
+  return ParseJavaClassInterface(*st, true);
 }
 
 bool MIRParser::ParseMIRForInterface() {
@@ -2405,14 +2402,7 @@ bool MIRParser::ParseMIRForInterface() {
   ASSERT(st != nullptr, "st nullptr check");
   st->SetStorageClass(kScInvalid);
   st->SetSKind(kStJavaInterface);
-  if (!ParseJavaClassInterface(*st, false)) {
-    return false;
-  }
-  if (!GlobalTables::GetGsymTable().AddToStringSymbolMap(*st)) {
-    Error("duplicate symbol name used in javainterface at ");
-    return false;
-  }
-  return true;
+  return ParseJavaClassInterface(*st, false);
 }
 
 bool MIRParser::ParseMIRForFlavor() {
diff --git a/src/maple_util/include/itab_util.h b/src/maple_util/include/itab_util.h
index 7c94546249a514006f0cf9203ab4802aa86eb890..e19902944bbd857a4e3b9732c9b8cf359328c88e 100644
--- a/src/maple_util/include/itab_util.h
+++ b/src/maple_util/include/itab_util.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved.
+ * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved.
  *
  * OpenArkCompiler is licensed under the Mulan PSL v1.
  * You can use this software according to the terms and conditions of the Mulan PSL v1.
diff --git a/src/maple_util/include/literalstrname.h b/src/maple_util/include/literalstrname.h
index 41fd08b6af3c8fef0a5dd56487aa7ead07b406b9..249033e7cf173b3504d1b89b4a1e0c7d5524eea9 100644
--- a/src/maple_util/include/literalstrname.h
+++ b/src/maple_util/include/literalstrname.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved.
+ * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved.
  *
  * OpenArkCompiler is licensed under the Mulan PSL v1.
  * You can use this software according to the terms and conditions of the Mulan PSL v1.
diff --git a/src/maple_util/include/namemangler.h b/src/maple_util/include/namemangler.h
index 256ee5274d01782f49ca20a294df24649dbb190e..8ad623d38156079fb982ff042ef1a829c01ee3da 100644
--- a/src/maple_util/include/namemangler.h
+++ b/src/maple_util/include/namemangler.h
@@ -144,6 +144,18 @@ static constexpr const char kClinitSuffix[] = "_7C_3Cclinit_3E_7C_28_29V";
 static constexpr const char kCinitStr[] = "_7C_3Cinit_3E_7C_28";
 static constexpr const char kClinitSubStr[] = "7C_3Cinit_3E_7C";
 
+static constexpr const char kPreNativeFunc[] = "MCC_PreNativeCall";
+static constexpr const char kPostNativeFunc[] = "MCC_PostNativeCall";
+static constexpr const char kDecodeRefFunc[] = "MCC_DecodeReference";
+static constexpr const char kFindNativeFunc[] = "MCC_FindNativeMethodPtr";
+static constexpr const char kFindNativeFuncNoeh[] = "MCC_FindNativeMethodPtrWithoutException";
+static constexpr const char kDummyNativeFunc[] = "MCC_DummyNativeMethodPtr";
+static constexpr const char kCheckThrowPendingExceptionFunc[] = "MCC_CheckThrowPendingException";
+static constexpr const char kCallFastNative[] = "MCC_CallFastNative";
+static constexpr const char kCallFastNativeExt[] = "MCC_CallFastNativeExt";
+static constexpr const char kCallSlowNativeExt[] = "MCC_CallSlowNativeExt";
+static constexpr const char kSetReliableUnwindContextFunc[] = "MCC_SetReliableUnwindContext";
+
 static constexpr const char kFunctionLayoutStr[] = "__func_layout__";
 
 static constexpr const char kFunctionProfileTabPrefixStr[] = "__muid_profile_func_tab";
diff --git a/src/maple_util/include/utils.h b/src/maple_util/include/utils.h
index bf8d2e99a6e0899ad9d6d1494757572dee2ece6a..8d447d0e827d5a5773855e33badf479ef6ebf70d 100644
--- a/src/maple_util/include/utils.h
+++ b/src/maple_util/include/utils.h
@@ -108,7 +108,7 @@ constexpr uint32_t bit_field_v = bit_field::value;
 
 template >
 struct lbit_field {
-  enum {value = 1UL << pos};
+  enum {value = 1ULL << pos};
 };
 
 template
diff --git a/src/mpl2mpl/include/analyzector.h b/src/mpl2mpl/include/analyzector.h
index 2178435152f38bd16afc7be4e6111f0e174122d9..78add925a4ae399d844fa5e1bb9047d8aea6e29b 100644
--- a/src/mpl2mpl/include/analyzector.h
+++ b/src/mpl2mpl/include/analyzector.h
@@ -16,6 +16,7 @@
 #define MPL2MPL_INCLUDE_ANALYZECTOR_H
 #include "module_phase.h"
 #include "phase_impl.h"
+
 namespace maple {
 class AnalyzeCtor : public FuncOptimizeImpl {
  public:
diff --git a/src/mpl2mpl/include/native_stub_func.h b/src/mpl2mpl/include/native_stub_func.h
index be6c55cad938cbdcfdc37abb78abf361022785be..7f53b9545340305cd9535ea13f1c1a668cda63ac 100644
--- a/src/mpl2mpl/include/native_stub_func.h
+++ b/src/mpl2mpl/include/native_stub_func.h
@@ -26,19 +26,8 @@ constexpr int kSlownativeFuncnum = 9;
 constexpr int kJniTypeNormal = 0;
 constexpr int kJniTypeMapleCriticalNative = 1;
 constexpr int kJnitTypeCriticalNative = 2;
-constexpr int kInvalidCode = 0xFF;
+constexpr int kInvalidCode = 0x01;
 
-constexpr char kPreNativeFunc[] = "MCC_PreNativeCall";
-constexpr char kPostNativeFunc[] = "MCC_PostNativeCall";
-constexpr char kDecodeRefFunc[] = "MCC_DecodeReference";
-constexpr char kFindNativeFunc[] = "MCC_FindNativeMethodPtr";
-constexpr char kFindNativeFuncNoeh[] = "MCC_FindNativeMethodPtrWithoutException";
-constexpr char kDummyNativeFunc[] = "MCC_DummyNativeMethodPtr";
-constexpr char kCheckThrowPendingExceptionFunc[] = "MCC_CheckThrowPendingException";
-constexpr char kCallFastNativeFunc[] = "MCC_CallFastNative";
-constexpr char kCallFastNativeExtFunc[] = "MCC_CallFastNativeExt";
-constexpr char kCallSlowNativeExtFunc[] = "MCC_CallSlowNativeExt";
-constexpr char kSetReliableUnwindContextFunc[] = "MCC_SetReliableUnwindContext";
 class NativeFuncProperty {
  public:
   NativeFuncProperty() = default;
@@ -95,8 +84,6 @@ class NativeStubFuncGeneration : public FuncOptimizeImpl {
   MIRFunction *MRTPostNativeFunc = nullptr;
   MIRFunction *MRTDecodeRefFunc = nullptr;
   MIRFunction *MRTCheckThrowPendingExceptionFunc = nullptr;
-  MIRFunction *MRTCallFastNativeFunc = nullptr;
-  MIRFunction *MRTCallFastNativeExtFunc = nullptr;
   MIRFunction *MRTCallSlowNativeFunc[kSlownativeFuncnum] = { nullptr };  // for native func which args <=8, use x0-x7
   MIRFunction *MRTCallSlowNativeExtFunc = nullptr;
   MIRFunction *MCCSetReliableUnwindContextFunc = nullptr;
diff --git a/src/mpl2mpl/include/vtable_impl.h b/src/mpl2mpl/include/vtable_impl.h
index 72cc896949706737904b0c295fb217f4878c5b27..601372dd4a7d3baa1cc80a94e6e5be4121a1fc0d 100644
--- a/src/mpl2mpl/include/vtable_impl.h
+++ b/src/mpl2mpl/include/vtable_impl.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved.
+ * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved.
  *
  * OpenArkCompiler is licensed under the Mulan PSL v1.
  * You can use this software according to the terms and conditions of the Mulan PSL v1.
@@ -37,6 +37,7 @@ class VtableImpl : public FuncOptimizeImpl {
 
  private:
   void ReplaceResolveInterface(StmtNode &stmt, const ResolveFuncNode &resolveNode);
+  bool Intrinsify(MIRFunction &func, CallNode &cnode);
   MIRModule *mirModule;
   MIRFunction *mccItabFunc;
 };
diff --git a/src/mpl2mpl/src/native_stub_func.cpp b/src/mpl2mpl/src/native_stub_func.cpp
index 37d735be271174cd3d642b2fc56c0a46dc9c9e5f..54acb553163d073eb243ddbb87c6ffeef43f6497 100644
--- a/src/mpl2mpl/src/native_stub_func.cpp
+++ b/src/mpl2mpl/src/native_stub_func.cpp
@@ -90,18 +90,20 @@ MIRFunction &NativeStubFuncGeneration::GetOrCreateDefaultNativeFunc(MIRFunction
   return *nativeFunc;
 }
 
-// The final order of statements inside of this stub function may need to be adjusted.
-// syncenter (dread ref %_this)        // if native func is synchronized
-// callassigned &__MRT_PreNativeCall (addrof ptr $__cinf_calling_class) {regassign ptr %2}
-// // or callassigned &__MRT_PreNativeCall (regread ref %_this) {regassign ptr %2}
-//
-// // call to the actual registered or implemented native function
-// callassigned &MCC_DecodeReference(dread ref %retvar_stubfunc) {dassign ref %retvar_stubfunc}
-// callassigned &__MRT_PostNativeCall (dread ptr %env_ptr) {}
-// syncexit (dread ref %_this)         // if native func is synchronized
-//
-// in the end and before return to Java frame, check pending exception
-// callassigned &MCC_CheckThrowPendingException () {}
+// Create function body of the stub func.
+// syncenter (dread ref %_this or classinfo)  // if native func is synchronized
+// if (not critical_native)
+//   callassigned MCC_PreNativeCall() {Env}
+//   call native_func(Env, [classinfo], original_args){retv};
+// else
+//   call native_func(original_args){};
+// if (type of retv is ref)
+//   callassigned &MCC_DecodeReference(dread ref %retv) {dassign ref %retv}
+// if (not critical_native)
+//   callassigned &__MRT_PostNativeCall (dread ptr %env_ptr) {}
+// syncexit (dread ref %_this or classinfo)  // if native func is synchronized
+// if (not critical_native)
+//   callassigned &MCC_CheckThrowPendingException () {}
 void NativeStubFuncGeneration::ProcessFunc(MIRFunction *func) {
   // FUNCATTR_bridge for function to exclude
   ASSERT(func != nullptr, "null ptr check!");
@@ -118,8 +120,8 @@ void NativeStubFuncGeneration::ProcessFunc(MIRFunction *func) {
   func->GetBody()->ResetBlock();
   NativeFuncProperty funcProperty;
   bool needNativeCall = (!func->GetAttr(FUNCATTR_critical_native)) && (funcProperty.jniType == kJniTypeNormal);
-  bool needCheckThrowPendingExceptionFunc = needNativeCall;
   if (funcProperty.jniType == kJnitTypeCriticalNative) {
+    // Can't reach here now.
     func->SetAttr(FUNCATTR_critical_native);
   }
   GStrIdx classObjSymStrIdx =
@@ -148,8 +150,7 @@ void NativeStubFuncGeneration::ProcessFunc(MIRFunction *func) {
     NaryStmtNode *syncEnter = builder->CreateStmtNary(OP_syncenter, monitor);
     func->GetBody()->AddStatement(syncEnter);
   }
-  // Get Env pointer, skip for critical native functions who do not need Env
-  // Generate stubfunc call/return stmt, extra args only for non-critical_native calls
+  // Get Env pointer through MCC_PreNativeCall(){Env}
   MIRSymbol *envPtrSym = nullptr;
   PregIdx envPregIdx = 0;
   if (Options::usePreg) {
@@ -158,34 +159,16 @@ void NativeStubFuncGeneration::ProcessFunc(MIRFunction *func) {
     envPtrSym = builder->CreateSymbol(GlobalTables::GetTypeTable().GetVoidPtr()->GetTypeIndex(), "env_ptr", kStVar,
                                       kScAuto, func, kScopeLocal);
   }
-  // Generate a MRT call for extra work before calling the native
-  BaseNode *callerObj = nullptr;  // it will be used by PreNativeCall, and might be used by syncenter
-  if (func->GetAttr(FUNCATTR_static)) {
-    // Grab class object
-    callerObj = builder->CreateExprAddrof(0, *classObjSymbol);
-  } else {
-    // Grab _this pointer
-    MIRSymbol *formal0St = func->GetFormal(0);
-    if (formal0St->GetSKind() == kStPreg) {
-      callerObj =
-          builder->CreateExprRegread(formal0St->GetType()->GetPrimType(),
-                                     func->GetPregTab()->GetPregIdxFromPregno(formal0St->GetPreg()->GetPregNo()));
-    } else {
-      callerObj = builder->CreateExprDread(*formal0St);
-    }
-  }
-  MapleVector<BaseNode*> args(builder->GetCurrentFuncCodeMpAllocator()->Adapter());
-  args.push_back(callerObj);
+  MapleVector<BaseNode*> args(func->GetCodeMempoolAllocator().Adapter());
   CallNode *preFuncCall = Options::usePreg
       ? builder->CreateStmtCallRegassigned(MRTPreNativeFunc->GetPuidx(), args, envPregIdx, OP_callassigned)
       : builder->CreateStmtCallAssigned(MRTPreNativeFunc->GetPuidx(), args, envPtrSym, OP_callassigned);
-  // Generate a MRT call for extra work after calling the native
-  MapleVector<BaseNode*> postArgs(func->GetCodeMempoolAllocator().Adapter());
-  postArgs.push_back(Options::usePreg ? (static_cast<BaseNode*>(builder->CreateExprRegread(PTY_ptr, envPregIdx)))
-                                      : (static_cast<BaseNode*>(builder->CreateExprDread(*envPtrSym))));
+  // Use Env as an arg, MCC_PostNativeCall(Env)
+  args.push_back(Options::usePreg ? (static_cast<BaseNode*>(builder->CreateExprRegread(PTY_ptr, envPregIdx)))
+                                  : (static_cast<BaseNode*>(builder->CreateExprDread(*envPtrSym))));
   CallNode *postFuncCall =
-      builder->CreateStmtCallAssigned(MRTPostNativeFunc->GetPuidx(), postArgs, nullptr, OP_callassigned);
+      builder->CreateStmtCallAssigned(MRTPostNativeFunc->GetPuidx(), args, nullptr, OP_callassigned);
 
   MapleVector<BaseNode*> allocCallArgs(func->GetCodeMempoolAllocator().Adapter());
   if (!func->GetAttr(FUNCATTR_critical_native)) {
@@ -229,7 +212,6 @@ void NativeStubFuncGeneration::ProcessFunc(MIRFunction *func) {
     func->GetBody()->AddStatement(callAssign);
   }
   if (func->GetReturnType()->GetPrimType() == PTY_ref) {
-    // Generate a MRT call to decode the tagged pointer
     MapleVector<BaseNode*> decodeArgs(func->GetCodeMempoolAllocator().Adapter());
     CHECK_FATAL(stubFuncRet != nullptr, "stubfunc_ret is nullptr");
     decodeArgs.push_back(builder->CreateExprDread(*stubFuncRet));
@@ -237,7 +219,6 @@ void NativeStubFuncGeneration::ProcessFunc(MIRFunction *func) {
         builder->CreateStmtCallAssigned(MRTDecodeRefFunc->GetPuidx(), decodeArgs, stubFuncRet, OP_callassigned);
     func->GetBody()->AddStatement(decodeFuncCall);
   }
-  // Generate a MRT call for extra work after calling the native
   if (needNativeCall) {
     func->GetBody()->AddStatement(postFuncCall);
   }
@@ -262,7 +243,7 @@ void NativeStubFuncGeneration::ProcessFunc(MIRFunction *func) {
     func->GetBody()->AddStatement(syncExit);
   }
   // check pending exception just before leaving this stub frame except for critical natives
-  if (needCheckThrowPendingExceptionFunc) {
+  if (needNativeCall) {
     MapleVector<BaseNode*> getExceptArgs(func->GetCodeMempoolAllocator().Adapter());
     CallNode *callGetExceptFunc = builder->CreateStmtCallAssigned(MRTCheckThrowPendingExceptionFunc->GetPuidx(),
                                                                   getExceptArgs, nullptr, OP_callassigned);
@@ -301,7 +282,7 @@ void NativeStubFuncGeneration::GenerateRegFuncTabEntry() {
 #else
   constexpr int locIdxShift = 4;
 #endif
-  constexpr uint64 locIdxMask = 0xFF00000000000000;
+  constexpr uint64 locIdxMask = 0x01;
   uint64 locIdx = regFuncTabConst->GetConstVec().size();
   auto *newConst =
       GlobalTables::GetIntConstTable().GetOrCreateIntConst(static_cast<int64>((locIdx << locIdxShift) | locIdxMask),
@@ -327,13 +308,13 @@ void NativeStubFuncGeneration::GenerateRegTabEntry(const MIRFunction &func) {
   if (tmp.length() > base.length() && tmp.find(base) != std::string::npos) {
     tmp.replace(tmp.find(base), base.length() + 1, "");
   }
-  uint32 nameIdx = ReflectionAnalysis::FindOrInsertRepeatString(tmp, true);   // always used
+  uint32 baseFuncNameWithTypeIdx = ReflectionAnalysis::FindOrInsertRepeatString(tmp, true);  // always used
   uint32 classIdx = ReflectionAnalysis::FindOrInsertRepeatString(base, true);  // always used
   // Using MIRIntConst instead of MIRStruct for RegTable.
   auto *baseConst =
       GlobalTables::GetIntConstTable().GetOrCreateIntConst(classIdx, *GlobalTables::GetTypeTable().GetVoidPtr());
   regTableConst->PushBack(baseConst);
-  auto *newConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(nameIdx,
+  auto *newConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(baseFuncNameWithTypeIdx,
                                                                         *GlobalTables::GetTypeTable().GetVoidPtr());
   regTableConst->PushBack(newConst);
 }
@@ -369,17 +350,17 @@ void NativeStubFuncGeneration::GenerateRegisteredNativeFuncCall(MIRFunction &fun
       (!func.GetAttr(FUNCATTR_critical_native)) && (funcProperty.jniType == kJniTypeNormal);
   // Get current native method function ptr from reg_jni_func_tab slot
   // and define a temp register for shift operation
-  auto funcptrshiftPreg = func.GetPregTab()->CreatePreg(PTY_ptr);
-  BaseNode *regreadExpr = builder->CreateExprRegread(PTY_ptr, funcptrPreg);
-  constexpr int intConstLength = 56;
-  BaseNode *shiftExpr = builder->CreateExprBinary(OP_lshr, *GlobalTables::GetTypeTable().GetPtr(), regreadExpr,
-                                                  builder->CreateIntConst(intConstLength, PTY_u32));
-  RegassignNode *funcptrshiftAssign = builder->CreateStmtRegassign(PTY_ptr, funcptrshiftPreg, shiftExpr);
-  auto readFuncptrshift = builder->CreateExprRegread(PTY_ptr, funcptrshiftPreg);
+  auto funcPtrAndOpPreg = func.GetPregTab()->CreatePreg(PTY_ptr);
+  BaseNode *regReadExpr = builder->CreateExprRegread(PTY_ptr, funcptrPreg);
+  constexpr int intConstLength = 1;
+  BaseNode *andExpr = builder->CreateExprBinary(OP_band, *GlobalTables::GetTypeTable().GetPtr(), regReadExpr,
                                                 builder->CreateIntConst(intConstLength, PTY_u32));
+  RegassignNode *funcPtrAndOpAssign = builder->CreateStmtRegassign(PTY_ptr, funcPtrAndOpPreg, andExpr);
+  auto readFuncPtrAndReg = builder->CreateExprRegread(PTY_ptr, funcPtrAndOpPreg);
   BaseNode *checkRegExpr =
       builder->CreateExprCompare(OP_eq, *GlobalTables::GetTypeTable().GetUInt1(), *GlobalTables::GetTypeTable().GetPtr(),
-                                 readFuncptrshift, builder->CreateIntConst(kInvalidCode, PTY_ptr));
+                                 readFuncPtrAndReg, builder->CreateIntConst(kInvalidCode, PTY_ptr));
   auto *ifStmt = static_cast<IfStmtNode*>(builder->CreateStmtIf(checkRegExpr));
   // get find_native_func function
   MIRType *voidPointerType = GlobalTables::GetTypeTable().GetVoidPtr();
@@ -402,15 +383,17 @@ void NativeStubFuncGeneration::GenerateRegisteredNativeFuncCall(MIRFunction &fun
     func.GetBody()->AddStatement(wrapperCall);
   } else if (!Options::regNativeDynamicOnly) {
     func.GetBody()->AddStatement(funcptrAssign);
-    func.GetBody()->AddStatement(funcptrshiftAssign);
+    func.GetBody()->AddStatement(funcPtrAndOpAssign);
     // Get find_native_func function
-    MIRFunction *findNativeFunc = builder->GetOrCreateFunction(kFindNativeFuncNoeh, voidPointerType->GetTypeIndex());
+    MIRFunction *findNativeFunc = builder->GetOrCreateFunction(NameMangler::kFindNativeFuncNoeh,
+                                                               voidPointerType->GetTypeIndex());
     findNativeFunc->SetAttr(FUNCATTR_nosideeffect);
     // CallAssigned statement for unregistered situation
     CallNode *callGetFindNativeFunc = builder->CreateStmtCallRegassigned(findNativeFunc->GetPuidx(), dynamicStubOpnds,
                                                                          funcptrPreg, OP_callassigned);
     // Check return value of dynamic linking stub
-    MIRFunction *dummyNativeFunc = builder->GetOrCreateFunction(kDummyNativeFunc, voidPointerType->GetTypeIndex());
+    MIRFunction *dummyNativeFunc = builder->GetOrCreateFunction(NameMangler::kDummyNativeFunc,
+                                                                voidPointerType->GetTypeIndex());
     dummyNativeFunc->SetAttr(FUNCATTR_nosideeffect);
     auto dummyFuncPreg = func.GetPregTab()->CreatePreg(PTY_ptr);
     auto readDummyFuncPtr = builder->CreateExprRegread(PTY_ptr, dummyFuncPreg);
@@ -454,8 +437,9 @@ void NativeStubFuncGeneration::GenerateRegisteredNativeFuncCall(MIRFunction &fun
     }
   } else {
     func.GetBody()->AddStatement(funcptrAssign);
-    func.GetBody()->AddStatement(funcptrshiftAssign);
-    MIRFunction *findNativeFunc = builder->GetOrCreateFunction(kFindNativeFunc, voidPointerType->GetTypeIndex());
+    func.GetBody()->AddStatement(funcPtrAndOpAssign);
+    MIRFunction *findNativeFunc = builder->GetOrCreateFunction(NameMangler::kFindNativeFunc,
+                                                               voidPointerType->GetTypeIndex());
     findNativeFunc->SetAttr(FUNCATTR_nosideeffect);
     // CallAssigned statement for unregistered situation
     CallNode *callGetFindNativeFunc = builder->CreateStmtCallRegassigned(findNativeFunc->GetPuidx(), dynamicStubOpnds,
@@ -485,7 +469,8 @@ void NativeStubFuncGeneration::GenerateRegisteredNativeFuncCall(MIRFunction &fun
     icall->SetNOpndAt(0, readFuncPtr);
     icall->SetRetTyIdx(nativeFunc.GetReturnTyIdx());
     // Check if funcptr is Invalid
-    MIRFunction *findNativeFunc = builder->GetOrCreateFunction(kFindNativeFunc, voidPointerType->GetTypeIndex());
+    MIRFunction *findNativeFunc = builder->GetOrCreateFunction(NameMangler::kFindNativeFunc,
+                                                               voidPointerType->GetTypeIndex());
     findNativeFunc->SetAttr(FUNCATTR_nosideeffect);
     // CallAssigned statement for unregistered situation
     CallNode *callGetFindNativeFunc =
@@ -503,56 +488,33 @@ void NativeStubFuncGeneration::GenerateRegisteredNativeFuncCall(MIRFunction &fun
 
 StmtNode *NativeStubFuncGeneration::CreateNativeWrapperCallNode(MIRFunction &func, BaseNode *funcPtr,
                                                                 MapleVector<BaseNode*> &args, const MIRSymbol *ret) {
-  MIRFunction *wrapperFunc = nullptr;
-  MapleVector<BaseNode*> wrapperArgs(func.GetCodeMPAllocator().Adapter());
-  // The first arg is the natvie function pointer.
-  wrapperArgs.push_back(funcPtr);
-  // Is fast native?
-  auto isFast = (func.GetAttr(FUNCATTR_fast_native) || func.GetAttr(FUNCATTR_critical_native));
-  // Do not need native wrapper for critical natives
-  // if num_of_args < 8
 #ifdef USE_ARM32_MACRO
   constexpr size_t numOfArgs = 4;
 #else
   constexpr size_t numOfArgs = 8;
 #endif
-
-  if (func.GetAttr(FUNCATTR_critical_native) && args.size() < numOfArgs) {
-    auto *icall = func.GetCodeMempool()->New<IcallNode>(GetMIRModule(), OP_icallassigned);
-    CallReturnVector nrets(func.GetCodeMempoolAllocator().Adapter());
-    if (ret != nullptr) {
-      CHECK_FATAL((ret->GetStorageClass() == kScAuto || ret->GetStorageClass() == kScFormal ||
-                   ret->GetStorageClass() == kScExtern || ret->GetStorageClass() == kScGlobal),
-                  "unknown classtype! check it!");
-      nrets.push_back(CallReturnPair(ret->GetStIdx(), RegFieldPair(0, 0)));
-    }
-    icall->SetNumOpnds(args.size() + 1);
-    icall->GetNopnd().resize(icall->GetNumOpnds());
-    icall->SetReturnVec(nrets);
-    for (size_t i = 1; i < icall->GetNopndSize(); ++i) {
-      icall->SetNOpndAt(i, args[i - 1]->CloneTree(GetMIRModule().GetCurFuncCodeMPAllocator()));
+  MIRFunction *wrapperFunc = nullptr;
+  MapleVector<BaseNode*> wrapperArgs(func.GetCodeMPAllocator().Adapter());
+  // The first arg is the native function pointer.
+  wrapperArgs.push_back(funcPtr);
+  // Push back all original args.
+  wrapperArgs.insert(wrapperArgs.end(), args.begin(), args.end());
+  // Do not need native wrapper for fast natives or critical natives.
+  if (func.GetAttr(FUNCATTR_fast_native) || func.GetAttr(FUNCATTR_critical_native)) {
+    if (ret == nullptr) {
+      return builder->CreateStmtIcall(wrapperArgs);
+    } else {
+      return builder->CreateStmtIcallAssigned(wrapperArgs, *ret);
     }
-    icall->SetNOpndAt(0, funcPtr);
-    icall->SetRetTyIdx(func.GetReturnTyIdx());
-    return icall;
-  }
-
-  // If num of args > 8
+  }
   if (args.size() > numOfArgs) {
-    wrapperFunc = isFast ? MRTCallFastNativeExtFunc : MRTCallSlowNativeExtFunc;
-  } else if (isFast) {
-    wrapperFunc = MRTCallFastNativeFunc;
+    wrapperFunc = MRTCallSlowNativeExtFunc;
   } else {
     wrapperFunc = MRTCallSlowNativeFunc[args.size()];
   }
-  // Push back all original args.
-  wrapperArgs.insert(wrapperArgs.end(), args.begin(), args.end());
-  // If no return (aka void)
   if (ret == nullptr) {
-    // Use 'call' statement if no return value.
     return builder->CreateStmtCall(wrapperFunc->GetPuidx(), wrapperArgs);
   } else {
-    // Use 'callassigned' if the function has return value.
     return builder->CreateStmtCallAssigned(wrapperFunc->GetPuidx(), wrapperArgs, ret, OP_callassigned);
   }
 }
@@ -564,7 +526,6 @@ void NativeStubFuncGeneration::GenerateNativeWrapperFuncCall(MIRFunction &func,
 }
 
 void NativeStubFuncGeneration::GenerateRegTableEntryType() {
-  // Use MIRIntType instead of MIRStructType in RegTableEntry
   MIRArrayType &arrayType =
       *GlobalTables::GetTypeTable().GetOrCreateArrayType(*GlobalTables::GetTypeTable().GetVoidPtr(), 0);
   regTableConst = GetMIRModule().GetMemPool()->New<MIRAggConst>(GetMIRModule(), arrayType);
@@ -576,64 +537,46 @@ void NativeStubFuncGeneration::GenerateHelperFuncDecl() {
   MIRType *refType = GlobalTables::GetTypeTable().GetRef();
   // MRT_PendingException
   MRTCheckThrowPendingExceptionFunc =
-      builder->GetOrCreateFunction(kCheckThrowPendingExceptionFunc, voidType->GetTypeIndex());
-  CHECK_FATAL(MRTCheckThrowPendingExceptionFunc != nullptr,
-              "MRTCheckThrowPendingExceptionFunc is null in NativeStubFuncGeneration::GenerateHelperFuncDecl");
+      builder->GetOrCreateFunction(NameMangler::kCheckThrowPendingExceptionFunc, voidType->GetTypeIndex());
+  CHECK_FATAL(MRTCheckThrowPendingExceptionFunc != nullptr, "MRTCheckThrowPendingExceptionFunc is null.");
   MRTCheckThrowPendingExceptionFunc->SetAttr(FUNCATTR_nosideeffect);
   MRTCheckThrowPendingExceptionFunc->SetBody(nullptr);
   // MRT_PreNativeCall
   ArgVector preArgs(GetMIRModule().GetMPAllocator().Adapter());
-  preArgs.push_back(ArgPair("caller", refType));
-  MRTPreNativeFunc = builder->CreateFunction(kPreNativeFunc, *voidPointerType, preArgs);
-  CHECK_FATAL(MRTPreNativeFunc != nullptr,
-              "MRTPreNativeFunc is null in NativeStubFuncGeneration::GenerateHelperFuncDecl");
+  MRTPreNativeFunc = builder->CreateFunction(NameMangler::kPreNativeFunc, *voidPointerType, preArgs);
+  CHECK_FATAL(MRTPreNativeFunc != nullptr, "MRTPreNativeFunc is null.");
   MRTPreNativeFunc->SetBody(nullptr);
   // MRT_PostNativeCall
   ArgVector postArgs(GetMIRModule().GetMPAllocator().Adapter());
   postArgs.push_back(ArgPair("env", voidPointerType));
-  MRTPostNativeFunc = builder->CreateFunction(kPostNativeFunc, *voidType, postArgs);
-  CHECK_FATAL(MRTPostNativeFunc != nullptr,
-              "MRTPostNativeFunc is null in NativeStubFuncGeneration::GenerateHelperFuncDecl");
+  MRTPostNativeFunc = builder->CreateFunction(NameMangler::kPostNativeFunc, *voidType, postArgs);
+  CHECK_FATAL(MRTPostNativeFunc != nullptr, "MRTPostNativeFunc is null.");
   MRTPostNativeFunc->SetBody(nullptr);
   // MRT_DecodeReference
   ArgVector decodeArgs(GetMIRModule().GetMPAllocator().Adapter());
   decodeArgs.push_back(ArgPair("obj", refType));
-  MRTDecodeRefFunc = builder->CreateFunction(kDecodeRefFunc, *refType, decodeArgs);
-  CHECK_FATAL(MRTDecodeRefFunc != nullptr,
-              "MRTDecodeRefFunc is null in NativeStubFuncGeneration::GenerateHelperFuncDecl");
+  MRTDecodeRefFunc = builder->CreateFunction(NameMangler::kDecodeRefFunc, *refType, decodeArgs);
+  CHECK_FATAL(MRTDecodeRefFunc != nullptr, "MRTDecodeRefFunc is null.");
   MRTDecodeRefFunc->SetAttr(FUNCATTR_nosideeffect);
   MRTDecodeRefFunc->SetBody(nullptr);
-  // MCC_CallFastNative
+  // MCC_CallSlowNative
   ArgVector callArgs(GetMIRModule().GetMPAllocator().Adapter());
   callArgs.push_back(ArgPair("func", voidPointerType));
-  MRTCallFastNativeFunc = builder->CreateFunction(kCallFastNativeFunc, *voidPointerType, callArgs);
-  CHECK_FATAL(MRTCallFastNativeFunc != nullptr,
-              "MRTCallFastNativeFunc is null in NativeStubFuncGeneration::GenerateHelperFuncDecl");
-  MRTCallFastNativeFunc->SetBody(nullptr);
-  // MCC_CallSlowNative
   for (int i = 0; i < kSlownativeFuncnum; ++i) {
     MRTCallSlowNativeFunc[i] = builder->CreateFunction(callSlowNativeFuncs[i], *voidPointerType, callArgs);
-    CHECK_FATAL(MRTCallSlowNativeFunc[i] != nullptr,
-                "MRTCallSlowNativeFunc is null in NativeStubFuncGeneration::GenerateHelperFuncDecl");
+    CHECK_FATAL(MRTCallSlowNativeFunc[i] != nullptr, "MRTCallSlowNativeFunc is null.");
     MRTCallSlowNativeFunc[i]->SetBody(nullptr);
   }
-  // MCC_CallFastNativeExt
+  // MCC_CallSlowNativeExt
   ArgVector callExtArgs(GetMIRModule().GetMPAllocator().Adapter());
   callExtArgs.push_back(ArgPair("func", voidPointerType));
-  MRTCallFastNativeExtFunc = builder->CreateFunction(kCallFastNativeExtFunc, *voidPointerType, callExtArgs);
-  CHECK_FATAL(MRTCallFastNativeExtFunc != nullptr,
-              "MRTCallFastNativeExtFunc is null in NativeStubFuncGeneration::GenerateHelperFuncDecl");
-  MRTCallFastNativeExtFunc->SetBody(nullptr);
-  // MCC_CallSlowNativeExt
-  MRTCallSlowNativeExtFunc = builder->CreateFunction(kCallSlowNativeExtFunc, *voidPointerType, callExtArgs);
-  CHECK_FATAL(MRTCallSlowNativeExtFunc != nullptr,
-              "MRTCallSlowNativeExtFunc is null in NativeStubFuncGeneration::GenerateHelperFuncDecl");
+  MRTCallSlowNativeExtFunc = builder->CreateFunction(NameMangler::kCallSlowNativeExt, *voidPointerType, callExtArgs);
+  CHECK_FATAL(MRTCallSlowNativeExtFunc != nullptr, "MRTCallSlowNativeExtFunc is null.");
   MRTCallSlowNativeExtFunc->SetBody(nullptr);
   // MCC_SetReliableUnwindContext
   MCCSetReliableUnwindContextFunc =
-      builder->GetOrCreateFunction(kSetReliableUnwindContextFunc, voidType->GetTypeIndex());
-  CHECK_FATAL(MCCSetReliableUnwindContextFunc != nullptr,
-              "MCCSetReliableUnwindContextFunc is null in NativeStubFuncGeneration::GenerateHelperFuncDecl");
+      builder->GetOrCreateFunction(NameMangler::kSetReliableUnwindContextFunc, voidType->GetTypeIndex());
+  CHECK_FATAL(MCCSetReliableUnwindContextFunc != nullptr, "MCCSetReliableUnwindContextFunc is null");
   MCCSetReliableUnwindContextFunc->SetAttr(FUNCATTR_nosideeffect);
   MCCSetReliableUnwindContextFunc->SetBody(nullptr);
 }
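Background for the `kInvalidCode`/`locIdxMask` rework in this file and native_stub_func.h: an unresolved `reg_jni_func_tab` slot used to be tagged in the top byte (mask `0xFF00000000000000`, tested with `OP_lshr` by 56); it is now tagged in the lowest bit (mask `0x01`, tested with `OP_band` against 1). Native entry points are at least 4-byte aligned, so bit 0 of a real function pointer is always clear and cannot collide with the tag. A standalone sketch of the scheme (assumed helper names, mirroring the generated IR):

```cpp
#include <cassert>
#include <cstdint>

constexpr uint64_t kInvalidCode = 0x01;  // low-bit tag, as in the patch
constexpr int kLocIdxShift = 4;

// Unresolved slot: the location index shifted up, tagged with bit 0.
inline uint64_t MakeUnresolvedSlot(uint64_t locIdx) {
  return (locIdx << kLocIdxShift) | kInvalidCode;
}

// Mirrors the generated check: band the slot with 1, compare to kInvalidCode.
inline bool IsUnresolved(uint64_t slot) {
  return (slot & 0x01) == kInvalidCode;
}

int main() {
  assert(IsUnresolved(MakeUnresolvedSlot(42)));
  assert(!IsUnresolved(0x0000ffff12345670ULL));  // aligned code address
  return 0;
}
```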
diff --git a/src/mpl2mpl/src/vtable_analysis.cpp b/src/mpl2mpl/src/vtable_analysis.cpp
index 19ad4619513b11186bce52bdb9a662fa78a83e29..98dcedf29f9751f82ce858ca5af2a620fbf1c7fb 100644
--- a/src/mpl2mpl/src/vtable_analysis.cpp
+++ b/src/mpl2mpl/src/vtable_analysis.cpp
@@ -295,7 +295,7 @@ void VtableAnalysis::GenItableDefinition(const Klass &klass) {
     if (count != 0) {
       auto *secondItabEmitArray = GetMIRModule().GetMemPool()->New<MIRAggConst>(GetMIRModule(), *voidPtrType);
       // remember count in secondItabVec
-      count = ((secondConflictList.size() | (1UL << (kShiftCountBit - 1))) << kShiftCountBit) + count;
+      count = ((secondConflictList.size() | (1ULL << (kShiftCountBit - 1))) << kShiftCountBit) + count;
       secondItabEmitArray->PushBack(GlobalTables::GetIntConstTable().GetOrCreateIntConst(count, *voidPtrType));
       secondItabEmitArray->PushBack(oneConst);  // padding
       for (uint32 i = 0; i < kItabSecondHashSize; ++i) {
diff --git a/src/mpl2mpl/src/vtable_impl.cpp b/src/mpl2mpl/src/vtable_impl.cpp
index a1d817ef6fb37c21179a0f5b07caeb472746f9e0..88cb649a26c1ca0659215849ee96f558faf5b906 100644
--- a/src/mpl2mpl/src/vtable_impl.cpp
+++ b/src/mpl2mpl/src/vtable_impl.cpp
@@ -32,7 +32,45 @@ VtableImpl::VtableImpl(MIRModule &mod, KlassHierarchy *kh, bool dump)
   mccItabFunc = builder->GetOrCreateFunction(kInterfaceMethod, TyIdx(PTY_ptr));
   mccItabFunc->SetAttr(FUNCATTR_nosideeffect);
 }
-
+#if TARGARM || TARGAARCH64
+bool VtableImpl::Intrinsify(MIRFunction &func, CallNode &cnode) {
+  MIRFunction *calleeFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(cnode.GetPUIdx());
+  const std::string funcName = calleeFunc->GetName();
+  MIRIntrinsicID intrnId = INTRN_UNDEFINED;
+  if (funcName == "Lsun_2Fmisc_2FUnsafe_3B_7CgetAndAddInt_7C_28Ljava_2Flang_2FObject_3BJI_29I") {
+    intrnId = INTRN_GET_AND_ADDI;
+  } else if (funcName == "Lsun_2Fmisc_2FUnsafe_3B_7CgetAndAddLong_7C_28Ljava_2Flang_2FObject_3BJJ_29J") {
+    intrnId = INTRN_GET_AND_ADDL;
+  } else if (funcName == "Lsun_2Fmisc_2FUnsafe_3B_7CgetAndSetInt_7C_28Ljava_2Flang_2FObject_3BJI_29I") {
+    intrnId = INTRN_GET_AND_SETI;
+  } else if (funcName == "Lsun_2Fmisc_2FUnsafe_3B_7CgetAndSetLong_7C_28Ljava_2Flang_2FObject_3BJJ_29J") {
+    intrnId = INTRN_GET_AND_SETL;
+  } else if (funcName == "Lsun_2Fmisc_2FUnsafe_3B_7CcompareAndSwapInt_7C_28Ljava_2Flang_2FObject_3BJII_29Z") {
+    intrnId = INTRN_COMP_AND_SWAPI;
+  } else if (funcName == "Lsun_2Fmisc_2FUnsafe_3B_7CcompareAndSwapLong_7C_28Ljava_2Flang_2FObject_3BJJJ_29Z") {
+    intrnId = INTRN_COMP_AND_SWAPL;
+  }
+  if (intrnId == INTRN_UNDEFINED) {
+    return false;
+  }
+  CallReturnVector retvs = cnode.GetReturnVec();
+  if (!retvs.empty()) {
+    StIdx stidx = retvs.begin()->first;
+    StmtNode *intrnCallStmt = nullptr;
+    if (stidx.Idx() != 0) {
+      MIRSymbol *retSt = currFunc->GetLocalOrGlobalSymbol(stidx);
+      intrnCallStmt = builder->CreateStmtIntrinsicCallAssigned(intrnId, cnode.GetNopnd(), retSt);
+    } else {
+      ASSERT(retvs.begin()->second.IsReg(), "return value must be preg");
+      PregIdx pregIdx = retvs.begin()->second.GetPregIdx();
+      intrnCallStmt = builder->CreateStmtIntrinsicCallAssigned(intrnId, cnode.GetNopnd(), pregIdx);
+    }
+    func.GetBody()->ReplaceStmt1WithStmt2(&cnode, intrnCallStmt);
+    return true;
+  }
+  return false;
+}
+#endif
 void VtableImpl::ProcessFunc(MIRFunction *func) {
   if (func->IsEmpty()) {
     return;
@@ -43,6 +81,25 @@ void VtableImpl::ProcessFunc(MIRFunction *func) {
   while (stmt != nullptr) {
     next = stmt->GetNext();
     Opcode opcode = stmt->GetOpCode();
+#if TARGARM || TARGAARCH64
+    if (kOpcodeInfo.IsCallAssigned(opcode)) {
+      CallNode *cnode = static_cast<CallNode*>(stmt);
+      MIRFunction *calleefunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(cnode->GetPUIdx());
+      const std::set<std::string> intrisicsList {
+#define DEF_MIR_INTRINSIC(X, NAME, INTRN_CLASS, RETURN_TYPE, ...) NAME,
+#include "simplifyintrinsics.def"
+#undef DEF_MIR_INTRINSIC
+      };
+      const std::string funcName = calleefunc->GetName();
+      if (Options::O2 && intrisicsList.find(funcName) != intrisicsList.end() &&
+          funcName != "Ljava_2Flang_2FString_3B_7CindexOf_7C_28Ljava_2Flang_2FString_3B_29I") {
+        if (Intrinsify(*func, *cnode)) {
+          stmt = next;
+          continue;
+        }
+      }
+    }
+#endif
     switch (opcode) {
       case OP_regassign: {
         auto *regassign = static_cast<RegassignNode*>(stmt);
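The `Intrinsify` hook added above rewrites recognized `sun.misc.Unsafe` calls into Maple intrinsics (`INTRN_GET_AND_ADDI`, `INTRN_COMP_AND_SWAPI`, and so on) so the backend can expand them as inline atomic sequences rather than out-of-line calls. Their intended semantics correspond to the standard atomic read-modify-write operations; roughly, as reference semantics only (orderings here are illustrative, the backend may choose differently):

```cpp
#include <cstdint>

// INTRN_GET_AND_ADDI: atomically add delta, return the old value.
int32_t GetAndAddInt(volatile int32_t *addr, int32_t delta) {
  return __atomic_fetch_add(addr, delta, __ATOMIC_SEQ_CST);
}

// INTRN_GET_AND_SETL: atomically exchange the stored value.
int64_t GetAndSetLong(volatile int64_t *addr, int64_t value) {
  return __atomic_exchange_n(addr, value, __ATOMIC_SEQ_CST);
}

// INTRN_COMP_AND_SWAPI: compare-and-swap, returns whether it succeeded.
bool CompareAndSwapInt(volatile int32_t *addr, int32_t expect, int32_t update) {
  return __atomic_compare_exchange_n(addr, &expect, update, false,
                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
```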
diff --git a/src/mplfe/common/include/fe_struct_elem_info.h b/src/mplfe/common/include/fe_struct_elem_info.h
index 0da49057bbfe91baa6d31c1962112e510b0c5fc0..0a5d1fcae610473362e465d47c9f946c490050fd 100644
--- a/src/mplfe/common/include/fe_struct_elem_info.h
+++ b/src/mplfe/common/include/fe_struct_elem_info.h
@@ -172,7 +172,7 @@ class FEStructMethodInfo : public FEStructElemInfo {
     return argTypes;
   }
 
-  static std::map<GStrIdx, std::set<GStrIdx>> InitJavaPolymorphicWhiteList();
+  static void InitJavaPolymorphicWhiteList();
 
 LLT_PROTECTED:
   void PrepareImpl(MIRBuilder &mirBuilder, bool argIsStatic) override;
diff --git a/src/mplfe/common/src/fe_struct_elem_info.cpp b/src/mplfe/common/src/fe_struct_elem_info.cpp
index a2b77240af378344a8c1546bf5e0845fcebf14e6..77e271510414a50420eb81c7859c4d489a552b7b 100644
--- a/src/mplfe/common/src/fe_struct_elem_info.cpp
+++ b/src/mplfe/common/src/fe_struct_elem_info.cpp
@@ -214,8 +214,7 @@ bool FEStructFieldInfo::CompareFieldType(const FieldPair &fieldPair) const {
 }
 
 // ---------- FEStructMethodInfo ----------
-std::map<GStrIdx, std::set<GStrIdx>> FEStructMethodInfo::javaPolymorphicWhiteList =
-    FEStructMethodInfo::InitJavaPolymorphicWhiteList();
+std::map<GStrIdx, std::set<GStrIdx>> FEStructMethodInfo::javaPolymorphicWhiteList;
 
 FEStructMethodInfo::FEStructMethodInfo(const GStrIdx &argFullNameIdx, MIRSrcLang argSrcLang, bool argIsStatic)
     : FEStructElemInfo(argFullNameIdx, argSrcLang, argIsStatic),
@@ -228,9 +227,9 @@ FEStructMethodInfo::FEStructMethodInfo(const GStrIdx &argFullNameIdx, MIRSrcLang
   LoadMethodType();
 }
 
-std::map<GStrIdx, std::set<GStrIdx>> FEStructMethodInfo::InitJavaPolymorphicWhiteList() {
+void FEStructMethodInfo::InitJavaPolymorphicWhiteList() {
   MPLFE_PARALLEL_FORBIDDEN();
-  std::map<GStrIdx, std::set<GStrIdx>> ans;
+  std::map<GStrIdx, std::set<GStrIdx>> &ans = javaPolymorphicWhiteList;
   StringTable &strTable = GlobalTables::GetStrTable();
   GStrIdx idxMethodHandle =
       strTable.GetOrCreateStrIdxFromName(NameMangler::EncodeName("Ljava/lang/invoke/MethodHandle;"));
@@ -238,7 +237,7 @@ std::map<GStrIdx, std::set<GStrIdx>> FEStructMethodInfo::InitJavaPolymorphicWhit
   success = success && ans[idxMethodHandle].insert(strTable.GetOrCreateStrIdxFromName("invoke")).second;
   success = success && ans[idxMethodHandle].insert(strTable.GetOrCreateStrIdxFromName("invokeBasic")).second;
   success = success && ans[idxMethodHandle].insert(strTable.GetOrCreateStrIdxFromName("invokeExact")).second;
-  return ans;
+  CHECK_FATAL(success, "error occurs");
 }
 
 PUIdx FEStructMethodInfo::GetPuIdx() const {
diff --git a/src/mplfe/common/src/mplfe_compiler.cpp b/src/mplfe/common/src/mplfe_compiler.cpp
index 1d697621438626997c74cb21e7387817cb8707c6..a3932243f3f7d21ab5eec01c17abb3ff0be24f87 100644
--- a/src/mplfe/common/src/mplfe_compiler.cpp
+++ b/src/mplfe/common/src/mplfe_compiler.cpp
@@ -30,6 +30,7 @@ MPLFECompiler::~MPLFECompiler() {
 
 void MPLFECompiler::Init() {
   FEManager::Init(module);
+  FEStructMethodInfo::InitJavaPolymorphicWhiteList();
 }
 
 void MPLFECompiler::Release() {
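On the `InitJavaPolymorphicWhiteList` change: populating the white list from a static member's dynamic initializer runs at an unspecified point relative to other translation units' globals (and it calls into `GlobalTables::GetStrTable()`), so this rework likely sidesteps the classic static-initialization-order hazard by leaving the map empty and filling it explicitly from `MPLFECompiler::Init()`. A minimal sketch of the hazard and the fix (illustrative names only, not the Maple types):

```cpp
#include <map>
#include <string>

// Defined in another translation unit; its construction order relative
// to this file's globals is unspecified by C++.
extern std::map<int, std::string> globalStrTable;

// Hazard: this initializer may run before globalStrTable is constructed.
// static std::map<int, std::string> whiteList = BuildFrom(globalStrTable);

// Fix: construct the object empty, fill it later.
std::map<int, std::string> whiteList;

void InitWhiteList() {  // called once from the compiler's Init()
  whiteList[0] = globalStrTable[0];
}
```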
diff --git a/src/mplfe/jbc_input/include/jbc_opcode.def b/src/mplfe/jbc_input/include/jbc_opcode.def
index d05768413795a8076583070bfc12da7fb38bef15..3da3cd64370e29344f5eb2435d2d48a9bc8982b5 100644
--- a/src/mplfe/jbc_input/include/jbc_opcode.def
+++ b/src/mplfe/jbc_input/include/jbc_opcode.def
@@ -214,7 +214,7 @@ JBC_OP(MultiANewArray, 0xC5, MultiANewArray, "multianewarray", (kOpFlagFallThru
 JBC_OP(IfNull, 0xC6, Branch, "ifnull", (kOpFlagFallThru | kOpFlagBranch))
 JBC_OP(IfNonNull, 0xC7, Branch, "ifnonnull", (kOpFlagFallThru | kOpFlagBranch))
 JBC_OP(GotoW, 0xC8, Goto, "goto_w", (kOpFlagBranch))
-JBC_OP(JsrW, 0xC9, Reversed, "jsr_w", (kOpFlagBranch))
+JBC_OP(JsrW, 0xC9, Jsr, "jsr_w", (kOpFlagBranch))
 JBC_OP(BreakPoint, 0xCA, Reversed, "breakpoint", (kOpFlagFallThru))
 JBC_OP(UnusedCB, 0xCB, Unused, "unused_cb", (kOpFlagFallThru))
 JBC_OP(UnusedCC, 0xCC, Unused, "unused_cc", (kOpFlagFallThru))