diff --git a/llvm/lib/Target/XVM/MCTargetDesc/XVMInstPrinter.cpp b/llvm/lib/Target/XVM/MCTargetDesc/XVMInstPrinter.cpp index cf1caaffbc811244a5088eeb90cebe8b5bf0bfcf..f45761417f14c020d14da6eb45f33ef11a1b0456 100644 --- a/llvm/lib/Target/XVM/MCTargetDesc/XVMInstPrinter.cpp +++ b/llvm/lib/Target/XVM/MCTargetDesc/XVMInstPrinter.cpp @@ -10,6 +10,172 @@ // //===----------------------------------------------------------------------===// #ifdef XVM_DYLIB_MODE -// Insert the XVM backend code here + +#include "../XVM_def.h" +#include "MCTargetDesc/XVMInstPrinter.h" +#include "llvm/MC/MCAsmInfo.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/FormattedStream.h" +using namespace llvm; +using namespace std; +#define DEBUG_TYPE "asm-printer" +#define GET_INSTRINFO_CTOR_DTOR + +// Include the auto-generated portion of the assembly writer. +#include "XVMGenAsmWriter.inc" + +void XVMInstPrinter::printInst(const MCInst *MI, uint64_t Address, + StringRef Annot, const MCSubtargetInfo &STI, + raw_ostream &O) { + unsigned int MCInstFlag = MI->getFlags(); + MCOperand MCOp = MI->getOperand(MI->getNumOperands() - 1); + if (MCOp.isImm()) { + unsigned Indent = MCOp.getImm(); + for (unsigned i = 0; i <= Indent; ++i) { + O << "\t"; + } + } + switch(MCInstFlag) { + default: + printInstruction(MI, Address, O); + break; + case FUNC_CALL_FLAG_MC_INST_IMM: + printCallInstructionImm(MI, O); + break; + case FUNC_CALL_FLAG_MC_INST_REG: + printCallInstructionReg(MI, O); + break; + case FUNC_ID_FLAG_MC_INST_REG: + printMovWithFuncID(MI, O); + break; + case GLOBAL_DATAREF_FLAG_MC_INST: + printDataRefWithGlobalID(MI, O); + break; + } + printAnnotation(O, Annot); +} + +void XVMInstPrinter::printCallInstructionImm(const MCInst *MI, raw_ostream &O) { + assert(MI->getNumOperands() >= 2); + O << "\t"; + auto MnemonicInfo = getMnemonic(MI); + O << MnemonicInfo.first; + 
+ assert(MI->getOperand(1).isImm()); + O << MI->getOperand(1).getImm(); +} + +void XVMInstPrinter::printMovWithFuncID(const MCInst *MI, raw_ostream &O) { + const MCOperand & Op0 = MI->getOperand(0); + assert(Op0.isReg()); + const MCOperand & Op1 = MI->getOperand(1); + assert(Op1.isExpr()); + const MCOperand & Op2 = MI->getOperand(2); + assert(Op2.isImm()); + O << "\t"; + O << "mov " << getRegisterName(Op0.getReg()) << ", #" << Op2.getImm(); +} + +void XVMInstPrinter::printDataRefWithGlobalID(const MCInst *MI, raw_ostream &O) { + assert(MI->getNumOperands() >= 3); + const MCOperand & Op0 = MI->getOperand(0); + assert(Op0.isReg()); + const MCOperand & Op1 = MI->getOperand(1); + assert(Op1.isExpr()); + const MCOperand & Op2 = MI->getOperand(2); + assert(Op2.isImm()); + O << "\t"; + O << "dataref " << getRegisterName(Op0.getReg()) << ", #" << Op2.getImm(); +} + +void XVMInstPrinter::printCallInstructionReg(const MCInst *MI, raw_ostream &O) { + assert(MI->getNumOperands() >= 2); + O << "\t"; + auto MnemonicInfo = getMnemonic(MI); + O << MnemonicInfo.first; + + assert(MI->getOperand(1).isReg()); + O << getRegisterName(MI->getOperand(1).getReg()); +} + +static void printExpr(const MCExpr *Expr, raw_ostream &O) { +#ifndef NDEBUG + const MCSymbolRefExpr *SRE; + + if (const MCBinaryExpr *BE = dyn_cast(Expr)) + SRE = dyn_cast(BE->getLHS()); + else + SRE = dyn_cast(Expr); + assert(SRE && "Unexpected MCExpr type."); + + MCSymbolRefExpr::VariantKind Kind = SRE->getKind(); + + assert(Kind == MCSymbolRefExpr::VK_None); +#endif + O << *Expr; +} + +void XVMInstPrinter::printOperand(const MCInst *MI, unsigned OpNo, + raw_ostream &O, const char *Modifier) { + assert((Modifier == nullptr || Modifier[0] == 0) && "No modifiers supported"); + const MCOperand &Op = MI->getOperand(OpNo); + if (Op.isReg()) { + O << getRegisterName(Op.getReg()); + } else if (Op.isImm()) { + O << formatImm((int32_t)Op.getImm()); + } else { + assert(Op.isExpr() && "Expected an expression"); + 
printExpr(Op.getExpr(), O); + } +} + +void XVMInstPrinter::printMemOperand(const MCInst *MI, int OpNo, raw_ostream &O, + const char *Modifier) { + const MCOperand &RegOp = MI->getOperand(OpNo); + const MCOperand &OffsetOp = MI->getOperand(OpNo + 1); + + // register + assert(RegOp.isReg() && "Register operand not a register"); + O << getRegisterName(RegOp.getReg()); + + // offset + if (OffsetOp.isImm()) { + auto Imm = OffsetOp.getImm(); + if (Imm == 0) + O << ", #" << formatImm(Imm); + else + O << ", #" << formatImm(Imm); + } else { + assert(0 && "Expected an immediate"); + } +} + +void XVMInstPrinter::printImm64Operand(const MCInst *MI, unsigned OpNo, + raw_ostream &O) { + const MCOperand &Op = MI->getOperand(OpNo); + if (Op.isImm()) + O << formatImm(Op.getImm()); + else if (Op.isExpr()) + printExpr(Op.getExpr(), O); + else + O << Op; +} + +void XVMInstPrinter::printBrTargetOperand(const MCInst *MI, unsigned OpNo, + raw_ostream &O) { + const MCOperand &Op = MI->getOperand(OpNo); + if (Op.isImm()) { + int16_t Imm = Op.getImm(); + O << ((Imm >= 0) ? 
"+" : "") << formatImm(Imm); + } else if (Op.isExpr()) { + printExpr(Op.getExpr(), O); + } else { + O << Op; + } +} #endif diff --git a/llvm/lib/Target/XVM/MCTargetDesc/XVMMCTargetDesc.cpp b/llvm/lib/Target/XVM/MCTargetDesc/XVMMCTargetDesc.cpp index 2415d5f19aeec69378182f86b4eb78717e1cdadf..ce2b3194ff33f5847f9ae17663ecfc9ec9a1a432 100644 --- a/llvm/lib/Target/XVM/MCTargetDesc/XVMMCTargetDesc.cpp +++ b/llvm/lib/Target/XVM/MCTargetDesc/XVMMCTargetDesc.cpp @@ -10,9 +10,120 @@ // //===----------------------------------------------------------------------===// #ifdef XVM_DYLIB_MODE -// Insert the XVM backend code here -#include "llvm/Support/Compiler.h" +#include "MCTargetDesc/XVMMCTargetDesc.h" +#include "MCTargetDesc/XVMInstPrinter.h" +#include "MCTargetDesc/XVMMCAsmInfo.h" +#include "MCTargetDesc/XVMTargetStreamer.h" +#include "TargetInfo/XVMTargetInfo.h" +#include "llvm/MC/MCInstrAnalysis.h" +#include "llvm/MC/MCInstrInfo.h" +#include "llvm/MC/MCRegisterInfo.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/MC/TargetRegistry.h" +#include "llvm/Support/Host.h" + +#define GET_INSTRINFO_MC_DESC +#define ENABLE_INSTR_PREDICATE_VERIFIER +#include "XVMGenInstrInfo.inc" + +#define GET_SUBTARGETINFO_MC_DESC +#include "XVMGenSubtargetInfo.inc" + +#define GET_REGINFO_MC_DESC +#include "XVMGenRegisterInfo.inc" + +using namespace llvm; + +static MCInstrInfo *createXVMMCInstrInfo() { + MCInstrInfo *X = new MCInstrInfo(); + InitXVMMCInstrInfo(X); + return X; +} + +static MCRegisterInfo *createXVMMCRegisterInfo(const Triple &TT) { + MCRegisterInfo *X = new MCRegisterInfo(); + InitXVMMCRegisterInfo(X, XVM::R11 /* RAReg doesn't exist */); + return X; +} + +static MCSubtargetInfo *createXVMMCSubtargetInfo(const Triple &TT, + StringRef CPU, StringRef FS) { + return createXVMMCSubtargetInfoImpl(TT, CPU, /*TuneCPU*/ CPU, FS); +} + +static MCStreamer *createXVMMCStreamer(const Triple &T, MCContext &Ctx, + std::unique_ptr &&MAB, + std::unique_ptr &&OW, + std::unique_ptr &&Emitter, 
+ bool RelaxAll) { + return createELFStreamer(Ctx, std::move(MAB), std::move(OW), std::move(Emitter), + RelaxAll); +} + +static MCTargetStreamer *createTargetAsmStreamer(MCStreamer &S, + formatted_raw_ostream &, + MCInstPrinter *, bool) { + return new XVMTargetStreamer(S); +} + + +static MCInstPrinter *createXVMMCInstPrinter(const Triple &T, + unsigned SyntaxVariant, + const MCAsmInfo &MAI, + const MCInstrInfo &MII, + const MCRegisterInfo &MRI) { + if (SyntaxVariant == 0) + return new XVMInstPrinter(MAI, MII, MRI); + return nullptr; +} + +namespace { + +class XVMMCInstrAnalysis : public MCInstrAnalysis { +public: + explicit XVMMCInstrAnalysis(const MCInstrInfo *Info) + : MCInstrAnalysis(Info) {} + +}; + +} // end anonymous namespace + +static MCInstrAnalysis *createXVMInstrAnalysis(const MCInstrInfo *Info) { + return new XVMMCInstrAnalysis(Info); +} + + extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeXVMTargetMC() { + for (Target *T: {&getTheXVMleTarget(), &getTheXVMTarget()}) { + // Register the MC asm info. + RegisterMCAsmInfo X(*T); + + // Register the MC instruction info. + TargetRegistry::RegisterMCInstrInfo(*T, createXVMMCInstrInfo); + + // Register the MC register info. + TargetRegistry::RegisterMCRegInfo(*T, createXVMMCRegisterInfo); + + // Register the MC subtarget info. + TargetRegistry::RegisterMCSubtargetInfo(*T, + createXVMMCSubtargetInfo); + + // Register the object streamer + TargetRegistry::RegisterELFStreamer(*T, createXVMMCStreamer); + + // Register the MCTargetStreamer. + TargetRegistry::RegisterAsmTargetStreamer(*T, createTargetAsmStreamer); + + // Register the MCInstPrinter. + TargetRegistry::RegisterMCInstPrinter(*T, createXVMMCInstPrinter); + + // Register the MC instruction analyzer. 
+ TargetRegistry::RegisterMCInstrAnalysis(*T, createXVMInstrAnalysis); + } +} + +extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeXVMTargetMCCalledInDylib() { + LLVMInitializeXVMTargetMC(); } #else diff --git a/llvm/lib/Target/XVM/MCTargetDesc/XVMTargetStreamer.cpp b/llvm/lib/Target/XVM/MCTargetDesc/XVMTargetStreamer.cpp index 872e3efb4fd913a79ca662b9573a57bc4fb56a56..c7ac3ae6bc9427a11b4fc0a432c0127227905598 100644 --- a/llvm/lib/Target/XVM/MCTargetDesc/XVMTargetStreamer.cpp +++ b/llvm/lib/Target/XVM/MCTargetDesc/XVMTargetStreamer.cpp @@ -10,6 +10,22 @@ // //===----------------------------------------------------------------------===// #ifdef XVM_DYLIB_MODE -// Insert the XVM backend code here + +#include "XVMTargetStreamer.h" + +using namespace llvm; + +// +// XVMTargetStreamer Implemenation +// +XVMTargetStreamer::XVMTargetStreamer(MCStreamer &S) : MCTargetStreamer(S) {} + +XVMTargetStreamer::~XVMTargetStreamer() = default; + +void XVMTargetStreamer::changeSection(const MCSection *CurSection, + MCSection *Section, + const MCExpr *SubSection, + raw_ostream &OS) { +} #endif diff --git a/llvm/lib/Target/XVM/TargetInfo/XVMTargetInfo.cpp b/llvm/lib/Target/XVM/TargetInfo/XVMTargetInfo.cpp index 0af0638eeab10ad5c67f09c959651db36f9f65dd..2e605b364ffd5d1d397b51ee578f6b45b9346f61 100644 --- a/llvm/lib/Target/XVM/TargetInfo/XVMTargetInfo.cpp +++ b/llvm/lib/Target/XVM/TargetInfo/XVMTargetInfo.cpp @@ -6,9 +6,29 @@ // //===----------------------------------------------------------------------===// #ifdef XVM_DYLIB_MODE -// Insert the XVM backend code here -#include "llvm/Support/Compiler.h" + +#include "TargetInfo/XVMTargetInfo.h" +#include "llvm/MC/TargetRegistry.h" + +using namespace llvm; + +Target &llvm::getTheXVMTarget() { + static Target TheXVMTarget; + return TheXVMTarget; +} + +Target &llvm::getTheXVMleTarget() { + static Target TheXVMleTarget; + return TheXVMleTarget; +} + extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeXVMTargetInfo() { + RegisterTarget 
X( + getTheXVMleTarget(), "xvm", "XVM (little endian)", "XVM"); +} + +extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeXVMTargetInfoCalledInDylib() { + LLVMInitializeXVMTargetInfo(); } #else diff --git a/llvm/lib/Target/XVM/XVMAsmPrinter.cpp b/llvm/lib/Target/XVM/XVMAsmPrinter.cpp index 4632e3e1eace4172cba3948d619a92f1c9cb1888..462e2c8b5cb7a107b9650396d6dbc983c952d29a 100644 --- a/llvm/lib/Target/XVM/XVMAsmPrinter.cpp +++ b/llvm/lib/Target/XVM/XVMAsmPrinter.cpp @@ -11,9 +11,1148 @@ // //===----------------------------------------------------------------------===// #ifdef XVM_DYLIB_MODE -// Insert the XVM backend code here -#include "llvm/Support/Compiler.h" + +#include "XVM_def.h" +#include "XVM.h" +#include "XVMInstrInfo.h" +#include "XVMTargetMachine.h" +#include "XVMMCInstLower.h" +#include "MCTargetDesc/XVMInstPrinter.h" +#include "TargetInfo/XVMTargetInfo.h" +#include "llvm/Analysis/ConstantFolding.h" +#include "llvm/CodeGen/AsmPrinter.h" +#include "llvm/CodeGen/MachineConstantPool.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineModuleInfo.h" +#include "llvm/MC/MCAsmInfo.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/MC/TargetRegistry.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" +#include +#include +#include +using namespace llvm; + +#define DEBUG_TYPE "asm-printer" + +static cl::opt XVMExportAll("xvm-export-all", + cl::Hidden, cl::init(false), + cl::desc("Exports all function")); + +namespace { +class XVMAsmPrinter : public AsmPrinter { +public: + explicit XVMAsmPrinter(TargetMachine &TM, + std::unique_ptr Streamer) + : AsmPrinter(TM, std::move(Streamer)) {} + + StringRef getPassName() const override { return "XVM Assembly Printer"; } + bool doInitialization(Module &M) override; + void printOperand(const MachineInstr *MI, int OpNum, raw_ostream &O); + bool 
PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, + const char *ExtraCode, raw_ostream &O) override; + bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum, + const char *ExtraCode, raw_ostream &O) override; + + void emitInstruction(const MachineInstr *MI) override; + void emitFunctionHeader() override; + void emitFunctionParamList(const MachineFunction &MF, raw_ostream &O); + void emitFunctionReturnVal(const MachineFunction &MF, raw_ostream &O); + void emitFunctionBodyEnd() override; + void emitStartOfAsmFile(Module &M) override; + void emitEndOfAsmFile(Module &M) override; + void emitBasicBlockStart(const MachineBasicBlock &MBB) override {}; + void emitGlobalVariable(const GlobalVariable *GV) override; + bool runOnMachineFunction(MachineFunction & MF) override; + +private: + const Function *F; + void emitDecls(const Module &M); + void setFunctionCallInfo(MCInst * Inst); + void setGlobalSymbolInfo(const MachineInstr *MI, MCInst* Inst); + DenseMap MIIndentMap; + + void GetMIIndent(MachineFunction &MF); + + void InitModuleMapFuncnameIndex(Module *M); + void InitDataSectionGlobalConstant(Module *M); + + void emitDataSectionInfo(raw_svector_ostream &O); + + inline void emitFunctionParamList(const Function &F, raw_ostream &O); + inline void emitFunctionReturnVal(const Function &F, raw_ostream &O); + + void InitGlobalConstantImpl(const DataLayout &DL, const Constant *CV, + const Constant *BaseCV, + uint64_t Offset, XVMSectionInfo* SInfo); + void InitGlobalConstantDataSequential(const DataLayout &DL, + const ConstantDataSequential *CDS, XVMSectionInfo* SInfo); + void InitGlobalConstantArray(const DataLayout &DL, + const ConstantArray *CA, + const Constant *BaseCV, uint64_t Offset, + XVMSectionInfo* SInfo); + void InitGlobalConstantStruct(const DataLayout &DL, + const ConstantStruct *CS, + const Constant *BaseCV, uint64_t Offset, + XVMSectionInfo* SInfo); +}; +} // namespace + +typedef struct SecSubSecIndices { + unsigned char SecNameIndex; + unsigned 
int SubSecNameIndex; +} SecSubSecIndices; +static std::map FunctionNameAndIndex; +static std::map FunctionDefinitionMap; +static std::map DataSectionNameIndexMap; +static std::map DataSectionIndexInfoMap; + +template +inline std::string UnsignedIntTypeToHex(T V, size_t W=sizeof(T)*2) +{ + std::stringstream SS; + std::string RS; + SS << std::setfill('0') << std::setw(W) << std::hex << (V|0); + for(unsigned Index=0; Index>= 8; + } + return Result; +} +#define DATA_SECTION 0 +#define DATA_SUB_SECTION 1 + +#define XVM_SD_SEG_START 4 +static uint64_t CreateRefContent(int ToDataSecID, uint64_t ToOffset) { + uint64_t seg_index = XVM_SD_SEG_START + ToDataSecID; + uint64_t ReTRefData = 0; + ReTRefData = (seg_index & 0x00000000000FFFFF) << 44; + ReTRefData = ReTRefData | (ToOffset & 0x00000FFFFFFFFFFF); + return ReTRefData; +} + +static int GetDataIndex(const char * _name, const unsigned char SecOrSubSection) { + std::map::iterator I = DataSectionNameIndexMap.find(_name); + if (I == DataSectionNameIndexMap.end()) { + return -1; + } else { + if (SecOrSubSection == DATA_SECTION) + return I->second.SecNameIndex; + else + return I->second.SubSecNameIndex; + } +} + +int GetFuncIndex(const char * _name) { + std::map::iterator I = FunctionNameAndIndex.find(_name); + if (I == FunctionNameAndIndex.end()) { + return -1; + } else { + return I->second; + } +} + +static int GetDefFuncIndex(const char * _name) { + std::map::iterator I = FunctionDefinitionMap.find(_name); + if (I == FunctionDefinitionMap.end()) + return -1; + return I->second; +} + +static inline unsigned int GetPtrLevel(short PtrSecIndex) { + if (PtrSecIndex == -1) + return 0; + unsigned int ret = 0; + std::map::iterator I = DataSectionIndexInfoMap.find(PtrSecIndex); + if (I == DataSectionIndexInfoMap.end()) { + return 0; + } else { + if ((I->second.BufType & XVM_SECTION_DATA_TYPE_POINTER) != XVM_SECTION_DATA_TYPE_UNKNOWN) { + return GetPtrLevel(I->second.PtrSecIndex) + ret + 1; + } + } + return ret; +} + +/* Checks the 
global variable's name */ +static bool IsGlobalVariableType(const char *FuncName, const Module *M, const char *type) { + GlobalVariable *GV = M->getGlobalVariable(type); + if (GV) { + const ConstantArray *InitList = dyn_cast(GV->getInitializer()); + if (!InitList) { + return false; + } + + for (Value *P : InitList->operands()) { + ConstantStruct *CS = dyn_cast(P); + if (!CS) continue; + + const char *Associated = CS->getOperand(1)->getName().data(); + if (strcmp(Associated, FuncName) == 0) { + return true; + } + } + } + return false; +} + +static bool IsConstructorDestructor(const char *FuncName, const Module *M) { + return IsGlobalVariableType(FuncName, M, "llvm.global_dtors") || + IsGlobalVariableType(FuncName, M, "llvm.global_ctors"); +} + +unsigned int GetPtrRegisterLevelBasedOnName(const char * _name) { + int DataIndex = GetDataIndex(_name, DATA_SUB_SECTION); + if (DataIndex != -1) { + std::map::iterator I = DataSectionIndexInfoMap.find(DataIndex); + if (I == DataSectionIndexInfoMap.end()) { + return 0; + } + if ((I->second.BufType & XVM_SECTION_DATA_TYPE_POINTER) != XVM_SECTION_DATA_TYPE_UNKNOWN) { + unsigned int ret = 1; + return ret + GetPtrLevel(I->second.PtrSecIndex); + } + } + return 0; +} + +uint64_t GetSubSecOffsetForGlobal(const char * _name) { + std::map::iterator I = DataSectionNameIndexMap.find(_name); + if (I == DataSectionNameIndexMap.end()) { + return -1; + } else { + std::map::iterator I1 = DataSectionIndexInfoMap.find(I->second.SubSecNameIndex); + if (I1 == DataSectionIndexInfoMap.end()) + return -1; + return I1->second.SubSecOffset; + } +} + +static int GetOffsetInDataSection(const char * _name) { + int DataIndex = GetDataIndex(_name, DATA_SUB_SECTION); + if (DataIndex != -1) { + std::map::iterator I = DataSectionIndexInfoMap.find(DataIndex); + if (I == DataSectionIndexInfoMap.end()) { + return -1; + } + return I->second.SubSecOffset; + } + return -1; +} + +static int GetMergedSectionIndex(const std::string &_SymName) { + for (auto& I : 
DataSectionIndexInfoMap) { + XVMSectionInfo & SInfo = I.second; + if (SInfo.SymName.compare(_SymName) == 0) { + return SInfo.MergedSecIndex; + } + } + return -1; +} + +static inline void PatchSectionInfo(void) { + int MergedSectionIndex = 0; + // Assign the merged section index to each section + std::map _SectionNameIndex; + for (auto& I : DataSectionIndexInfoMap) { + XVMSectionInfo & SInfo = I.second; + std::map::iterator It = _SectionNameIndex.find(SInfo.SecName); + if (It == _SectionNameIndex.end()) { + _SectionNameIndex[SInfo.SecName] = MergedSectionIndex++; + } + } + // patch merged section index + for (auto& I : DataSectionIndexInfoMap) { + XVMSectionInfo & SInfo = I.second; + SInfo.MergedSecIndex = _SectionNameIndex[SInfo.SecName]; + } + + // patch offset + std::map SectionSizes; + for (auto& I : DataSectionIndexInfoMap) { + XVMSectionInfo & SInfo = I.second; + if (SectionSizes.find(SInfo.MergedSecIndex) == SectionSizes.end()) { + SInfo.SubSecOffset = 0; + SectionSizes[SInfo.MergedSecIndex] = SInfo.SecSize; + } else { + SInfo.SubSecOffset = SectionSizes[SInfo.MergedSecIndex]; + SectionSizes[SInfo.MergedSecIndex] += SInfo.SecSize; + } + } + + // patch the merged section index + for (auto & E: DataSectionNameIndexMap) { + int _MergedSectionIndex = GetMergedSectionIndex(E.first); + if (_MergedSectionIndex != -1) { + E.second.SecNameIndex = _MergedSectionIndex; + } + } + + for (auto& I : DataSectionIndexInfoMap) { + XVMSectionInfo & SInfo = I.second; + for(auto & EachPatch : SInfo.PatchListInfo) { + uint64_t DataSectionOffset = GetOffsetInDataSection(EachPatch.SymName.data()); + DataSectionOffset += EachPatch.AddEnd; + SInfo.PtrSecIndex = GetDataIndex(EachPatch.SymName.data(), DATA_SUB_SECTION); + int DataSectionIndex = GetDataIndex(EachPatch.SymName.data(), DATA_SECTION); + LLVM_DEBUG(dbgs() << "Add to Buf: " + << UnsignedIntTypeToHex(ReverseBytes( + CreateRefContent(DataSectionIndex, DataSectionOffset), + 8), + 8*2).c_str() + << " size=" << 8 << "\n"); + 
SInfo.SecBuf += UnsignedIntTypeToHex( + ReverseBytes(CreateRefContent(DataSectionIndex, DataSectionOffset), 8), 16); + } + } +} + +static inline std::string GetDataSectionPerm(XVMSectionInfo* SInfo) { + assert(SInfo->Permission != XVM_SECTION_PERM_UNKNOWN && "Permission Unset"); + switch(SInfo->Permission) { + case XVM_SECTION_PERM_RO: + return "ro"; + break; + case XVM_SECTION_PERM_WO: + return "wo"; + break; + case XVM_SECTION_PERM_XO: + return "xo"; + break; + case XVM_SECTION_PERM_RW: + return "rw"; + break; + case XVM_SECTION_PERM_RX: + return "rx"; + break; + case XVM_SECTION_PERM_WX: + return "wx"; + break; + case XVM_SECTION_PERM_RWX: + return "rwx"; + break; + default: + llvm_unreachable("Unknown Data Permission Type"); + } +} + +void XVMAsmPrinter::emitDataSectionInfo(raw_svector_ostream & O) { + O << ";; \"data\" index name perm bytes init_data\n"; + // Merged section sizes + int _MaxMergedIndex = 0; + std::map SectionSizes; + for (auto& I : DataSectionIndexInfoMap) { + XVMSectionInfo & SInfo = I.second; + if (SectionSizes.find(SInfo.MergedSecIndex) == SectionSizes.end()) { + SectionSizes[SInfo.MergedSecIndex] = SInfo.SecSize; + } else { + SectionSizes[SInfo.MergedSecIndex] += SInfo.SecSize; + } + if (SInfo.MergedSecIndex > _MaxMergedIndex) + _MaxMergedIndex = SInfo.MergedSecIndex; + } + + if (DataSectionIndexInfoMap.size()>0) { + for (int _MergedSecIndex = 0; _MergedSecIndex <= _MaxMergedIndex; _MergedSecIndex++) + { + // Merged section info + std::string MergedSecBuf = ""; + std::string _SecName = ""; + std::string _SecPermit = ""; + for (auto& I : DataSectionIndexInfoMap) { + XVMSectionInfo & SInfo = I.second; + if (SInfo.MergedSecIndex == _MergedSecIndex) { + MergedSecBuf += SInfo.SecBuf; + _SecName = SInfo.SecName; + _SecPermit = GetDataSectionPerm(&SInfo); + } + } + // The section data + O << "(data " << _MergedSecIndex << " $" << _SecName << " " << _SecPermit; + O << " " << SectionSizes[_MergedSecIndex]; + if (!MergedSecBuf.empty()) { + O << " \"" 
<< MergedSecBuf << "\""; + } + O << ")\n"; + } + } +} + +void XVMAsmPrinter::InitGlobalConstantDataSequential( + const DataLayout &DL, const ConstantDataSequential *CDS, XVMSectionInfo* SInfo) { + LLVM_DEBUG(dbgs() << "\n--------------------InitGlobalConstantDataSequential-------------------\n"); + + unsigned ElementByteSize = CDS->getElementByteSize(); + if (isa(CDS->getElementType())) { + for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I) { + LLVM_DEBUG(dbgs() << "Add to Buf: " + << UnsignedIntTypeToHex(ReverseBytes( + CDS->getElementAsInteger(I), + ElementByteSize), + ElementByteSize*2).c_str() + << " size=" << ElementByteSize << "\n"); + SInfo->SecBuf += UnsignedIntTypeToHex(ReverseBytes(CDS->getElementAsInteger(I), ElementByteSize), ElementByteSize*2); + } + } else { + llvm_unreachable("Should not have FP in sequential data"); + } + unsigned Size = DL.getTypeAllocSize(CDS->getType()); + unsigned EmittedSize = + DL.getTypeAllocSize(CDS->getElementType()) * CDS->getNumElements(); + assert(EmittedSize <= Size && "Size cannot be less than EmittedSize!"); + if (unsigned Padding = Size - EmittedSize) { + LLVM_DEBUG(dbgs() << "\n------------Seq PADSIZE----------- " << Padding << "\n"); + LLVM_DEBUG(dbgs() << "Add to Buf: " + << UnsignedIntTypeToHex(ReverseBytes(0, Padding), Padding*2).c_str() + << " size=" << Padding << "\n"); + SInfo->SecBuf += UnsignedIntTypeToHex(ReverseBytes(0, Padding), Padding*2); + } +} + + +void XVMAsmPrinter::InitGlobalConstantArray(const DataLayout &DL, + const ConstantArray *CA, + const Constant *BaseCV, uint64_t Offset, + XVMSectionInfo* SInfo) { + LLVM_DEBUG(dbgs() << "\n--------------------InitGlobalConstantArray-------------------\n"); + for (unsigned I = 0, E = CA->getNumOperands(); I != E; ++I) { + InitGlobalConstantImpl(DL, CA->getOperand(I), BaseCV, Offset, SInfo); + Offset += DL.getTypeAllocSize(CA->getOperand(I)->getType()); + } +} + + +void XVMAsmPrinter::InitGlobalConstantStruct(const DataLayout &DL, + const 
ConstantStruct *CS, + const Constant *BaseCV, uint64_t Offset, + XVMSectionInfo* SInfo) { + LLVM_DEBUG(dbgs() << "\n--------------------InitGlobalConstantStruct-------------------\n"); + unsigned Size = DL.getTypeAllocSize(CS->getType()); + const StructLayout *Layout = DL.getStructLayout(CS->getType()); + uint64_t SizeSoFar = 0; + for (unsigned I = 0, E = CS->getNumOperands(); I != E; ++I) { + const Constant *Field = CS->getOperand(I); + + // Print the actual field value. + InitGlobalConstantImpl(DL, Field, BaseCV, Offset + SizeSoFar, SInfo); + + // Check if padding is needed and insert one or more 0s. + uint64_t FieldSize = DL.getTypeAllocSize(Field->getType()); + uint64_t PadSize = ((I == E - 1 ? Size : Layout->getElementOffset(I + 1)) - + Layout->getElementOffset(I)) - + FieldSize; + + // Insert padding - this may include padding to increase the size of the + // current field up to the ABI size (if the struct is not packed) as well + // as padding to ensure that the next field starts at the right offset. 
+ if(PadSize > 0) { + LLVM_DEBUG(dbgs() << "\n------------Struct PADSIZE-----------" << PadSize << "\n"); + LLVM_DEBUG(dbgs() << "Add to Buf: " + << UnsignedIntTypeToHex(ReverseBytes(0, PadSize), PadSize*2).c_str() + << " size=" << PadSize << "\n"); + SInfo->SecBuf += UnsignedIntTypeToHex(ReverseBytes(0, PadSize), PadSize*2); + } + SizeSoFar += FieldSize + PadSize; + } + LLVM_DEBUG(dbgs() << "\n------------Total with Padding----------- " << SizeSoFar << "\n"); + assert(SizeSoFar == Layout->getSizeInBytes() && + "Layout of constant struct may be incorrect!"); +} + +void XVMAsmPrinter::InitGlobalConstantImpl(const DataLayout &DL, const Constant *CV, + const Constant *BaseCV, + uint64_t Offset, + XVMSectionInfo* SInfo) { + LLVM_DEBUG(dbgs() << "\n--------------------InitGlobalConstantImpl " + << CV->getName().str().c_str() + << "-------------------\n"); + uint64_t Size = DL.getTypeAllocSize(CV->getType()); + + // Globals with sub-elements such as combinations of arrays and structs + // are handled recursively by InitGlobalConstantImpl. Keep track of the + // constant symbol base and the current position with BaseCV and Offset. 
+ if (!BaseCV && CV->hasOneUse()) + BaseCV = dyn_cast(CV->user_back()); + + if (isa(CV) || isa(CV)) { + if (SInfo->BufType == XVM_SECTION_DATA_TYPE_UNKNOWN) { + SInfo->BufType = XVM_SECTION_DATA_TYPE_BSS; + LLVM_DEBUG(dbgs() << "\nemit in InitGlobalConstantImpl bss " + << Size << " " + << DL.getTypeStoreSize(CV->getType()) << " " + << DL.getTypeAllocSize(CV->getType()).getFixedSize() << "\n"); + return; + } + LLVM_DEBUG(dbgs() << "\nemit in InitGlobalConstantImpl zero or undef " + << Size << " " + << DL.getTypeStoreSize(CV->getType()) << " " + << DL.getTypeAllocSize(CV->getType()).getFixedSize() << "\n"); + LLVM_DEBUG(dbgs() << "Add to Buf: " + << UnsignedIntTypeToHex(ReverseBytes(0, Size), Size*2).c_str() + << " size=" << Size << "\n"); + SInfo->SecBuf += UnsignedIntTypeToHex(ReverseBytes(0, Size), Size*2); + return; + } + + if (const ConstantInt *CI = dyn_cast(CV)) { + const uint64_t StoreSize = DL.getTypeStoreSize(CV->getType()); + if (StoreSize <= 8) { + SInfo->BufType = SInfo->BufType | XVM_SECTION_DATA_TYPE_NUMERIC; + LLVM_DEBUG(dbgs() << "\nemit in InitGlobalConstantImpl int\n"); + LLVM_DEBUG(dbgs() << "Add to Buf: " + << UnsignedIntTypeToHex(ReverseBytes(CI->getZExtValue(), StoreSize), StoreSize*2).c_str() + << " size=" << StoreSize << "\n"); + SInfo->SecBuf += UnsignedIntTypeToHex(ReverseBytes(CI->getZExtValue(), StoreSize), StoreSize*2); + } else { + llvm_unreachable("Should not have large int global value!"); + // InitGlobalConstantLargeInt(DL, CI, SInfo); + } + return; + } + + if (const ConstantFP *CFP = dyn_cast(CV)) + llvm_unreachable("Should not have floating point global value!"); + + if (isa(CV)) { + SInfo->BufType = SInfo->BufType | XVM_SECTION_DATA_TYPE_POINTER; + LLVM_DEBUG(dbgs() << "\nemit in InitGlobalConstantImpl nullptr\n"); + LLVM_DEBUG(dbgs() << "Add to Buf: \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00" + << " size=" << 8 << "\n"); + SInfo->SecBuf += "\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"; + return; + } + + if (const 
ConstantDataSequential *CDS = dyn_cast(CV)) { + SInfo->BufType = SInfo->BufType | XVM_SECTION_DATA_TYPE_STRING; + return InitGlobalConstantDataSequential(DL, CDS, SInfo); + } + + if (const ConstantArray *CVA = dyn_cast(CV)) { + SInfo->BufType = SInfo->BufType | XVM_SECTION_DATA_TYPE_ARRAY; + return InitGlobalConstantArray(DL, CVA, BaseCV, Offset, SInfo); + } + + if (const ConstantStruct *CVS = dyn_cast(CV)) { + SInfo->BufType = SInfo->BufType | XVM_SECTION_DATA_TYPE_STRUCT; + return InitGlobalConstantStruct(DL, CVS, BaseCV, Offset, SInfo); + } + + if (const ConstantExpr *CE = dyn_cast(CV)) { + // Look through bitcasts, which might not be able to be MCExpr'ized (e.g. of + // vectors). + if (CE->getOpcode() == Instruction::BitCast) + return InitGlobalConstantImpl(DL, CE->getOperand(0), BaseCV, Offset, SInfo); + + if (Size > 8) { + // If the constant expression's size is greater than 64-bits, then we have + // to emit the value in chunks. Try to constant fold the value and emit it + // that way. 
+ Constant *New = ConstantFoldConstant(CE, DL); + if (New != CE) + llvm_unreachable("unimplemented after ConstantFoldConstant"); + } + } + + if (const ConstantVector *V = dyn_cast(CV)) + llvm_unreachable("Should not have vector global value!"); + + LLVM_DEBUG(dbgs() << "\nemit in InitGlobalConstantImpl ptr\n"); + + int idx_func = GetFuncIndex(CV->getName().data()); + if (idx_func != -1) { + SInfo->BufType = SInfo->BufType | XVM_SECTION_DATA_TYPE_POINTER; + SInfo->SecBuf += UnsignedIntTypeToHex(ReverseBytes(idx_func, 8), 16); + LLVM_DEBUG(dbgs() << "\nsuccess in function pointer " + << UnsignedIntTypeToHex(ReverseBytes(idx_func, 8), 16).c_str() + << "\n"); + return; + } + + int DataSectionIndex = GetDataIndex(CV->getName().data(), DATA_SECTION); + int DataSectionOffset = 0; + XVMGVPathInfo PatchInfo; + if (DataSectionIndex != -1) { + SInfo->BufType = XVM_SECTION_DATA_TYPE_POINTER; + SInfo->SecComment += "="; + SInfo->SecComment += CV->getName().data(); + + PatchInfo.SymName = CV->getName().data(); + PatchInfo.AddEnd = 0; + SInfo->PatchListInfo.push_back(PatchInfo); + return; + } + + // Otherwise, it must be a ConstantExpr. Lower it to an MCExpr, then emit it + // thread the streamer with EmitValue. 
+ const MCExpr *ME = lowerConstant(CV); + std::string StrME; + llvm::raw_string_ostream rso(StrME); + if (ME != NULL) { + DataSectionIndex = 0; + DataSectionOffset = 0; + if (ME->getKind() == llvm::MCExpr::Binary) { + const MCSymbolRefExpr *SRE; + if (const MCBinaryExpr *BE = dyn_cast(ME)) { + SRE = dyn_cast(BE->getLHS()); + const auto *CE = dyn_cast(BE->getRHS()); + assert(SRE && CE && "Binary expression must be sym+const."); + // Here we need to handle the case such as .L.str+1: see sample source from pr53084.c + std::string SymName = SRE->getSymbol().getName().str(); + SymName = GetSymbolName(SymName); + DataSectionIndex = GetDataIndex(SymName.data(), DATA_SECTION); + rso << *ME; + SInfo->SecComment += "="; + SInfo->SecComment += StrME.data(); + + PatchInfo.SymName = SymName; + PatchInfo.AddEnd = (int)CE->getValue(); + if (DataSectionIndex != -1) { + SInfo->BufType = XVM_SECTION_DATA_TYPE_POINTER; + } + } + else { + SRE = dyn_cast(ME); + assert(SRE && "Unexpected MCExpr type."); + } + } else { + if (ME->getKind() == llvm::MCExpr::Constant) { + // Here we handle the case of constant cast to ptr + const auto &ConstA = cast(ME); + const ConstantExpr *CExprA = dyn_cast(CV); + assert(CExprA->getOpcode() == Instruction::IntToPtr); + LLVM_DEBUG(dbgs() << "Add to Buf: " + << UnsignedIntTypeToHex(ReverseBytes(ConstA->getValue(), 8), 8*2).c_str() + << " size=" << 8 << "\n"); + SInfo->SecBuf += UnsignedIntTypeToHex(ReverseBytes(ConstA->getValue(), 8), 8*2); + return; + } + rso << *ME; + PatchInfo.SymName = StrME.data(); + PatchInfo.AddEnd = 0; + } + SInfo->PatchListInfo.push_back(PatchInfo); + return; + } + llvm_unreachable("unhandled global type!!"); +} + +void XVMAsmPrinter::InitDataSectionGlobalConstant(Module *M) { + LLVM_DEBUG(dbgs() << "--------------------InitGlobalConstant-------------------\n"); + SecSubSecIndices SecIndices; + SecIndices.SecNameIndex = -1; + unsigned int SectionIndex = 0; + + for (const llvm::GlobalVariable &GV : M->getGlobalList()) { + if 
(GV.getName().compare("llvm.global_ctors") == 0 || + GV.getName().compare("llvm.global_dtors") == 0) { + continue; + } + if (GV.hasInitializer()) { + LLVM_DEBUG(dbgs() << "\n\nvar: " + << GV.getName().str().c_str() + << "\n"); + const DataLayout &DL = GV.getParent()->getDataLayout(); + const Constant *CV = GV.getInitializer(); + uint64_t Size = DL.getTypeAllocSize(CV->getType()); + XVMSectionInfo SInfo; + SInfo.MergedSecIndex = -1; + + if (Size) { + SInfo.SecName = "unknown"; // placeholder + SInfo.Permission = XVM_SECTION_PERM_UNKNOWN; // placeholder + SecIndices.SubSecNameIndex = SectionIndex; + SInfo.SecIndex = SectionIndex; + SInfo.PtrSecIndex = -1; + SInfo.SecSize = Size; + SInfo.BufType = XVM_SECTION_DATA_TYPE_UNKNOWN; + SInfo.SecBuf = ""; + SInfo.SecComment = GV.getName().data(); + SInfo.SymName = GV.getName().data(); + InitGlobalConstantImpl(DL, CV, nullptr, 0, &SInfo); + } + LLVM_DEBUG(dbgs() << "buf is " + << SInfo.SecBuf.c_str() + << " size is " << Size << "\n"); + // ser permission + if (GV.isConstant()) { + SInfo.Permission = XVM_SECTION_PERM_RO; + SInfo.SecName = "rodata"; + } else { + SInfo.Permission = XVM_SECTION_PERM_RW; + if ( (SInfo.BufType & XVM_SECTION_DATA_TYPE_BSS) != XVM_SECTION_DATA_TYPE_UNKNOWN) { + SInfo.SecName = "bss"; + } else { + SInfo.SecName = "data"; + } + } + SInfo.SymName = GV.getName().data(); + + DataSectionNameIndexMap.insert(std::pair(GV.getName().data(), SecIndices)); + DataSectionIndexInfoMap.insert(std::pair(SectionIndex++, SInfo)); + } + // TODO: extern case + } + PatchSectionInfo(); +} + +void XVMAsmPrinter::InitModuleMapFuncnameIndex(Module *M) { + int Index = 0; + std::vector FuncDecl; + + /* F1 can't be const because if the function is a constructor + * or destructor, we need to add the export attribute to it */ + for (Function &F1 : M->getFunctionList()) { + if(F1.getInstructionCount() != 0) { + FunctionDefinitionMap.insert(std::make_pair(F1.getName().data(), Index)); + 
FunctionNameAndIndex.insert(std::make_pair(F1.getName().data(), Index)); + if (IsConstructorDestructor(F1.getName().data(), M)) { + F1.addFnAttr("xvm-export-name", F1.getName().data()); + } + Index ++; + } + else if(!F1.isIntrinsic()) { + FuncDecl.push_back(F1.getName().data()); + } + } + + for (std::string FName : FuncDecl) { + FunctionNameAndIndex.insert(std::make_pair(FName, Index)); + Index ++; + } +} + +bool XVMAsmPrinter::doInitialization(Module &M) { + AsmPrinter::doInitialization(M); + + return false; +} + +void XVMAsmPrinter::printOperand(const MachineInstr *MI, int OpNum, + raw_ostream &O) { + const MachineOperand &MO = MI->getOperand(OpNum); + + switch (MO.getType()) { + case MachineOperand::MO_Register: + O << XVMInstPrinter::getRegisterName(MO.getReg()); + break; + + case MachineOperand::MO_Immediate: + O << MO.getImm(); + break; + + case MachineOperand::MO_MachineBasicBlock: + O << *MO.getMBB()->getSymbol(); + break; + + case MachineOperand::MO_GlobalAddress: + O << *getSymbol(MO.getGlobal()); + break; + + case MachineOperand::MO_BlockAddress: { + MCSymbol *BA = GetBlockAddressSymbol(MO.getBlockAddress()); + O << BA->getName(); + break; + } + + case MachineOperand::MO_ExternalSymbol: + O << *GetExternalSymbolSymbol(MO.getSymbolName()); + break; + + case MachineOperand::MO_JumpTableIndex: + case MachineOperand::MO_ConstantPoolIndex: + default: + llvm_unreachable(""); + } +} + +bool XVMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, + const char *ExtraCode, raw_ostream &O) { + if (ExtraCode && ExtraCode[0]) + return AsmPrinter::PrintAsmOperand(MI, OpNo, ExtraCode, O); + + printOperand(MI, OpNo, O); + return false; +} + +bool XVMAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, + unsigned OpNum, const char *ExtraCode, + raw_ostream &O) { + assert(OpNum + 1 < MI->getNumOperands() && "Insufficient operands"); + const MachineOperand &BaseMO = MI->getOperand(OpNum); + const MachineOperand &OffsetMO = MI->getOperand(OpNum + 1); + 
assert(BaseMO.isReg() && "Unexpected base pointer for inline asm memory operand."); + assert(OffsetMO.isImm() && "Unexpected offset for inline asm memory operand."); + int Offset = OffsetMO.getImm(); + if (ExtraCode) + return true; // Unknown modifier. + + if (Offset < 0) + O << "(" << XVMInstPrinter::getRegisterName(BaseMO.getReg()) << " - " << -Offset << ")"; + else + O << "(" << XVMInstPrinter::getRegisterName(BaseMO.getReg()) << " + " << Offset << ")"; + + return false; +} + +void XVMAsmPrinter::setFunctionCallInfo(MCInst * Inst) { + const char * func_name = NULL; + const MCExpr * TmpExpr; + int64_t func_index = -1; + if (Inst == NULL) + return; + + assert(Inst->getNumOperands()>0); + MCOperand & FirstMO = Inst->getOperand(0); + if (FirstMO.isExpr()) { + Inst->setFlags(FUNC_CALL_FLAG_MC_INST_IMM); + TmpExpr = Inst->getOperand(0).getExpr(); + func_name = cast(*TmpExpr).getSymbol().getName().data(); + assert(func_name != NULL); + assert(Inst->getNumOperands() == 1); + func_index = GetFuncIndex(func_name); + if (func_index == -1) { + std::string ErrorMesg("Error: function name "); + ErrorMesg += func_name; + ErrorMesg += " is called; but could not be found\n"; + report_fatal_error(ErrorMesg.data()); + } + MCOperand MCOp = MCOperand::createImm(func_index); + Inst->addOperand(MCOp); + } + if (FirstMO.isReg()) { + Inst->setFlags(FUNC_CALL_FLAG_MC_INST_REG); + int reg_num = FirstMO.getReg(); + MCOperand MCOp = MCOperand::createReg(reg_num); + Inst->addOperand(MCOp); + } +} + +void XVMAsmPrinter::setGlobalSymbolInfo(const MachineInstr *MI, MCInst* Inst) { + unsigned int numOps = MI->getNumOperands(); + bool hasGlobalSymbol = false; + for(unsigned int i=0; igetOperand(i); + if (tmp.isGlobal()) { + hasGlobalSymbol = true; + } + } + if (!hasGlobalSymbol) + return; + /* add operand and indicate the global symbol type*/ + numOps = Inst->getNumOperands(); + for (unsigned int i = 0; i < numOps; i++) { + const MCOperand &tmp = Inst->getOperand(i); + if (tmp.isExpr()) { + const 
MCExpr *TmpExpr = tmp.getExpr(); + const MCSymbolRefExpr *SRE; + if (const MCBinaryExpr *BE = dyn_cast(TmpExpr)) + SRE = dyn_cast(BE->getLHS()); + else + SRE = dyn_cast(TmpExpr); + std::string SymName = SRE->getSymbol().getName().str(); + /* the symbol may be function name, may be global varaibel name */ + int SymIndex = GetFuncIndex(SymName.c_str()); + if (SymIndex != -1) { + /* it is a function name */ + Inst->setFlags(FUNC_ID_FLAG_MC_INST_REG); + MCOperand MCOp = MCOperand::createImm(SymIndex); + Inst->addOperand(MCOp); + } else { + /* it may be a global variable name */ + SymName = GetSymbolName(SymName); + SymIndex = GetDataIndex(SymName.c_str(), DATA_SECTION); + if (SymIndex == -1) { + report_fatal_error( + "TODO: Add the support of non-func-global-var scenarios\n"); + } else { + Inst->setFlags(GLOBAL_DATAREF_FLAG_MC_INST); + MCOperand MCOp = MCOperand::createImm(SymIndex); + Inst->addOperand(MCOp); + } + } + } + } + return; +} + +void XVMAsmPrinter::emitInstruction(const MachineInstr *MI) { + MCInst TmpInst; + XVMMCInstLower MCInstLowering(OutContext, *this); + MCInstLowering.Lower(MI, TmpInst); + + if (MI->isCall()) { + setFunctionCallInfo(&TmpInst); + } else { + setGlobalSymbolInfo(MI, &TmpInst); + } + TmpInst.addOperand(MCOperand::createImm(MIIndentMap[MI])); + + EmitToStreamer(*OutStreamer, TmpInst); +} + +void XVMAsmPrinter::emitFunctionHeader() { + const Function &F = MF->getFunction(); + //OutStreamer->switchSection(MF->getSection()); + + SmallString<128> Str; + raw_svector_ostream O(Str); + int Index = GetDefFuncIndex(F.getName().data()); + assert(Index != -1); + + if (F.hasFnAttribute("xvm-export-name")) { + StringRef ExportName = F.getFnAttribute("xvm-export-name").getValueAsString(); + O << "(export $" << ExportName << "\n"; + } else { + if (XVMExportAll) { + O << "\t(export " << " $" << F.getName() << " "; + } + } + O << "\t(func " << Index << " $" << F.getName() << " "; + + emitFunctionParamList(*MF, O); + emitFunctionReturnVal(*MF, O); + + 
OutStreamer->emitRawText(O.str()); + + auto Section = getObjFileLowering().SectionForGlobal(&F, TM); + MF->setSection(Section); +} + +void XVMAsmPrinter::emitFunctionReturnVal(const MachineFunction &MF, raw_ostream &O) { + emitFunctionReturnVal(MF.getFunction(), O); +} + +void XVMAsmPrinter::emitFunctionReturnVal(const Function &F, raw_ostream &O) { + O << "(result"; + Type *Ty = F.getReturnType(); + if (auto *PTy = dyn_cast(Ty)) { + O << " ref"; + } else if (Ty->isIntegerTy()) { + O << " i64"; + } else if (!Ty->isVoidTy()) { + llvm_unreachable("Invalid return type"); + } + + O << ")"; +} + +void XVMAsmPrinter::emitFunctionBodyEnd() { + SmallString<128> Str; + raw_svector_ostream O(Str); + O << "\t)"; + if (MF->getFunction().hasFnAttribute("xvm-export-name")) + O << "\n)"; + else if (XVMExportAll) { + O << ")"; + } + + OutStreamer->emitRawText(O.str()); +} + +void XVMAsmPrinter::emitFunctionParamList(const MachineFunction &MF, raw_ostream &O) { + emitFunctionParamList(MF.getFunction(), O); +} + +void XVMAsmPrinter::emitFunctionParamList(const Function &F, raw_ostream &O) { + O << "(param"; + + for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); + I != E; ++I) { + Type *Ty = I->getType(); + if (auto *PTy = dyn_cast(Ty)) { + O << " ref"; + } else if (Ty->isIntegerTy()) { + O << " i64"; + } else { + llvm_unreachable("Invalid param type"); + } + } + + O << ")"; +} + +void XVMAsmPrinter::emitStartOfAsmFile(Module &M) { + InitModuleMapFuncnameIndex(&M); + InitDataSectionGlobalConstant(&M); + SmallString<128> Str1; + raw_svector_ostream O(Str1); + + O << "(module"; + OutStreamer->emitRawText(O.str()); +} + +static void output_constructor_destructor_metadata( + raw_svector_ostream &O, + std::map> priority_map, + char *MetadataName) { + O << "(metadata " << MetadataName << " \""; + if (strcmp(MetadataName, "$init_array") == 0) { + for (auto i = priority_map.begin(); i != priority_map.end(); i++) { + for (int j = 0; j < (i->second).size(); j++) { + int 
func_index = GetFuncIndex((i->second)[j]); + std::string func_id_data = UnsignedIntTypeToHex(ReverseBytes(func_index, 4), 8); + O << func_id_data.data(); + } + } + } else { + /* Destructors are ran from largest number to smallest */ + for (auto i = priority_map.rbegin(); i != priority_map.rend(); i++) { + for (int j = 0; j < (i->second).size(); j++) { + int func_index = GetFuncIndex((i->second)[j]); + std::string func_id_data = UnsignedIntTypeToHex(ReverseBytes(func_index, 4), 8); + O << func_id_data.data(); + } + } + } + O << "\")\n"; +} + +static void emitConstructorsDestructors(raw_svector_ostream &O, Module &M, char *GVName, char *MetadataName) { + GlobalVariable *GV; + std::map> priority_map; + + GV = M.getGlobalVariable(GVName); + + if (!GV) { + return; + } + const ConstantArray *InitList = dyn_cast(GV->getInitializer()); + if (!InitList) { + return; + } + StructType *ETy = dyn_cast(InitList->getType()->getElementType()); + if (!ETy || ETy->getNumElements() != 3 || + !ETy->getTypeAtIndex(0U)->isIntegerTy() || + !ETy->getTypeAtIndex(1U)->isPointerTy() || + !ETy->getTypeAtIndex(2U)->isPointerTy()) { + return; + } + + for (Value *P : InitList->operands()) { + ConstantStruct *CS = dyn_cast(P); + if (!CS) { + continue; + } + ConstantInt *Priority = dyn_cast(CS->getOperand(0)); + if (!Priority) { + continue; + } + uint16_t PriorityValue = Priority->getLimitedValue(UINT16_MAX); + const char *func_name = CS->getOperand(1)->getName().data(); + priority_map[PriorityValue].push_back(func_name); + } + + output_constructor_destructor_metadata(O, priority_map, MetadataName); +} + +static void emitMetaDataSectionInfo(raw_svector_ostream &O, Module &M) { + O << "\n;; \"metadata\" name init_data\n"; + emitConstructorsDestructors(O, M, "llvm.global_ctors", "$init_array"); + emitConstructorsDestructors(O, M, "llvm.global_dtors", "$fini_array"); +} + +void XVMAsmPrinter::emitEndOfAsmFile(Module &M) { + SmallString<128> Str1; + raw_svector_ostream O(Str1); + emitDecls(M); + 
emitDataSectionInfo(O); + emitMetaDataSectionInfo(O, M); + O << ")"; + OutStreamer->emitRawText(O.str()); +} + +void XVMAsmPrinter::emitDecls(const Module &M) { + SmallString<128> Str1; + raw_svector_ostream O(Str1); + for (const Function &F : M.getFunctionList()) { + if (GetDefFuncIndex(F.getName().data()) == -1 && + !F.isIntrinsic()) { + O << "(import $" << F.getName() << "\n"; + O << "\t(func " << GetFuncIndex(F.getName().data()) << " $" << F.getName() << " "; + emitFunctionParamList(F, O); + emitFunctionReturnVal(F, O); + O << ")\n)\n"; + } + } + OutStreamer->emitRawText(O.str()); +} + +static bool ShouldIncIndent(MachineInstr &MI, const XVMInstrInfo *TII) { + if (TII->isCondBranch(&MI) || TII->isCondBranchProcessed(&MI) || + MI.getOpcode() == XVM::LOOP || MI.getOpcode() == XVM::BLOCK || + MI.getOpcode() == XVM::THEN || MI.getOpcode() == XVM::ELSE) { + return true; + } else { + return false; + } +} + +static bool ShouldDecIndent(MachineInstr &MI) { + if (MI.getOpcode() == XVM::END_LOOP || MI.getOpcode() == XVM::END_BLOCK || + MI.getOpcode() == XVM::END_THEN || MI.getOpcode() == XVM::END_IF || + MI.getOpcode() == XVM::END_ELSE) { + return true; + } else { + return false; + } +} + +void XVMAsmPrinter::GetMIIndent(MachineFunction &MF) { + unsigned CurrentIndent = 0; + const XVMInstrInfo *TII = MF.getSubtarget().getInstrInfo(); + for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end(); MBBI != E; ++MBBI) { + for (MachineBasicBlock::iterator MII = MBBI->begin(), EI = MBBI->end(); MII != EI; ++MII) { + if (ShouldIncIndent(*MII, TII)) { + MIIndentMap[&*MII] = CurrentIndent++; + } else if (ShouldDecIndent(*MII)) { + MIIndentMap[&*MII] = --CurrentIndent; + } else { + MIIndentMap[&*MII] = CurrentIndent; + } + } + } + assert (CurrentIndent == 0 && "All the indents should be paired!"); +} + +void XVMAsmPrinter::emitGlobalVariable(const GlobalVariable *GV) {} + +bool XVMAsmPrinter::runOnMachineFunction(MachineFunction &MF) { + SetupMachineFunction(MF); + 
GetMIIndent(MF); + emitFunctionBody(); + return false; +} + +// Force static initialization. extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeXVMAsmPrinter() { + RegisterAsmPrinter X(getTheXVMleTarget()); + RegisterAsmPrinter Y(getTheXVMTarget()); +} + +extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeXVMAsmPrinterCalledInDylib() { + LLVMInitializeXVMAsmPrinter(); } #else diff --git a/llvm/lib/Target/XVM/XVMCFGSort.cpp b/llvm/lib/Target/XVM/XVMCFGSort.cpp index 69f8ba27a8590056e7f1152028fe6e78ea6e7cf1..31fad1dc7edcb1443bb636b726e1888ee87a789e 100644 --- a/llvm/lib/Target/XVM/XVMCFGSort.cpp +++ b/llvm/lib/Target/XVM/XVMCFGSort.cpp @@ -16,6 +16,299 @@ /// ////===----------------------------------------------------------------------===// #ifdef XVM_DYLIB_MODE -// Insert the XVM backend code here + +#include "MCTargetDesc/XVMMCTargetDesc.h" +#include "XVM.h" +#include "XVMSortRegion.h" +#include "XVMSubtarget.h" +#include "llvm/ADT/PriorityQueue.h" +#include "llvm/ADT/SetVector.h" +#include "llvm/CodeGen/MachineDominators.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineLoopInfo.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/Passes.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +using namespace llvm; +using XVM::SortRegion; +using XVM::SortRegionInfo; + +#define DEBUG_TYPE "xvm-cfg-sort" + +namespace { + +class XVMCFGSort final : public MachineFunctionPass { + StringRef getPassName() const override { return "XVM CFG Sort"; } + + void getAnalysisUsage(AnalysisUsage &AU) const override { + AU.setPreservesCFG(); + AU.addRequired(); + AU.addPreserved(); + AU.addRequired(); + AU.addPreserved(); + MachineFunctionPass::getAnalysisUsage(AU); + } + + bool runOnMachineFunction(MachineFunction &MF) override; + +public: + static char ID; // Pass identification, replacement for typeid + XVMCFGSort() : MachineFunctionPass(ID) {} +}; +} // end anonymous namespace + +char XVMCFGSort::ID = 0; 
+INITIALIZE_PASS(XVMCFGSort, DEBUG_TYPE, + "Reorders blocks in topological order", false, false) + +FunctionPass *llvm::createXVMCFGSort() { + return new XVMCFGSort(); +} + +static void maybeUpdateTerminator(MachineBasicBlock *MBB) { +#ifndef NDEBUG + bool AnyBarrier = false; +#endif + bool AllAnalyzable = true; + for (const MachineInstr &Term : MBB->terminators()) { +#ifndef NDEBUG + AnyBarrier |= Term.isBarrier(); +#endif + AllAnalyzable &= Term.isBranch() && !Term.isIndirectBranch(); + } + assert((AnyBarrier || AllAnalyzable) && + "analyzeBranch needs to analyze any block with a fallthrough"); + + // Find the layout successor from the original block order. + MachineFunction *MF = MBB->getParent(); + MachineBasicBlock *OriginalSuccessor = + unsigned(MBB->getNumber() + 1) < MF->getNumBlockIDs() + ? MF->getBlockNumbered(MBB->getNumber() + 1) + : nullptr; + + if (AllAnalyzable) + MBB->updateTerminator(OriginalSuccessor); +} + +namespace { +/// Sort blocks by their number. +struct CompareBlockNumbers { + bool operator()(const MachineBasicBlock *A, + const MachineBasicBlock *B) const { + return A->getNumber() > B->getNumber(); + } +}; +/// Sort blocks by their number in the opposite order.. +struct CompareBlockNumbersBackwards { + bool operator()(const MachineBasicBlock *A, + const MachineBasicBlock *B) const { + return A->getNumber() < B->getNumber(); + } +}; +/// Bookkeeping for a region to help ensure that we don't mix blocks not +/// dominated by the its header among its blocks. +struct Entry { + const SortRegion *TheRegion; + unsigned NumBlocksLeft; + + /// List of blocks not dominated by Loop's header that are deferred until + /// after all of Loop's blocks have been seen. + std::vector Deferred; + + explicit Entry(const SortRegion *R) + : TheRegion(R), NumBlocksLeft(R->getNumBlocks()) {} +}; +} // end anonymous namespace + +/// Sort the blocks, taking special care to make sure that regions are not +/// interrupted by blocks not dominated by their header. 
+/// TODO: There are many opportunities for improving the heuristics here. +/// Explore them. +static void sortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI, + const MachineDominatorTree &MDT) { + // Remember original layout ordering, so we can update terminators after + // reordering to point to the original layout successor. + MF.RenumberBlocks(); + + // Prepare for a topological sort: Record the number of predecessors each + // block has, ignoring loop backedges. + SmallVector NumPredsLeft(MF.getNumBlockIDs(), 0); + for (MachineBasicBlock &MBB : MF) { + unsigned N = MBB.pred_size(); + if (MachineLoop *L = MLI.getLoopFor(&MBB)) + if (L->getHeader() == &MBB) + for (const MachineBasicBlock *Pred : MBB.predecessors()) + if (L->contains(Pred)) + --N; + NumPredsLeft[MBB.getNumber()] = N; + } + + // Topological sort the CFG, with additional constraints: + // - Between a region header and the last block in the region, there can be + // no blocks not dominated by its header. + // - It's desirable to preserve the original block order when possible. + // We use two ready lists; Preferred and Ready. Preferred has recently + // processed successors, to help preserve block sequences from the original + // order. Ready has the remaining ready blocks. + PriorityQueue, + CompareBlockNumbers> + Preferred; + PriorityQueue, + CompareBlockNumbersBackwards> + Ready; + + SortRegionInfo SRI(MLI); + SmallVector Entries; + for (MachineBasicBlock *MBB = &MF.front();;) { + const SortRegion *R = SRI.getRegionFor(MBB); + if (R) { + // If MBB is a region header, add it to the active region list. We can't + // put any blocks that it doesn't dominate until we see the end of the + // region. + if (R->getHeader() == MBB) + Entries.push_back(Entry(R)); + // For each active region the block is in, decrement the count. If MBB is + // the last block in an active region, take it off the list and pick up + // any blocks deferred because the header didn't dominate them. 
+ for (Entry &E : Entries) + if (E.TheRegion->contains(MBB) && --E.NumBlocksLeft == 0) + for (auto DeferredBlock : E.Deferred) + Ready.push(DeferredBlock); + while (!Entries.empty() && Entries.back().NumBlocksLeft == 0) + Entries.pop_back(); + } + // The main topological sort logic. + for (MachineBasicBlock *Succ : MBB->successors()) { + // Ignore backedges. + if (MachineLoop *SuccL = MLI.getLoopFor(Succ)) + if (SuccL->getHeader() == Succ && SuccL->contains(MBB)) + continue; + // Decrement the predecessor count. If it's now zero, it's ready. + if (--NumPredsLeft[Succ->getNumber()] == 0) { + // When we are in a SortRegion, we allow sorting of not only BBs that + // belong to the current (innermost) region but also BBs that are + // dominated by the current region header. + Preferred.push(Succ); + } + } + // Determine the block to follow MBB. First try to find a preferred block, + // to preserve the original block order when possible. + MachineBasicBlock *Next = nullptr; + while (!Preferred.empty()) { + Next = Preferred.top(); + Preferred.pop(); + // If X isn't dominated by the top active region header, defer it until + // that region is done. + if (!Entries.empty() && + !MDT.dominates(Entries.back().TheRegion->getHeader(), Next)) { + Entries.back().Deferred.push_back(Next); + Next = nullptr; + continue; + } + // If Next was originally ordered before MBB, and it isn't because it was + // loop-rotated above the header, it's not preferred. + if (Next->getNumber() < MBB->getNumber() && + (!R || !R->contains(Next) || + R->getHeader()->getNumber() < Next->getNumber())) { + Ready.push(Next); + Next = nullptr; + continue; + } + break; + } + // If we didn't find a suitable block in the Preferred list, check the + // general Ready list. + if (!Next) { + // If there are no more blocks to process, we're done. 
+ if (Ready.empty()) { + maybeUpdateTerminator(MBB); + break; + } + for (;;) { + Next = Ready.top(); + Ready.pop(); + // If Next isn't dominated by the top active region header, defer it + // until that region is done. + if (!Entries.empty() && + !MDT.dominates(Entries.back().TheRegion->getHeader(), Next)) { + Entries.back().Deferred.push_back(Next); + continue; + } + break; + } + } + // Move the next block into place and iterate. + Next->moveAfter(MBB); + maybeUpdateTerminator(MBB); + MBB = Next; + } + assert(Entries.empty() && "Active sort region list not finished"); + MF.RenumberBlocks(); + +#ifndef NDEBUG + SmallSetVector OnStack; + + // Insert a sentinel representing the degenerate loop that starts at the + // function entry block and includes the entire function as a "loop" that + // executes once. + OnStack.insert(nullptr); + + for (auto &MBB : MF) { + assert(MBB.getNumber() >= 0 && "Renumbered blocks should be non-negative."); + const SortRegion *Region = SRI.getRegionFor(&MBB); + + if (Region && &MBB == Region->getHeader()) { + // Region header. + if (Region->isLoop()) { + // Loop header. The loop predecessor should be sorted above, and the + // other predecessors should be backedges below. + for (auto Pred : MBB.predecessors()) + assert( + (Pred->getNumber() < MBB.getNumber() || Region->contains(Pred)) && + "Loop header predecessors must be loop predecessors or " + "backedges"); + } else { + // Exception header. All predecessors should be sorted above. + for (auto Pred : MBB.predecessors()) + assert(Pred->getNumber() < MBB.getNumber() && + "Non-loop-header predecessors should be topologically sorted"); + } + assert(OnStack.insert(Region) && + "Regions should be declared at most once."); + + } else { + // Not a region header. All predecessors should be sorted above. 
+ for (auto Pred : MBB.predecessors()) + assert(Pred->getNumber() < MBB.getNumber() && + "Non-loop-header predecessors should be topologically sorted"); + assert(OnStack.count(SRI.getRegionFor(&MBB)) && + "Blocks must be nested in their regions"); + } + while (OnStack.size() > 1 && &MBB == SRI.getBottom(OnStack.back())) + OnStack.pop_back(); + } + assert(OnStack.pop_back_val() == nullptr && + "The function entry block shouldn't actually be a region header"); + assert(OnStack.empty() && + "Control flow stack pushes and pops should be balanced."); +#endif +} + +bool XVMCFGSort::runOnMachineFunction(MachineFunction &MF) { + LLVM_DEBUG(dbgs() << "********** CFG Sorting **********\n" + "********** Function: " + << MF.getName() << '\n'); + + const auto &MLI = getAnalysis(); + auto &MDT = getAnalysis(); + + MF.getRegInfo().invalidateLiveness(); + + // Sort the blocks, with contiguous sort regions. + sortBlocks(MF, MLI, MDT); + + return true; +} #endif \ No newline at end of file diff --git a/llvm/lib/Target/XVM/XVMCFGStackify.cpp b/llvm/lib/Target/XVM/XVMCFGStackify.cpp index 38b63c09a5e1fe3a1ee84bbdb60ab4536b14369a..afb2ff46aa92765b6f692c19348287dc2226e2e6 100644 --- a/llvm/lib/Target/XVM/XVMCFGStackify.cpp +++ b/llvm/lib/Target/XVM/XVMCFGStackify.cpp @@ -21,6 +21,776 @@ /// //===----------------------------------------------------------------------===// #ifdef XVM_DYLIB_MODE -// Insert the XVM backend code here + + +#include "XVM.h" +//#include "XVMMachineFunctionInfo.h" +#include "XVMSortRegion.h" +#include "XVMSubtarget.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/CodeGen/MachineDominators.h" +#include "llvm/CodeGen/MachinePostDominators.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineLoopInfo.h" +#include "llvm/MC/MCAsmInfo.h" +#include "llvm/Target/TargetMachine.h" +using namespace llvm; +using XVM::SortRegionInfo; + +#define DEBUG_TYPE "xvm-cfg-stackify" + +STATISTIC(NumCallUnwindMismatches, "Number of call unwind mismatches 
found"); +STATISTIC(NumCatchUnwindMismatches, "Number of catch unwind mismatches found"); + +namespace { +class XVMCFGStackify final : public MachineFunctionPass { + StringRef getPassName() const override { return "XVM CFG Stackify"; } + + void getAnalysisUsage(AnalysisUsage &AU) const override { + AU.addRequired(); + AU.addRequired(); + AU.addRequired(); + MachineFunctionPass::getAnalysisUsage(AU); + } + + bool runOnMachineFunction(MachineFunction &MF) override; + + // For each block whose label represents the end of a scope, record the block + // which holds the beginning of the scope. This will allow us to quickly skip + // over scoped regions when walking blocks. + SmallVector ScopeTops; + void updateScopeTops(MachineBasicBlock *Begin, MachineBasicBlock *End) { + int EndNo = End->getNumber(); + if (!ScopeTops[EndNo] || ScopeTops[EndNo]->getNumber() > Begin->getNumber()) + ScopeTops[EndNo] = Begin; + } + + // Placing markers. + void placeMarkers(MachineFunction &MF); + void placeBlockMarker(MachineBasicBlock &MBB); + void placeLoopMarker(MachineBasicBlock &MBB); + const XVMInstrInfo *TII = nullptr; + void extendCondStmt(std::map CondBranchsWithDepth, + MachineFunction &MF); + void insertWithBreak(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI); + void insertRetBlock(MachineFunction &MF); + void removeInFunctionRet(MachineFunction &MF); + + // Wrap-up + using EndMarkerInfo = + std::pair; + unsigned getBranchDepth(const SmallVectorImpl &Stack, + const std::set *SetEndBlockLoop, + const MachineBasicBlock *MBB); + void rewriteDepthImmediates(MachineFunction &MF); + void fixBackEdgesOfLoops(MachineFunction &MF); + void fixEndsAtEndOfFunction(MachineFunction &MF); + void cleanupFunctionData(MachineFunction &MF); + + // For each BLOCK|LOOP|TRY, the corresponding END_(BLOCK|LOOP|TRY) or DELEGATE + // (in case of TRY). + DenseMap BeginToEnd; + // For each END_(BLOCK|LOOP|TRY) or DELEGATE, the corresponding + // BLOCK|LOOP|TRY. 
+ DenseMap EndToBegin; + + // We need an appendix block to place 'end_loop' or 'end_try' marker when the + // loop / exception bottom block is the last block in a function + MachineBasicBlock *AppendixBB = nullptr; + MachineBasicBlock *getAppendixBlock(MachineFunction &MF) { + AppendixBB = nullptr; + if (!AppendixBB) { + AppendixBB = MF.CreateMachineBasicBlock(); + // Give it a fake predecessor so that AsmPrinter prints its label. + AppendixBB->addSuccessor(AppendixBB); + MF.push_back(AppendixBB); + } + return AppendixBB; + } + + // Before running rewriteDepthImmediates function, 'delegate' has a BB as its + // destination operand. getFakeCallerBlock() returns a fake BB that will be + // used for the operand when 'delegate' needs to rethrow to the caller. This + // will be rewritten as an immediate value that is the number of block depths + // + 1 in rewriteDepthImmediates, and this fake BB will be removed at the end + // of the pass. + MachineBasicBlock *FakeCallerBB = nullptr; + MachineBasicBlock *getFakeCallerBlock(MachineFunction &MF) { + if (!FakeCallerBB) + FakeCallerBB = MF.CreateMachineBasicBlock(); + return FakeCallerBB; + } + + // Helper functions to register / unregister scope information created by + // marker instructions. 
+ void registerScope(MachineInstr *Begin, MachineInstr *End); + void registerTryScope(MachineInstr *Begin, MachineInstr *End, + MachineBasicBlock *EHPad); + void unregisterScope(MachineInstr *Begin); + +public: + static char ID; // Pass identification, replacement for typeid + XVMCFGStackify() : MachineFunctionPass(ID) {} + ~XVMCFGStackify() override { releaseMemory(); } + void releaseMemory() override; +}; +} // end anonymous namespace + +char XVMCFGStackify::ID = 0; +INITIALIZE_PASS(XVMCFGStackify, DEBUG_TYPE, + "Insert BLOCK/LOOP markers for XVM scopes", false, + false) + +FunctionPass *llvm::createXVMCFGStackify() { + return new XVMCFGStackify(); +} + +/// Test whether Pred has any terminators explicitly branching to MBB, as +/// opposed to falling through. Note that it's possible (eg. in unoptimized +/// code) for a branch instruction to both branch to a block and fallthrough +/// to it, so we check the actual branch operands to see if there are any +/// explicit mentions. +static bool explicitlyBranchesTo(MachineBasicBlock *Pred, + MachineBasicBlock *MBB) { + for (MachineInstr &MI : Pred->terminators()) + for (MachineOperand &MO : MI.explicit_operands()) + if (MO.isMBB() && MO.getMBB() == MBB) + return true; + return false; +} + +// Returns an iterator to the earliest position possible within the MBB, +// satisfying the restrictions given by BeforeSet and AfterSet. BeforeSet +// contains instructions that should go before the marker, and AfterSet contains +// ones that should go after the marker. In this function, AfterSet is only +// used for validation checking. 
+template +static MachineBasicBlock::iterator +getEarliestInsertPos(MachineBasicBlock *MBB, const Container &BeforeSet, + const Container &AfterSet) { + auto InsertPos = MBB->end(); + while (InsertPos != MBB->begin()) { + if (BeforeSet.count(&*std::prev(InsertPos))) { +#ifndef NDEBUG + // Validation check + for (auto Pos = InsertPos, E = MBB->begin(); Pos != E; --Pos) + assert(!AfterSet.count(&*std::prev(Pos))); +#endif + break; + } + --InsertPos; + } + return InsertPos; +} + +// Returns an iterator to the latest position possible within the MBB, +// satisfying the restrictions given by BeforeSet and AfterSet. BeforeSet +// contains instructions that should go before the marker, and AfterSet contains +// ones that should go after the marker. In this function, BeforeSet is only +// used for validation checking. +template +static MachineBasicBlock::iterator +getLatestInsertPos(MachineBasicBlock *MBB, const Container &BeforeSet, + const Container &AfterSet) { + auto InsertPos = MBB->begin(); + while (InsertPos != MBB->end()) { + if (AfterSet.count(&*InsertPos)) { +#ifndef NDEBUG + // Validation check + for (auto Pos = InsertPos, E = MBB->end(); Pos != E; ++Pos) + assert(!BeforeSet.count(&*Pos)); +#endif + break; + } + ++InsertPos; + } + return InsertPos; +} + +void ChangeBranchCondOpc(MachineInstr &MI, const XVMInstrInfo *TII) { + switch (MI.getOpcode()) { + case XVM::BUEQ_rr : MI.setDesc(TII->get(XVM::LOOP_BUEQ_rr)); break; + case XVM::BSNEQ_rr : MI.setDesc(TII->get(XVM::LOOP_BSNEQ_rr)); break; + case XVM::BSGE_rr : MI.setDesc(TII->get(XVM::LOOP_BSGE_rr)); break; + case XVM::BUGE_rr : MI.setDesc(TII->get(XVM::LOOP_BUGE_rr)); break; + case XVM::BSLE_rr : MI.setDesc(TII->get(XVM::LOOP_BSLE_rr)); break; + case XVM::BULE_rr : MI.setDesc(TII->get(XVM::LOOP_BULE_rr)); break; + case XVM::BSGT_rr : MI.setDesc(TII->get(XVM::LOOP_BSGT_rr)); break; + case XVM::BUGT_rr : MI.setDesc(TII->get(XVM::LOOP_BUGT_rr)); break; + case XVM::BSLT_rr : 
MI.setDesc(TII->get(XVM::LOOP_BSLT_rr)); break; + case XVM::BULT_rr : MI.setDesc(TII->get(XVM::LOOP_BULT_rr)); break; + case XVM::BSEQ_ri : MI.setDesc(TII->get(XVM::LOOP_BSEQ_ri)); break; + case XVM::BUEQ_ri : MI.setDesc(TII->get(XVM::LOOP_BUEQ_ri)); break; + case XVM::BSNEQ_ri : MI.setDesc(TII->get(XVM::LOOP_BSNEQ_ri)); break; + case XVM::BUNEQ_ri: MI.setDesc(TII->get(XVM::LOOP_BUNEQ_ri)); break; + case XVM::BSGE_ri : MI.setDesc(TII->get(XVM::LOOP_BSGE_ri)); break; + case XVM::BUGE_ri : MI.setDesc(TII->get(XVM::LOOP_BUGE_ri)); break; + case XVM::BSLE_ri : MI.setDesc(TII->get(XVM::LOOP_BSLE_ri)); break; + case XVM::BULE_ri : MI.setDesc(TII->get(XVM::LOOP_BULE_ri)); break; + case XVM::BSGT_ri : MI.setDesc(TII->get(XVM::LOOP_BSGT_ri)); break; + case XVM::BUGT_ri : MI.setDesc(TII->get(XVM::LOOP_BUGT_ri)); break; + case XVM::BSLT_ri : MI.setDesc(TII->get(XVM::LOOP_BSLT_ri)); break; + case XVM::BULT_ri : MI.setDesc(TII->get(XVM::LOOP_BULT_ri)); break; + default: return; + } +} + +void XVMCFGStackify::fixBackEdgesOfLoops(MachineFunction &MF) { + TII = MF.getSubtarget().getInstrInfo(); + auto &MLI = getAnalysis(); + for (auto &MBB : MF) { + MachineLoop *Loop = MLI.getLoopFor(&MBB); + if (!Loop) + continue; + + MachineBasicBlock *LoopHeader = Loop->getHeader(); + for (auto &MI : MBB.terminators()) { + /* skip the added opcode such as THEN ... 
*/ + if (MI.getNumOperands() <=0 ) { + continue; + } + if (TII->isUnCondBranch(&MI) && MI.getOperand(0).getMBB() == LoopHeader) { + if (&MBB != Loop->getBottomBlock()) { + BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(XVM::CONTINUE)); + } + MI.eraseFromParent(); + break; + } else if (TII->isCondBranch(&MI) && MI.getOperand(0).getMBB() == LoopHeader) { + uint32_t action_opcode = XVM::CONTINUE; + /* Fix Loop Exiting Fallthrough */ + if(&MBB == Loop->getBottomBlock() && &MI == &*(--MBB.end()) && MLI.getLoopFor(MBB.getFallThrough()) != Loop){ + TII->negateCondBranch(&MI); + action_opcode = XVM::BREAK; + } + MachineInstr *MIThen = MBB.getParent()->CreateMachineInstr(TII->get(XVM::THEN), DebugLoc()); + MBB.insertAfter(MI.getIterator(), MIThen); + MachineInstr *MIAction = MBB.getParent()->CreateMachineInstr(TII->get(action_opcode), DebugLoc()); + MBB.insertAfter(MIThen->getIterator(), MIAction); + MachineInstr *MIEndThen = MBB.getParent()->CreateMachineInstr(TII->get(XVM::END_THEN), DebugLoc()); + MBB.insertAfter(MIAction->getIterator(), MIEndThen); + MachineInstr *MIEndIf = MBB.getParent()->CreateMachineInstr(TII->get(XVM::END_IF), DebugLoc()); + MBB.insertAfter(MIEndThen->getIterator(), MIEndIf); + ChangeBranchCondOpc(MI, TII); + } + } + } +} + +void XVMCFGStackify::registerScope(MachineInstr *Begin, + MachineInstr *End) { + BeginToEnd[Begin] = End; + EndToBegin[End] = Begin; +} + +void XVMCFGStackify::unregisterScope(MachineInstr *Begin) { + assert(BeginToEnd.count(Begin)); + MachineInstr *End = BeginToEnd[Begin]; + assert(EndToBegin.count(End)); + BeginToEnd.erase(Begin); + EndToBegin.erase(End); +} + +static bool isChild(const MachineInstr &MI) { + if (MI.getNumOperands() == 0) + return false; + const MachineOperand &MO = MI.getOperand(0); + if (!MO.isReg() || MO.isImplicit() || !MO.isDef()) + return false; + Register Reg = MO.getReg(); + return Register::isVirtualRegister(Reg); +} + +/** if END_BLOCK is followed with END_LOOP in the same BB, and both of them + * are 
in the beginning of the BB, the break_imm needs to increase one. + * This is the case where there are multiple conditions ||ed or &&ed in a + * loop condition such as for or while loops. + * FIXME: we may find other approach for fixing this. +*/ +unsigned XVMCFGStackify::getBranchDepth( + const SmallVectorImpl &Stack, + const std::set *SetEndBlockLoop, + const MachineBasicBlock *MBB) { + unsigned Depth = 0; + for (auto X : reverse(Stack)) { + if (X.first == MBB) { + std::set::iterator I = SetEndBlockLoop->find(MBB); + if (I != SetEndBlockLoop->end()) + ++Depth; + break; + } + ++Depth; + } + assert(Depth < Stack.size() && "Branch destination should be in scope"); + return Depth; +} + +static bool isContinueBlock(MachineBasicBlock &MBB) { + for (auto &MI : MBB) { + if (MI.getOpcode() == XVM::CONTINUE) + return true; + } + return false; +} + +static inline MachineBasicBlock *getNextBlock(MachineBasicBlock &MBB) { + MachineFunction::iterator I = MBB.getIterator(); + MachineFunction::iterator E = MBB.getParent()->end(); + if (++I == E) + return nullptr; + return &*I; +} + +static inline MachineInstr *getNextInstruction(MachineInstr &MI) { + MachineBasicBlock::iterator I = MI.getIterator(); + MachineBasicBlock::iterator E = MI.getParent()->end(); + if (++I == E) + return nullptr; + return &*I; +} + +void XVMCFGStackify::insertWithBreak(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI) { + DebugLoc DL; + MachineInstr &Inst = *MBBI; + + MachineBasicBlock *EndBlock = &MBB.getParent()->back(); + const auto &TII = MBB.getParent()->getSubtarget().getInstrInfo(); + MBB.addSuccessor(EndBlock); + MachineInstr *BreakInstr = BuildMI(*MBB.getParent(), DL, TII->get(XVM::BR)).addMBB(EndBlock); + MBB.insertAfter(MBBI, BreakInstr); +} + +void XVMCFGStackify::insertRetBlock(MachineFunction &MF) { + const auto &TII = *MF.getSubtarget().getInstrInfo(); + MachineBasicBlock *BeginBlock = nullptr; + MachineBasicBlock *EndBlock = nullptr; + DebugLoc DL; + + BeginBlock = 
MF.CreateMachineBasicBlock(); + MachineInstr *Begin = + BuildMI(MF, DL, TII.get(XVM::BLOCK)); + BeginBlock->push_back(Begin); + MF.push_front(BeginBlock); + + EndBlock = MF.CreateMachineBasicBlock(); + MachineInstr *End = BuildMI(MF, DL, TII.get(XVM::END_BLOCK)); + EndBlock->push_back(End); + MachineInstr *Ret = BuildMI(MF, DL, TII.get(XVM::RETURN)); + EndBlock->push_back(Ret); + MF.push_back(EndBlock); + + ScopeTops.resize(MF.getNumBlockIDs() + 1); + updateScopeTops(BeginBlock, EndBlock); +} + +void XVMCFGStackify::removeInFunctionRet(MachineFunction &MF) { + bool retSeen = false; + bool retExists = false; + for (auto &MBB: MF) { + for (auto MBBI = MBB.begin(); MBBI != MBB.end(); ++MBBI) { + MachineInstr &Inst = *MBBI; + if (Inst.getOpcode() == XVM::RETURN) { + retExists = true; + if (getNextBlock(MBB) == nullptr) { + // This is the last block + if (getNextInstruction(Inst) != nullptr) { + // This is NOT the last instruction + insertRetBlock(MF); + insertWithBreak(MBB, MBBI); + MBBI = MBB.erase(MBBI); + } + } else { + // This is not the last block + if (retSeen) { + insertWithBreak(MBB, MBBI); + MBBI = MBB.erase(MBBI); + } else { + insertRetBlock(MF); + insertWithBreak(MBB, MBBI); + MBBI = MBB.erase(MBBI); + retSeen = true; + } + } + } + } + } + if (!retExists) { + const auto &TII = *MF.getSubtarget().getInstrInfo(); + DebugLoc DL; + MachineBasicBlock &EndBlock = MF.back(); + MachineInstr *ret = BuildMI(MF, DL, TII.get(XVM::RETURN)); + EndBlock.push_back(ret); + } +} + +/// Insert LOOP/BLOCK markers at appropriate places. +void XVMCFGStackify::placeMarkers(MachineFunction &MF) { + // We allocate one more than the number of blocks in the function to + // accommodate for the possible fake block we may insert at the end. + ScopeTops.resize(MF.getNumBlockIDs() + 1); + // Place the LOOP for MBB if MBB is the header of a loop. 
+ for (auto &MBB : MF) + placeLoopMarker(MBB); + + const MCAsmInfo *MCAI = MF.getTarget().getMCAsmInfo(); + for (auto &MBB : MF) { + // Place the BLOCK for MBB if MBB is branched to from above. + placeBlockMarker(MBB); + } + + removeInFunctionRet(MF); +} + +/// Insert a BLOCK marker for branches to MBB (if needed). +// TODO Consider a more generalized way of handling block (and also loop and +// try) signatures when we implement the multi-value proposal later. +void XVMCFGStackify::placeBlockMarker(MachineBasicBlock &MBB) { + assert(!MBB.isEHPad()); + MachineFunction &MF = *MBB.getParent(); + auto &MDT = getAnalysis(); + const auto &TII = *MF.getSubtarget().getInstrInfo(); + + // First compute the nearest common dominator of all forward non-fallthrough + // predecessors so that we minimize the time that the BLOCK is on the stack, + // which reduces overall stack height. + MachineBasicBlock *Header = nullptr; + bool IsBranchedTo = false; + int MBBNumber = MBB.getNumber(); + for (MachineBasicBlock *Pred : MBB.predecessors()) { + if (Pred->getNumber() < MBBNumber) { + Header = Header ? MDT.findNearestCommonDominator(Header, Pred) : Pred; + if (explicitlyBranchesTo(Pred, &MBB)) + IsBranchedTo = true; + } + } + if (!Header) + return; + if (!IsBranchedTo) + return; + + assert(&MBB != &MF.front() && "Header blocks shouldn't have predecessors"); + MachineBasicBlock *LayoutPred = MBB.getPrevNode(); + + // If the nearest common dominator is inside a more deeply nested context, + // walk out to the nearest scope which isn't more deeply nested. + for (MachineFunction::iterator I(LayoutPred), E(Header); I != E; --I) { + if (MachineBasicBlock *ScopeTop = ScopeTops[I->getNumber()]) { + if (ScopeTop->getNumber() > Header->getNumber()) { + // Skip over an intervening scope. + I = std::next(ScopeTop->getIterator()); + } else { + // We found a scope level at an appropriate depth. + Header = ScopeTop; + break; + } + } + } + + // Decide where in Header to put the BLOCK. 
+ + // Instructions that should go before the BLOCK. + SmallPtrSet BeforeSet; + // Instructions that should go after the BLOCK. + SmallPtrSet AfterSet; + for (const auto &MI : *Header) { + // If there is a previously placed LOOP marker and the bottom block of the + // loop is above MBB, it should be after the BLOCK, because the loop is + // nested in this BLOCK. Otherwise it should be before the BLOCK. + if (MI.getOpcode() == XVM::LOOP) { + auto *LoopBottom = BeginToEnd[&MI]->getParent()->getPrevNode(); + if (MBB.getNumber() > LoopBottom->getNumber()) + AfterSet.insert(&MI); +#ifndef NDEBUG + else + BeforeSet.insert(&MI); +#endif + } + + // If there is a previously placed BLOCK/TRY marker and its corresponding + // END marker is before the current BLOCK's END marker, that should be + // placed after this BLOCK. Otherwise it should be placed before this BLOCK + // marker. + if (MI.getOpcode() == XVM::BLOCK) { + if (BeginToEnd[&MI]->getParent()->getNumber() <= MBB.getNumber()) + AfterSet.insert(&MI); +#ifndef NDEBUG + else + BeforeSet.insert(&MI); +#endif + } + +#ifndef NDEBUG + // All END_(BLOCK|LOOP|TRY) markers should be before the BLOCK. + if (MI.getOpcode() == XVM::END_BLOCK || + MI.getOpcode() == XVM::END_LOOP) + BeforeSet.insert(&MI); +#endif + + // Terminators should go after the BLOCK. + if (MI.isTerminator()) + AfterSet.insert(&MI); + } + + // Local expression tree should go after the BLOCK. + for (auto I = Header->getFirstTerminator(), E = Header->begin(); I != E; + --I) { + if (std::prev(I)->isDebugInstr() || std::prev(I)->isPosition()) + continue; + if (isChild(*std::prev(I))) + AfterSet.insert(&*std::prev(I)); + else + break; + } + + // Add the BLOCK. 
+// XVM::BlockType ReturnType = XVM::BlockType::Void; + auto InsertPos = getLatestInsertPos(Header, BeforeSet, AfterSet); + MachineInstr *Begin = + BuildMI(*Header, InsertPos, Header->findDebugLoc(InsertPos), + TII.get(XVM::BLOCK)); +//FIXME: Check if we need it +// .addImm(int64_t(ReturnType)); + + // Decide where in Header to put the END_BLOCK. + BeforeSet.clear(); + AfterSet.clear(); + for (auto &MI : MBB) { +#ifndef NDEBUG + // END_BLOCK should precede existing LOOP and TRY markers. + if (MI.getOpcode() == XVM::LOOP) + AfterSet.insert(&MI); +#endif + + // If there is a previously placed END_LOOP marker and the header of the + // loop is above this block's header, the END_LOOP should be placed after + // the BLOCK, because the loop contains this block. Otherwise the END_LOOP + // should be placed before the BLOCK. The same for END_TRY. + if (MI.getOpcode() == XVM::END_LOOP) { + if (EndToBegin[&MI]->getParent()->getNumber() >= Header->getNumber()) + BeforeSet.insert(&MI); +#ifndef NDEBUG + else + AfterSet.insert(&MI); +#endif + } + } + + // Mark the end of the block. + InsertPos = getEarliestInsertPos(&MBB, BeforeSet, AfterSet); + MachineInstr *End = BuildMI(MBB, InsertPos, MBB.findPrevDebugLoc(InsertPos), + TII.get(XVM::END_BLOCK)); + registerScope(Begin, End); + + // Track the farthest-spanning scope that ends at this point. + updateScopeTops(Header, &MBB); +} + +/// Insert a LOOP marker for a loop starting at MBB (if it's a loop header). +void XVMCFGStackify::placeLoopMarker(MachineBasicBlock &MBB) { + MachineFunction &MF = *MBB.getParent(); + const auto &MLI = getAnalysis(); + SortRegionInfo SRI(MLI); + const auto &TII = *MF.getSubtarget().getInstrInfo(); + + MachineLoop *Loop = MLI.getLoopFor(&MBB); + if (!Loop || Loop->getHeader() != &MBB) + return; + + // The operand of a LOOP is the first block after the loop. If the loop is the + // bottom of the function, insert a dummy block at the end. 
+ MachineBasicBlock *Bottom = SRI.getBottom(Loop); + auto Iter = std::next(Bottom->getIterator()); + if (Iter == MF.end()) { + getAppendixBlock(MF); + Iter = std::next(Bottom->getIterator()); + } + MachineBasicBlock *AfterLoop = &*Iter; + + // Decide where in Header to put the LOOP. + SmallPtrSet BeforeSet; + SmallPtrSet AfterSet; + for (const auto &MI : MBB) { + // LOOP marker should be after any existing loop that ends here. Otherwise + // we assume the instruction belongs to the loop. + if (MI.getOpcode() == XVM::END_LOOP) + BeforeSet.insert(&MI); +#ifndef NDEBUG + else + AfterSet.insert(&MI); +#endif + } + + // Mark the beginning of the loop. + auto InsertPos = getEarliestInsertPos(&MBB, BeforeSet, AfterSet); + //FIXME: modify the form of the LOOP instruction + MachineInstr *Begin = BuildMI(MBB, InsertPos, MBB.findDebugLoc(InsertPos), + TII.get(XVM::LOOP)); +// .addImm(int64_t(XVM::BlockType::Void)); + + // Decide where in Header to put the END_LOOP. + BeforeSet.clear(); + AfterSet.clear(); +#ifndef NDEBUG + for (const auto &MI : MBB) + // Existing END_LOOP markers belong to parent loops of this loop + if (MI.getOpcode() == XVM::END_LOOP) + AfterSet.insert(&MI); +#endif + + // Mark the end of the loop (using arbitrary debug location that branched to + // the loop end as its location). + InsertPos = getEarliestInsertPos(AfterLoop, BeforeSet, AfterSet); + DebugLoc EndDL = AfterLoop->pred_empty() + ? 
DebugLoc() + : (*AfterLoop->pred_rbegin())->findBranchDebugLoc(); + MachineInstr *End = + BuildMI(*AfterLoop, InsertPos, EndDL, TII.get(XVM::END_LOOP)); + registerScope(Begin, End); + + assert((!ScopeTops[AfterLoop->getNumber()] || + ScopeTops[AfterLoop->getNumber()]->getNumber() < MBB.getNumber()) && + "With block sorting the outermost loop for a block should be first."); + updateScopeTops(&MBB, AfterLoop); +} + +void XVMCFGStackify::extendCondStmt(std::map CondBranchsWithDepth, + MachineFunction &MF) { + for (auto& I : CondBranchsWithDepth) { + MachineInstr *MI = I.first; + unsigned int depth = I.second; + bool isDone = false; + for (auto &MBB : reverse(MF)) { + for (MachineInstr &EachMI : llvm::reverse(MBB)) { + if (MI == &EachMI) { + MachineInstr *MI_THEN = MBB.getParent()->CreateMachineInstr(TII->get(XVM::THEN), DebugLoc()); + MBB.insertAfter(EachMI.getIterator(), MI_THEN); + + MachineInstr *MI_BREAK_IMM = MBB.getParent()->CreateMachineInstr(TII->get(XVM::BREAK_IMM), DebugLoc()); + MBB.insertAfter(MI_THEN->getIterator(), MI_BREAK_IMM); + MachineInstrBuilder MIB(MF, MI_BREAK_IMM); + MIB.addImm(depth); + + MachineInstr *MI_END_THEN = MBB.getParent()->CreateMachineInstr(TII->get(XVM::END_THEN), DebugLoc()); + MBB.insertAfter(MI_BREAK_IMM->getIterator(), MI_END_THEN); + + MachineInstr *MI_END_IF = MBB.getParent()->CreateMachineInstr(TII->get(XVM::END_IF), DebugLoc()); + MBB.insertAfter(MI_END_THEN->getIterator(), MI_END_IF); + isDone = true; + } + } + if (isDone) { + break; + } + } + } +} + +void XVMCFGStackify::rewriteDepthImmediates(MachineFunction &MF) { + // Now rewrite references to basic blocks to be depth immediates. 
+ std::map CondBranchsWithDepth; + TII = MF.getSubtarget().getInstrInfo(); + SmallVector Stack; + std::set SetEndBlockLoop; + SmallVector EHPadStack; + for (auto &MBB : reverse(MF)) { + for (MachineInstr &MI : llvm::reverse(MBB)) { + switch (MI.getOpcode()) { + case XVM::BLOCK: + assert(ScopeTops[Stack.back().first->getNumber()]->getNumber() <= + MBB.getNumber() && + "Block/try marker should be balanced"); + Stack.pop_back(); + break; + + case XVM::LOOP: + assert(Stack.back().first == &MBB && "Loop top should be balanced"); + Stack.pop_back(); + break; + + case XVM::END_BLOCK: + Stack.push_back(std::make_pair(&MBB, &MI)); + break; + + case XVM::END_LOOP: { + Stack.push_back(std::make_pair(EndToBegin[&MI]->getParent(), &MI)); + MachineInstr * PrevMI = MI.getPrevNode(); + if (PrevMI != NULL && PrevMI == MBB.begin()) { + if (PrevMI->getOpcode() == XVM::END_BLOCK) { + SetEndBlockLoop.insert(&MBB); + } + } + break; + } + default: + if (MI.isTerminator()) { + // Rewrite MBB operands to be depth immediates. 
+ SmallVector Ops(MI.operands()); + unsigned int Opcode = MI.getOpcode(); + unsigned int depth = 0; + while (MI.getNumOperands() > 0) + MI.removeOperand(MI.getNumOperands() - 1); + for (auto MO : Ops) { + if (MO.isMBB()) { + depth = getBranchDepth(Stack, &SetEndBlockLoop, MO.getMBB()); + MO = MachineOperand::CreateImm(depth); + } + MI.addOperand(MF, MO); + } + if (Opcode == XVM::BR) { + MI.setDesc(TII->get(XVM::BREAK_IMM)); + } else if (TII->isCondBranch(&MI) && !TII->isCondBranchProcessed(&MI)) { + /** add the following instructions: THEN, BRREAK_IMM and END_THEN */ + CondBranchsWithDepth.insert(std::make_pair(&MI, depth)); + } + } + break; + } + } + } + extendCondStmt(CondBranchsWithDepth, MF); + assert(Stack.empty() && "Control flow should be balanced"); +} + +void XVMCFGStackify::cleanupFunctionData(MachineFunction &MF) { + if (FakeCallerBB) + MF.deleteMachineBasicBlock(FakeCallerBB); + AppendixBB = FakeCallerBB = nullptr; +} + +void XVMCFGStackify::releaseMemory() { + ScopeTops.clear(); + BeginToEnd.clear(); + EndToBegin.clear(); +} + +bool XVMCFGStackify::runOnMachineFunction(MachineFunction &MF) { + LLVM_DEBUG(dbgs() << "********** CFG Stackifying **********\n" + "********** Function: " + << MF.getName() << '\n'); + const MCAsmInfo *MCAI = MF.getTarget().getMCAsmInfo(); + + releaseMemory(); + + // Place the BLOCK/LOOP/TRY markers to indicate the beginnings of scopes. + placeMarkers(MF); + + // Place the continue statements for each backedge of loops. + fixBackEdgesOfLoops(MF); + + // Convert MBB operands in terminators to relative depth immediates. + rewriteDepthImmediates(MF); + + // Add an end instruction at the end of the function body. 
+ const auto &TII = *MF.getSubtarget().getInstrInfo(); + if (!MF.getSubtarget() + .getTargetTriple() + .isOSBinFormatELF()) +//FIXME: See if we need it +// appendEndToFunction(MF, TII); + + cleanupFunctionData(MF); + + return true; +} #endif diff --git a/llvm/lib/Target/XVM/XVMCFGStructure.cpp b/llvm/lib/Target/XVM/XVMCFGStructure.cpp index 9427a2f4b6b7f8a4c06f8fc54319d1e556c3f159..aca00b1be1c65c5741ff5988d54f9c688c326372 100644 --- a/llvm/lib/Target/XVM/XVMCFGStructure.cpp +++ b/llvm/lib/Target/XVM/XVMCFGStructure.cpp @@ -1,4 +1,330 @@ #ifdef XVM_DYLIB_MODE -// Insert the XVM backend code here + +#include "XVM.h" +#include "XVMSubtarget.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/CodeGen/MachineDominators.h" +#include "llvm/CodeGen/MachinePostDominators.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineLoopInfo.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Analysis/DomTreeUpdater.h" + +using namespace llvm; + +#define DEBUG_TYPE "xvm-cfg-structure" + +namespace { +class XVMCFGStructure final : public MachineFunctionPass { + StringRef getPassName() const override { return "XVM CFG Structure"; } + + void getAnalysisUsage(AnalysisUsage &AU) const override { + AU.addRequired(); + AU.addRequired(); + AU.addRequired(); + MachineFunctionPass::getAnalysisUsage(AU); + } + + bool runOnMachineFunction(MachineFunction &MF) override; + + // Placing markers + void placeMarkers(MachineFunction &MF); + void placeLoopMarker(MachineBasicBlock &MBB); + void placeIfMarker(MachineBasicBlock &MBB); + + // Add break/continue for Loops + void fixLoops(MachineBasicBlock &MBB); +public: + static char ID; // Pass identification, replacement for typeid + XVMCFGStructure() : MachineFunctionPass(ID) {} + ~XVMCFGStructure() override {} +}; +} + +char XVMCFGStructure::ID = 0; +INITIALIZE_PASS(XVMCFGStructure, DEBUG_TYPE, + "Insert LOOP/IF markers for XVM scopes", false, + false) + +FunctionPass *llvm::createXVMCFGStructure() { + return new 
XVMCFGStructure(); +} + +static void getBeginInsertPos(MachineBasicBlock &MBB, + MachineBasicBlock::iterator &MBBI) { + MBBI = MBB.instr_begin(); + MachineBasicBlock::iterator E = MBB.instr_end(); + for (; MBBI != E; ++MBBI) { + if (MBBI->getOpcode() == XVM::LOOP || + MBBI->getOpcode() == XVM::THEN || + MBBI->getOpcode() == XVM::ELSE || + MBBI->getOpcode() == XVM::BLOCK) + continue; + else + return; + } +} + +static void getEndInsertPos(MachineBasicBlock &MBB, + MachineBasicBlock::iterator &MBBI) { + MBBI = MBB.instr_begin(); + MachineBasicBlock::iterator E = MBB.instr_end(); + for (; MBBI != E; ++MBBI) { + if (MBBI->getOpcode() == XVM::END_LOOP || + MBBI->getOpcode() == XVM::END_BLOCK || + MBBI->getOpcode() == XVM::END_THEN || + MBBI->getOpcode() == XVM::END_ELSE) { + return; + } + } +} + +void XVMCFGStructure::placeLoopMarker(MachineBasicBlock &MBB) { + MachineLoopInfo &MLI = getAnalysis(); + MachineFunction *MF = MBB.getParent(); + const auto &TII = MF->getSubtarget().getInstrInfo(); + + MachineLoop *Loop = MLI.getLoopFor(&MBB); + if (!Loop || Loop->getHeader() != &MBB) + return; + + //We have the following 2 assumptions: + // 1. the loop header should be first MBB within the loop + // 2. the loop MBBs are continguous. This should be guaranteed + // by the previous sorting pass. 
+ MachineBasicBlock *LoopTop = Loop->getTopBlock(); + MachineBasicBlock *LoopBottom = Loop->getBottomBlock(); + + //verify that the loop header should be the first MBB within the loop + if (LoopTop != &MBB) { + assert ("we assume the first block is the loop header!"); + } + + MachineBasicBlock::iterator LoopInsertPos, LoopEndInsertPos; + getBeginInsertPos(*LoopTop, LoopInsertPos); + getEndInsertPos(*LoopBottom, LoopEndInsertPos); + + MachineInstr *Begin = BuildMI(*LoopTop, LoopInsertPos, + LoopTop->findDebugLoc(LoopInsertPos), + TII->get(XVM::LOOP)); + //FIXME: the debug location might be problematic + MachineInstr *End = BuildMI(*LoopBottom, LoopEndInsertPos, + LoopBottom->findDebugLoc(LoopEndInsertPos), + TII->get(XVM::END_LOOP)); +} + +static bool isContinueBlock(MachineBasicBlock &MBB) { + for (auto &MI : MBB) { + if (MI.getOpcode() == XVM::CONTINUE) + return true; + } + return false; +} + +void XVMCFGStructure::placeIfMarker(MachineBasicBlock &MBB) { + // We assume 2 things: + // 1. the then part and the else part are contingous + // 2. 
at the end of the MBB, only one branch instruction + + // We need to negate the conditions in each branch + MachineFunction *MF = MBB.getParent(); + const auto &TII = MF->getSubtarget().getInstrInfo(); + auto &MDT = getAnalysis(); + auto &MPDT = getAnalysis(); + + if (!MBB.canFallThrough() || MBB.empty()) + return; + + MachineBasicBlock::iterator MBBFirstTerminator = MBB.getFirstTerminator(); + if (MBBFirstTerminator == MBB.end() || !TII->isCondBranch(&(*MBBFirstTerminator))) + return; + + MachineBasicBlock *ThenBeginBlock = MBB.getFallThrough(); + MachineBasicBlock *ElseOrOtherBeginBlock = MBB.back().getOperand(0).getMBB(); + MachineBasicBlock *ThenEndBlock = &*--ElseOrOtherBeginBlock->getIterator(); + + MachineBasicBlock::iterator ThenInsertPos, ThenEndInsertPos; + getBeginInsertPos(*ThenBeginBlock, ThenInsertPos); + getEndInsertPos(*ThenEndBlock, ThenEndInsertPos); + BuildMI(*ThenBeginBlock, ThenInsertPos, + ThenBeginBlock->findDebugLoc(ThenInsertPos), + TII->get(XVM::THEN)); + BuildMI(*ThenEndBlock, ThenEndInsertPos, + ThenEndBlock->findDebugLoc(ThenEndInsertPos), + TII->get(XVM::END_THEN)); + + bool HaveElse = !MPDT.properlyDominates(ElseOrOtherBeginBlock, ThenBeginBlock); + + MachineBasicBlock::iterator ElseInsertPos, ElseEndInsertPos; + MachineBasicBlock *ElseEndBlock = nullptr; + + if (HaveElse) { + // We need to handle Continue in a special case currently + if (isContinueBlock(*ElseOrOtherBeginBlock)) { + ElseEndBlock = ElseOrOtherBeginBlock; + getBeginInsertPos(*ElseOrOtherBeginBlock, ElseInsertPos) ; + getEndInsertPos(*ElseOrOtherBeginBlock, ElseEndInsertPos) ; + } else { + MachineBasicBlock *CommonSucc = MPDT.findNearestCommonDominator(ThenBeginBlock, ElseOrOtherBeginBlock); + ElseEndBlock = &*--CommonSucc->getIterator(); + getBeginInsertPos(*ElseOrOtherBeginBlock, ElseInsertPos) ; + getEndInsertPos(*ElseEndBlock, ElseEndInsertPos) ; + } + BuildMI(*ElseOrOtherBeginBlock, ElseInsertPos, + ElseOrOtherBeginBlock->findDebugLoc(ElseInsertPos), + 
TII->get(XVM::ELSE)); + BuildMI(*ElseEndBlock, ElseEndInsertPos, + ElseEndBlock->findDebugLoc(ElseEndInsertPos), + TII->get(XVM::END_ELSE)); + BuildMI(*ElseEndBlock, ++ElseEndInsertPos, + ElseEndBlock->findDebugLoc(ElseEndInsertPos), + TII->get(XVM::END_IF)); + } else { + BuildMI(*ThenEndBlock, ++ThenEndInsertPos, + ThenEndBlock->findDebugLoc(ThenEndInsertPos), + TII->get(XVM::END_IF)); + } +} + +void XVMCFGStructure::fixLoops(MachineBasicBlock &MBB) { + // We do the following operations: + // 1. Every backedge is a continue + // 2. Every exit condition leads to a break + // + // We assume the following 2: + // 1. The loop has single entry (reducible) + // 2. The loop exit block is unique + // Assumption 1 is realisitic in most of times. + // Assumption 2 is not. In next step, we add (nested) blocks which end before exit blocks, + // we added breaks (with indexes) in every exiting block + MachineFunction *MF = MBB.getParent(); + auto &MLI = getAnalysis(); + const auto &TII = MF->getSubtarget().getInstrInfo(); + MachineLoop *Loop = MLI.getLoopFor(&MBB); + SmallVector ExitingBlocks; + auto &MPDT = getAnalysis(); + using DomTreeT = PostDomTreeBase; + SmallVector PDTUpdates; + + if (!Loop || Loop->getHeader() != &MBB) // MBB must be loop head + return; + + Loop->getExitingBlocks(ExitingBlocks); + + // For every predecessor of loop header, if it is within the loop, + // then it is supposed to insert a continue at the backedge. 
+ for (MachineBasicBlock *Pred : MBB.predecessors()) { + if (Loop->contains(Pred)) { + // create continue block + MachineBasicBlock *ContinueBlock = MF->CreateMachineBasicBlock(); + auto ContinueInsertPos = ContinueBlock->begin(); + BuildMI(*ContinueBlock, ContinueInsertPos, + ContinueBlock->findDebugLoc(ContinueInsertPos), + TII->get(XVM::CONTINUE)); + + // directly put it after the source of the backedge, + // modify the branch target to the direct successor + // later in placeIfMarker, we will negate all branch conditions + bool canFallThrough = Pred->canFallThrough(); + MachineBasicBlock *NewSucc = canFallThrough ? + Pred->getFallThrough() : + ContinueBlock; + + //FIXME: Does adding the new block destroy the loop structure? + MachineFunction::iterator MBBI = ++Pred->getIterator(); + MF->insert(MBBI, ContinueBlock); + + MachineBasicBlock *Old = &MBB; + if (Old != NewSucc){ + // FIXME: do we need to do anything for else case?? + Pred->ReplaceUsesOfBlockWith(&MBB, NewSucc); + } + else{ + report_fatal_error("old MBB == new succ, need to handle this case"); + } + + MPDT.getBase().applyUpdates({{DomTreeT::Delete, &*Pred, &MBB}, + {DomTreeT::Insert, &*Pred, NewSucc}}); + if (canFallThrough) { + Pred->addSuccessorWithoutProb(ContinueBlock); + MPDT.getBase().applyUpdates({{DomTreeT::Insert, &*Pred, ContinueBlock}}); + } + ContinueBlock->addSuccessor(&MBB, BranchProbability :: getOne()); + MPDT.getBase().applyUpdates({{DomTreeT::Insert, ContinueBlock, &MBB}}); + // we jump to the fall through and the continue block became new fall through + Loop->addBasicBlockToLoop(ContinueBlock, MLI.getBase()); + } + } + // fin adding continue + + // For every ExitingBlock, we insert a break instruction + while (!ExitingBlocks.empty()) { + MachineBasicBlock *LoopExiting = ExitingBlocks.pop_back_val(); + assert(LoopExiting->canFallThrough() && "Loop exiting blocks must have fall through!"); + + // create break block + MachineBasicBlock *BreakBlock = MF->CreateMachineBasicBlock(); + auto 
BreakInsertPos = BreakBlock->begin(); + BuildMI(*BreakBlock, BreakInsertPos, + BreakBlock->findDebugLoc(BreakInsertPos), + TII->get(XVM::BREAK)); + + // TODO: insert blocks for multiple exits + MachineBasicBlock *FallThrough = LoopExiting->getFallThrough(); + MachineFunction::iterator MBBI2 = FallThrough->getIterator(); + MF->insert(MBBI2, BreakBlock); + Loop->addBasicBlockToLoop(BreakBlock, MLI.getBase()); + + // puts the break block in place + MachineBasicBlock *JumpTarget = LoopExiting->back().getOperand(0).getMBB(); + if (JumpTarget != FallThrough){ + // when the last instruction in the block is conditional branch + LoopExiting->ReplaceUsesOfBlockWith(JumpTarget, FallThrough); + LoopExiting->addSuccessorWithoutProb(BreakBlock); + } else { + // when the last instruction in the block is unconditional branch + // need to find the last conditional branch, insert break between LoopExiting its JumpTarget + for (MachineBasicBlock::reverse_iterator It = LoopExiting->rbegin(), E = LoopExiting->rend(); + It != E; ++It) { + MachineInstr *MI = &*It; + if (MI) { + if (TII->isCondBranch(MI)){ + JumpTarget = MI->getOperand(0).getMBB(); + LoopExiting->ReplaceUsesOfBlockWith(JumpTarget, BreakBlock); + break; + } + } + } + } + MPDT.getBase().applyUpdates({{DomTreeT::Delete, LoopExiting, JumpTarget}, + {DomTreeT::Insert, LoopExiting, FallThrough}}); + // The condition need to be negated + MPDT.getBase().applyUpdates({{DomTreeT::Insert, LoopExiting, BreakBlock}}); + BreakBlock->addSuccessor(JumpTarget, BranchProbability :: getOne()); + MPDT.getBase().applyUpdates({{DomTreeT::Insert, BreakBlock, JumpTarget}}); + } +} + +// Insert LOOP/END_LOOP/THEN/END_THEN/ELSE/ELSE_END +void XVMCFGStructure::placeMarkers(MachineFunction &MF) { + for (auto &MBB : MF) { + fixLoops(MBB); + placeLoopMarker(MBB); + } + + for (auto &MBB : MF) { + placeIfMarker(MBB); + } +} + +bool XVMCFGStructure::runOnMachineFunction(MachineFunction &MF) { + LLVM_DEBUG(dbgs() << "********** CFG Stackifying 
**********\n" + "********** Function: " + << MF.getName() << '\n'); + + placeMarkers(MF); + + return true; +} #endif diff --git a/llvm/lib/Target/XVM/XVMExpandPseudoInsts.cpp b/llvm/lib/Target/XVM/XVMExpandPseudoInsts.cpp index 9427a2f4b6b7f8a4c06f8fc54319d1e556c3f159..960fcc1d28f87c29c7768469f671a21c9d33fd49 100644 --- a/llvm/lib/Target/XVM/XVMExpandPseudoInsts.cpp +++ b/llvm/lib/Target/XVM/XVMExpandPseudoInsts.cpp @@ -1,4 +1,179 @@ #ifdef XVM_DYLIB_MODE -// Insert the XVM backend code here + +#include "XVM.h" +#include "XVMInstrInfo.h" +#include "XVMTargetMachine.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" + +using namespace llvm; + +#define XVM_EXPAND_PSEUDO_NAME "XVM pseudo instruction expansion pass" + +namespace { +class XVMExpandPseudo : public MachineFunctionPass { +public: + static char ID; + XVMExpandPseudo() : MachineFunctionPass(ID) { + initializeXVMExpandPseudoPass(*PassRegistry::getPassRegistry()); + } + bool runOnMachineFunction(MachineFunction &MF) override; + StringRef getPassName() const override { return XVM_EXPAND_PSEUDO_NAME; } +private: + bool expandMBB(MachineBasicBlock &MBB); + bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, + MachineBasicBlock::iterator &NextMBB); + bool expandSelectCC(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, + MachineBasicBlock::iterator &NextMBBI); +}; + char XVMExpandPseudo::ID = 0; +} + +bool XVMExpandPseudo::runOnMachineFunction(MachineFunction &MF) { + bool Modified = false; + for (auto &MBB : MF) + Modified |= expandMBB(MBB); + return Modified; +} + +bool XVMExpandPseudo::expandMBB(MachineBasicBlock &MBB) { + bool Modified = false; + + MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end(); + int InstNumber = std::distance(MBB.begin(), MBB.end()); + for (int i = 0; i < InstNumber; i++) { + MachineBasicBlock::iterator NMBBI = std::next(MBBI); + Modified |= expandMI(MBB, MBBI, NMBBI); + MBBI = NMBBI; + } + return 
Modified; +} + +bool XVMExpandPseudo::expandMI(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, + MachineBasicBlock::iterator &NextMBBI) { + // XVMInstrInfo::getInstSizeInBytes expects that the total size of the + // expanded instructions for each pseudo is correct in the Size field of the + // tablegen definition for the pseudo. + switch (MBBI->getOpcode()) { + default: + return false; + case XVM::PseudoSelectCC_ri: + case XVM::PseudoSelectCC_rr: + return expandSelectCC(MBB, MBBI, NextMBBI); + } +} + +static unsigned getBranchOpcodeFromSelectCC(MachineInstr &MI) { + assert((MI.getOpcode() == XVM::PseudoSelectCC_ri || + MI.getOpcode() == XVM::PseudoSelectCC_rr) && + "The instruction should be a pseudo select cc!"); + bool IsRROp = MI.getOpcode() == XVM::PseudoSelectCC_rr; + unsigned Cond = MI.getOperand(3).getImm(); + unsigned NewCond; + switch (Cond) { +#define SET_NEWCC(X, Y) \ + case ISD::X: \ + NewCond = IsRROp ? XVM::Y##_rr : XVM::Y##_ri; \ + break + SET_NEWCC(SETGT, BSGT); + SET_NEWCC(SETUGT, BUGT); + SET_NEWCC(SETGE, BSGE); + SET_NEWCC(SETUGE, BUGE); + SET_NEWCC(SETEQ, BUEQ); + SET_NEWCC(SETNE, BSNEQ); + SET_NEWCC(SETLT, BSLT); + SET_NEWCC(SETULT, BULT); + SET_NEWCC(SETLE, BSLE); + SET_NEWCC(SETULE, BULE); + default: + report_fatal_error("unimplemented select CondCode " + Twine(Cond)); + } + return NewCond; +} + +bool XVMExpandPseudo::expandSelectCC(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, + MachineBasicBlock::iterator &NextMBBI) { + MachineInstr &MI = *MBBI; + const TargetInstrInfo *TII = MBB.getParent()->getSubtarget().getInstrInfo(); + DebugLoc DL = MI.getDebugLoc(); + unsigned Opc = MI.getOpcode(); + MachineFunction *MF = MBB.getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + + // To "insert" a SELECT instruction, we actually have to insert the diamond + // control-flow pattern. 
The incoming instruction knows the destination vreg + // to set, the condition code register to branch on, the true/false values to + // select between, and a branch opcode to use. + MachineFunction::iterator I = ++MBB.getIterator(); + + MachineBasicBlock *TMBB = MF->CreateMachineBasicBlock(); + MachineBasicBlock *FMBB = MF->CreateMachineBasicBlock(); + MachineBasicBlock *SuccMBB = MF->CreateMachineBasicBlock(); + + MF->insert(I, FMBB); + MF->insert(I, TMBB); + MF->insert(I, SuccMBB); + + // Update machine-CFG edges by transferring all successors of the current + // block to the new block which will contain the Phi node for the select. + SuccMBB->splice(SuccMBB->begin(), &MBB, NextMBBI, MBB.end()); + SuccMBB->transferSuccessorsAndUpdatePHIs(&MBB); + + //Construct the conditional branch in MBB + unsigned NewCC = getBranchOpcodeFromSelectCC(MI); + + BuildMI(MBB, MBBI, DL, TII->get(NewCC)).addMBB(TMBB) + .add(MI.getOperand(1)) + .add(MI.getOperand(2)); + BuildMI(MBB, MBBI, DL, TII->get(XVM::BR)).addMBB(FMBB); + MBB.addSuccessor(TMBB); + MBB.addSuccessor(FMBB); + + //Next, add the move instruction in FMBB and TMBB + MachineOperand Tval = MI.getOperand(4); + MachineOperand Fval = MI.getOperand(5); + + assert (Tval.isReg() && Fval.isReg() && "value should be reg!"); + + MachineBasicBlock::iterator FMBBInsertPos = FMBB->begin(); + MachineBasicBlock::iterator TMBBInsertPos = TMBB->begin(); + + Register FReg = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + Register TReg = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + + BuildMI(*FMBB, FMBBInsertPos, FMBB->findDebugLoc(FMBBInsertPos), + TII->get(XVM::MOV_rr), FReg).add(Fval); + BuildMI(*FMBB, FMBBInsertPos, + FMBB->findDebugLoc(FMBBInsertPos), + TII->get(XVM::BR)).addMBB(SuccMBB); + FMBB->addSuccessor(SuccMBB); + + BuildMI(*TMBB, TMBBInsertPos, TMBB->findDebugLoc(TMBBInsertPos), + TII->get(XVM::MOV_rr), TReg).add(Tval); + BuildMI(*TMBB, TMBBInsertPos, + TMBB->findDebugLoc(TMBBInsertPos), + 
TII->get(XVM::BR)).addMBB(SuccMBB); + TMBB->addSuccessor(SuccMBB); + + //Last, we add the PHI instruction in SuccMBB + MachineBasicBlock::iterator PHIInsertPos = SuccMBB->begin(); + BuildMI(*SuccMBB, PHIInsertPos, SuccMBB->findDebugLoc(PHIInsertPos), + TII->get(XVM::PHI), MI.getOperand(0).getReg()).addReg(FReg) + .addMBB(FMBB) + .addReg(TReg) + .addMBB(TMBB); + MBBI->eraseFromParent(); + return true; +} + +INITIALIZE_PASS(XVMExpandPseudo, "xvm-expand-pseudo", + XVM_EXPAND_PSEUDO_NAME, false, false) +namespace llvm { + +FunctionPass *createXVMExpandPseudoPass() { return new XVMExpandPseudo(); } + +} #endif diff --git a/llvm/lib/Target/XVM/XVMFrameLowering.cpp b/llvm/lib/Target/XVM/XVMFrameLowering.cpp index e85115bef43f24d35d3628583fe24ec66a0afff2..348ae307c5be7776816331ebcba8686ccf600e34 100644 --- a/llvm/lib/Target/XVM/XVMFrameLowering.cpp +++ b/llvm/lib/Target/XVM/XVMFrameLowering.cpp @@ -10,6 +10,97 @@ // //===----------------------------------------------------------------------===// #ifdef XVM_DYLIB_MODE -// Insert the XVM backend code here + +#include "XVM_def.h" +#include "XVM.h" +#include "XVMFrameLowering.h" +#include "XVMInstrInfo.h" +#include "XVMSubtarget.h" +#include "MCTargetDesc/XVMInstPrinter.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +using namespace llvm; + +bool XVMFrameLowering::hasFP(const MachineFunction &MF) const { return false; } + +bool XVMFrameLowering::needsSPForLocalFrame( + const MachineFunction &MF) const { + auto &MFI = MF.getFrameInfo(); + return MFI.getStackSize() || MFI.adjustsStack() || hasFP(MF); +} + +bool XVMFrameLowering::needsSP(const MachineFunction &MF) const { + return needsSPForLocalFrame(MF); +} + +unsigned XVMFrameLowering::getSPReg(const MachineFunction &MF) { + return XVM::SP; +} + +unsigned XVMFrameLowering::getOpcSubRef(const MachineFunction &MF) { + return XVM::SubRef_ri; +} 
+ +unsigned XVMFrameLowering::getOpcAddRef(const MachineFunction &MF) { + return XVM::AddRef_ri; +} + +unsigned +XVMFrameLowering::getOpcGlobSet(const MachineFunction &MF) { + return XVM::G_GLOBAL_VALUE; +} + +void XVMFrameLowering::emitPrologue(MachineFunction &MF, + MachineBasicBlock &MBB) const +{ + auto &MFI = MF.getFrameInfo(); + if (!needsSP(MF)) { + return; + } + uint64_t StackSize = MFI.getStackSize(); + auto &ST = MF.getSubtarget(); + const auto *TII = ST.getInstrInfo(); + auto &MRI = MF.getRegInfo(); + auto InsertPt = MBB.begin(); + DebugLoc DL; + + unsigned SPReg = getSPReg(MF); + + if (StackSize > 0) { + /* prepare stack space for the function to use */ + BuildMI(MBB, InsertPt, DL, TII->get(XVM::SubRef_ri), SPReg).addReg(SPReg).addImm(StackSize); + } +} + +void XVMFrameLowering::emitEpilogue(MachineFunction &MF, + MachineBasicBlock &MBB) const +{ + uint64_t StackSize = MF.getFrameInfo().getStackSize(); + if (!needsSP(MF)) { + return; + } + auto &ST = MF.getSubtarget(); + const auto *TII = ST.getInstrInfo(); + auto &MRI = MF.getRegInfo(); + auto InsertPt = MBB.getFirstTerminator(); + DebugLoc DL; + + if (InsertPt != MBB.end()) + DL = InsertPt->getDebugLoc(); + unsigned SPReg = getSPReg(MF); + + if (StackSize > 0) { + BuildMI(MBB, InsertPt, DL, TII->get(XVM::AddRef_ri), SPReg).addReg(SPReg).addImm(StackSize); + } +} + +void XVMFrameLowering::determineCalleeSaves(MachineFunction &MF, + BitVector &SavedRegs, + RegScavenger *RS) const +{ + TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS); +} #endif diff --git a/llvm/lib/Target/XVM/XVMISelDAGToDAG.cpp b/llvm/lib/Target/XVM/XVMISelDAGToDAG.cpp index ce5114d51d0fcaad2d4d04388e9e48bd2c11fea3..af70cce50a07333a4fcf1078dc7d9f60e679f5d0 100644 --- a/llvm/lib/Target/XVM/XVMISelDAGToDAG.cpp +++ b/llvm/lib/Target/XVM/XVMISelDAGToDAG.cpp @@ -11,6 +11,187 @@ // //===----------------------------------------------------------------------===// #ifdef XVM_DYLIB_MODE -// Insert the XVM backend code here +#include 
"XVM.h" +#include "XVMRegisterInfo.h" +#include "XVMSubtarget.h" +#include "XVMTargetMachine.h" +#include "llvm/CodeGen/FunctionLoweringInfo.h" +#include "llvm/CodeGen/MachineConstantPool.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/SelectionDAGISel.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/IntrinsicInst.h" +//#include "llvm/IR/IntrinsicsXVM.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/Endian.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Target/TargetMachine.h" + +using namespace llvm; + +#define DEBUG_TYPE "xvm-isel" + +// Instruction Selector Implementation +namespace { + +class XVMDAGToDAGISel : public SelectionDAGISel { + + /// Subtarget - Keep a pointer to the XVMSubtarget around so that we can + /// make the right decision when generating code for different subtargets. + const XVMSubtarget *Subtarget; + +public: + explicit XVMDAGToDAGISel(XVMTargetMachine &TM) + : SelectionDAGISel(TM), Subtarget(nullptr) {} + + StringRef getPassName() const override { + return "XVM DAG->DAG Pattern Instruction Selection"; + } + + bool runOnMachineFunction(MachineFunction &MF) override { + LLVM_DEBUG(dbgs() << "********** XVMDAGToDAGISel **********\n" + "********** Function: " + << MF.getName() << '\n'); + // Reset the subtarget each time through. + Subtarget = &MF.getSubtarget(); + return SelectionDAGISel::runOnMachineFunction(MF); + } + + void PreprocessISelDAG() override; + bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintCode, + std::vector &OutOps) override; +private: +// Include the pieces autogenerated from the target description. +#include "XVMGenDAGISel.inc" + + void Select(SDNode *N) override; + + // Complex Pattern for address selection. 
+ bool SelectAddr(SDValue Addr, SDValue &Base, SDValue &Offset); + bool SelectFIAddr(SDValue Addr, SDValue &Base, SDValue &Offset); +}; +} // namespace + +// ComplexPattern used on XVM Load/Store instructions +bool XVMDAGToDAGISel::SelectAddr(SDValue Addr, SDValue &Base, SDValue &Offset) { + // if Address is FI, get the TargetFrameIndex. + SDLoc DL(Addr); + if (auto *FIN = dyn_cast(Addr)) { + Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64); + Offset = CurDAG->getTargetConstant(0, DL, MVT::i64); + return true; + } + + if (Addr.getOpcode() == ISD::TargetExternalSymbol || + Addr.getOpcode() == ISD::TargetGlobalAddress) + return false; + + // Addresses of the form Addr+const or Addr|const + if (CurDAG->isBaseWithConstantOffset(Addr)) { + auto *CN = cast(Addr.getOperand(1)); + if (isInt<16>(CN->getSExtValue())) { + // If the first operand is a FI, get the TargetFI Node + if (auto *FIN = dyn_cast(Addr.getOperand(0))) + Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64); + else + Base = Addr.getOperand(0); + + Offset = CurDAG->getTargetConstant(CN->getSExtValue(), DL, MVT::i64); + return true; + } + } + + Base = Addr; + Offset = CurDAG->getTargetConstant(0, DL, MVT::i64); + return true; +} + +// ComplexPattern used on XVM FI instruction +bool XVMDAGToDAGISel::SelectFIAddr(SDValue Addr, SDValue &Base, + SDValue &Offset) { + SDLoc DL(Addr); + + if (!CurDAG->isBaseWithConstantOffset(Addr)) { + return false; + } + + // Addresses of the form Addr+const or Addr|const + auto *CN = cast(Addr.getOperand(1)); + if (isInt<16>(CN->getSExtValue())) { + // If the first operand is a FI, get the TargetFI Node + if (auto *FIN = dyn_cast(Addr.getOperand(0))) + Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64); + else { + return false; + } + Offset = CurDAG->getTargetConstant(CN->getSExtValue(), DL, MVT::i64); + return true; + } + return false; +} + +bool XVMDAGToDAGISel::SelectInlineAsmMemoryOperand( + const SDValue &Op, unsigned ConstraintCode, 
std::vector &OutOps) { + SDValue Op0, Op1; + switch (ConstraintCode) { + default: + return true; + case InlineAsm::Constraint_m: // memory + if (!SelectAddr(Op, Op0, Op1)) { + return true; + } + break; + } + + SDLoc DL(Op); + SDValue AluOp = CurDAG->getTargetConstant(ISD::ADD, DL, MVT::i32);; + OutOps.push_back(Op0); + OutOps.push_back(Op1); + OutOps.push_back(AluOp); + return false; +} + +void XVMDAGToDAGISel::Select(SDNode *Node) { + unsigned Opcode = Node->getOpcode(); + + // If we have a custom node, we already have selected! + if (Node->isMachineOpcode()) { + LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n'); + return; + } + + // tablegen selection should be handled here. + switch (Opcode) { + default: + break; + case ISD::FrameIndex: { + int FI = cast(Node)->getIndex(); + EVT VT = Node->getValueType(0); + SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT); + unsigned Opc = XVM::MOV_rr; + if (Node->hasOneUse()) { + CurDAG->SelectNodeTo(Node, Opc, VT, TFI); + return; + } + ReplaceNode(Node, CurDAG->getMachineNode(Opc, SDLoc(Node), VT, TFI)); + return; + } + } + + // Select the default instruction + SelectCode(Node); +} + +void XVMDAGToDAGISel::PreprocessISelDAG() { + +} + +FunctionPass *llvm::createXVMISelDag(XVMTargetMachine &TM) { + return new XVMDAGToDAGISel(TM); +} #endif diff --git a/llvm/lib/Target/XVM/XVMISelLowering.cpp b/llvm/lib/Target/XVM/XVMISelLowering.cpp index c6c08fda9a30df31dda57b628a484469b3a79def..fe50f9ebefaf872d8f17c5221a3b152c4ea45bc5 100644 --- a/llvm/lib/Target/XVM/XVMISelLowering.cpp +++ b/llvm/lib/Target/XVM/XVMISelLowering.cpp @@ -12,6 +12,922 @@ //===----------------------------------------------------------------------===// #ifdef XVM_DYLIB_MODE -// Insert the XVM backend code here + +#include "XVM_def.h" +#include "XVMISelLowering.h" +#include "XVM.h" +#include "XVMSubtarget.h" +#include "XVMTargetMachine.h" +#include "XVMMachineFunctionInfo.h" +#include "llvm/Analysis/ConstantFolding.h" +#include 
"llvm/CodeGen/CallingConvLower.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" +#include "llvm/CodeGen/ValueTypes.h" +#include "llvm/IR/DiagnosticInfo.h" +#include "llvm/IR/DiagnosticPrinter.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +#include +#include +#include + +using namespace llvm; + +#define DEBUG_TYPE "xvm-lower" + +static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg) { + MachineFunction &MF = DAG.getMachineFunction(); + DAG.getContext()->diagnose( + DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc())); +} + +static int ShiftAndGet16Bits(uint64_t num, int n) { + return (num >> n) & 0xFFFF; +} + +static bool is_valid_immediate_size(int32_t imm) +{ + return imm <= 0x3FFF && imm >= 0; +} + +static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg, + SDValue Val) { + MachineFunction &MF = DAG.getMachineFunction(); + std::string Str; + raw_string_ostream OS(Str); + OS << Msg; + Val->print(OS); + OS.flush(); + DAG.getContext()->diagnose( + DiagnosticInfoUnsupported(MF.getFunction(), Str, DL.getDebugLoc())); +} + +static bool hasFP(const MachineFunction &MF) { + return false; +} + +static bool needsSPForLocalFrame( + const MachineFunction &MF) { + auto &MFI = MF.getFrameInfo(); + return MFI.getStackSize() || MFI.adjustsStack() || hasFP(MF); +} + +static bool needsSP(const MachineFunction &MF) { + return needsSPForLocalFrame(MF); +} + +static unsigned getBranchOpcodeFromSelectCC(MachineInstr &MI) { + assert((MI.getOpcode() == XVM::PseudoSelectCC_ri || + MI.getOpcode() == XVM::PseudoSelectCC_rr) && + "The instruction should be a pseudo select cc!"); + bool IsRROp = MI.getOpcode() == XVM::PseudoSelectCC_rr; + if (!IsRROp) { + int64_t imm32 = 
MI.getOperand(2).getImm(); + IsRROp = !(is_valid_immediate_size(imm32)); + } + unsigned Cond = MI.getOperand(3).getImm(); + unsigned NewCond; + switch (Cond) { +#define SET_NEWCC(X, Y) \ + case ISD::X: \ + NewCond = IsRROp ? XVM::Y##_rr : XVM::Y##_ri; \ + break + SET_NEWCC(SETGT, BSGT); + SET_NEWCC(SETUGT, BUGT); + SET_NEWCC(SETGE, BSGE); + SET_NEWCC(SETUGE, BUGE); + SET_NEWCC(SETEQ, BUEQ); + SET_NEWCC(SETNE, BSNEQ); + SET_NEWCC(SETLT, BSLT); + SET_NEWCC(SETULT, BULT); + SET_NEWCC(SETLE, BSLE); + SET_NEWCC(SETULE, BULE); + default: + report_fatal_error("unimplemented select CondCode " + Twine(Cond)); + } + return NewCond; +} + +XVMTargetLowering::XVMTargetLowering(const TargetMachine &TM, + const XVMSubtarget &STI) + : TargetLowering(TM), Subtarget(&STI) { + + // Set up the register classes. + addRegisterClass(MVT::i64, &XVM::XVMGPRRegClass); + + // Compute derived properties from the register classes + computeRegisterProperties(STI.getRegisterInfo()); + + setStackPointerRegisterToSaveRestore(XVM::R11); + + setOperationAction(ISD::BRCOND, MVT::Other, Custom); + setOperationAction(ISD::BR_CC, MVT::i64, Expand); + setOperationAction(ISD::SELECT, MVT::i64, Expand); + setOperationAction(ISD::SETCC, MVT::i64, Expand); + setOperationAction(ISD::SELECT_CC, MVT::i64, Custom); + + setOperationAction(ISD::BR_JT, MVT::Other, Expand); + setOperationAction(ISD::BRIND, MVT::Other, Expand); + + setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); + + setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom); + setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); + setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); + + setOperationAction(ISD::CTPOP, MVT::i64, Expand); + setOperationAction(ISD::CTTZ, MVT::i64, Expand); + setOperationAction(ISD::CTLZ, MVT::i64, Expand); + setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand); + setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand); + + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); + 
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand); + + setOperationAction({ISD::VASTART, ISD::VAARG}, MVT::Other, Custom); + setOperationAction({ISD::VACOPY, ISD::VAEND}, MVT::Other, Expand); + + setOperationAction(ISD::ROTL, MVT::i64, Expand); + setOperationAction(ISD::ROTR, MVT::i64, Expand); + + setOperationAction(ISD::BSWAP, MVT::i64, Expand); + + // Extended load operations for i1 types must be promoted + for (MVT VT : MVT::integer_valuetypes()) { + setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote); + setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote); + setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); + } + setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i8, Expand); + setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i16, Expand); + setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Expand); + + setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i8, Expand); + setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i16, Expand); + setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i32, Expand); + + addRegisterClass(MVT::i64, &XVM::XVMGPRRegClass); + + setBooleanContents(ZeroOrOneBooleanContent); + + // Function alignments + setMinFunctionAlignment(Align(8)); + setPrefFunctionAlignment(Align(8)); + + unsigned CommonMaxStores = (unsigned) 0xFFFFFFFF; + MaxStoresPerMemset = MaxStoresPerMemsetOptSize = CommonMaxStores; + MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = CommonMaxStores; + MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize = CommonMaxStores; + MaxLoadsPerMemcmp = MaxLoadsPerMemcmpOptSize = CommonMaxStores; +} + +bool XVMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { + return false; +} + +bool XVMTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { + if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) + return false; + unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); + unsigned NumBits2 = 
Ty2->getPrimitiveSizeInBits(); + return NumBits1 > NumBits2; +} + +bool XVMTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { + if (!VT1.isInteger() || !VT2.isInteger()) + return false; + unsigned NumBits1 = VT1.getSizeInBits(); + unsigned NumBits2 = VT2.getSizeInBits(); + return NumBits1 > NumBits2; +} + +bool XVMTargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const { + // because we only have 64 bit registers and 64 bit ops + // extend must always be performed + return false; +} + +bool XVMTargetLowering::isZExtFree(EVT VT1, EVT VT2) const { + return false; +} + +XVMTargetLowering::ConstraintType +XVMTargetLowering::getConstraintType(StringRef Constraint) const { + if (Constraint.size() == 1) { + switch (Constraint[0]) { + default: + break; + case 'w': + return C_RegisterClass; + } + } + + return TargetLowering::getConstraintType(Constraint); +} + +std::pair +XVMTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, + StringRef Constraint, + MVT VT) const { + if (Constraint.size() == 1) + // GCC Constraint Letters + switch (Constraint[0]) { + case 'r': // GENERAL_REGS + return std::make_pair(0U, &XVM::XVMGPRRegClass); + default: + break; + } + + return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); +} + +void XVMTargetLowering::ReplaceNodeResults( + SDNode *N, SmallVectorImpl &Results, SelectionDAG &DAG) const { + const char *err_msg; + uint32_t Opcode = N->getOpcode(); + switch (Opcode) { + default: + report_fatal_error("Unhandled custom legalization"); + case ISD::ATOMIC_LOAD_ADD: + case ISD::ATOMIC_LOAD_AND: + case ISD::ATOMIC_LOAD_OR: + case ISD::ATOMIC_LOAD_XOR: + case ISD::ATOMIC_SWAP: + case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: + break; + } + + SDLoc DL(N); + fail(DL, DAG, err_msg); +} + +SDValue XVMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { + switch (Op.getOpcode()) { + case ISD::BRCOND: + return LowerBRCOND(Op, DAG); + case ISD::GlobalAddress: + return LowerGlobalAddress(Op, DAG); + 
case ISD::SELECT_CC: + return LowerSELECT_CC(Op, DAG); + case ISD::VASTART: + return LowerVASTART(Op, DAG); + case ISD::VAARG: + return LowerVAARG(Op, DAG); + case ISD::DYNAMIC_STACKALLOC: + report_fatal_error("Unsupported dynamic stack allocation"); + default: + llvm_unreachable("unimplemented operand"); + } +} + +// Calling Convention Implementation +#include "XVMGenCallingConv.inc" + +SDValue XVMTargetLowering::LowerFormalArguments( + SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, + const SmallVectorImpl &Ins, const SDLoc &DL, + SelectionDAG &DAG, SmallVectorImpl &InVals) const { + switch (CallConv) { + default: + report_fatal_error("Unsupported calling convention"); + case CallingConv::C: + case CallingConv::Fast: + break; + } + + MachineFunction &MF = DAG.getMachineFunction(); + MachineRegisterInfo &RegInfo = MF.getRegInfo(); + + // Assign locations to all of the incoming arguments. + SmallVector ArgLocs; + CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); + CCInfo.AnalyzeFormalArguments(Ins, CC_XVM64); + bool doesNeedSP = needsSP(MF); + + for (auto &VA : ArgLocs) { + if (VA.isRegLoc()) { + // Arguments passed in registers + EVT RegVT = VA.getLocVT(); + MVT::SimpleValueType SimpleTy = RegVT.getSimpleVT().SimpleTy; + switch (SimpleTy) { + default: { + errs() << "LowerFormalArguments Unhandled argument type: " + << RegVT.getEVTString() << '\n'; + llvm_unreachable(nullptr); + } + case MVT::i32: + case MVT::i64: + Register VReg = RegInfo.createVirtualRegister(&XVM::XVMGPRRegClass); + RegInfo.addLiveIn(VA.getLocReg(), VReg); + SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, RegVT); + + // If this is an value that has been promoted to wider types, insert an + // assert[sz]ext to capture this, then truncate to the right size. 
+ if (VA.getLocInfo() == CCValAssign::SExt) + ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue, + DAG.getValueType(VA.getValVT())); + else if (VA.getLocInfo() == CCValAssign::ZExt) + ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue, + DAG.getValueType(VA.getValVT())); + + if (VA.getLocInfo() != CCValAssign::Full) + ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue); + + InVals.push_back(ArgValue); + + break; + } + } else { + MVT LocVT = VA.getLocVT(); + MachineFrameInfo &MFI = MF.getFrameInfo(); + EVT ValVT = VA.getValVT(); + // sanity check + assert(VA.isMemLoc()); + /* The stack pointer offset is relative to the caller stack frame. + * we also need to add the offset created in in callee for saving callee saved regs + * we do not need to consider further callee stack offset, + * it will be handled later in eliminateFrameIndex + */ + int FI = 0; + if (doesNeedSP) { + const MCPhysReg *CSRegs = MF.getRegInfo().getCalleeSavedRegs(); + unsigned CSRcounter = 0; + for ( ; CSRegs[CSRcounter]; ++CSRcounter); + FI = MFI.CreateFixedObject(ValVT.getSizeInBits()/8, + VA.getLocMemOffset() + CSRcounter*8, true); + } else { + FI = MFI.CreateFixedObject(ValVT.getSizeInBits()/8, + VA.getLocMemOffset(), true); + } + + // Create load nodes to retrieve arguments from the stack + SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); + SDValue Load = DAG.getLoad( + LocVT, DL, Chain, FIN, + MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)); + InVals.push_back(Load); + } + } + + std::vector OutChains; + if (IsVarArg) { + const TargetRegisterClass *RC = &XVM::XVMGPRRegClass; + MachineFrameInfo &MFI = MF.getFrameInfo(); + MachineRegisterInfo &RegInfo = MF.getRegInfo(); + XVMMachineFunctionInfo *XFI = MF.getInfo(); + + static const MCPhysReg ArgGPRs[] = {XVM::R0, XVM::R1, XVM::R2, XVM::R3, XVM::R4, XVM::R5}; + ArrayRef ArgRegs = makeArrayRef(ArgGPRs); + // The first register index: for example, + // in foo(int t1, int t2, 
...), FirstRegIdx is 2 (R0 and R1 are used for t1 and t2); + unsigned FirstRegIdx = CCInfo.getFirstUnallocated(ArgRegs); + + int VaArgOffset, VaArgsSaveSize; + if (FirstRegIdx == ArgRegs.size()) { + VaArgOffset = CCInfo.getNextStackOffset(); + VaArgsSaveSize = 0; + } else { + VaArgsSaveSize = 8 * (ArgRegs.size() - FirstRegIdx); + VaArgOffset = -VaArgsSaveSize; + } + + XFI->SetVarArgsFrameIndex( + MFI.CreateFixedObject(8, // size + VaArgOffset, // SPOffset + true)); // IsImmutable + // Copy the registers that have not been used for var argument passing + // assume per size is always 8 + for (unsigned I = FirstRegIdx; I < ArgRegs.size(); I++, VaArgOffset += 8) { + const Register Reg = RegInfo.createVirtualRegister(RC); + RegInfo.addLiveIn(ArgRegs[I], Reg); + SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, MVT::i64); + int FI = MFI.CreateFixedObject(8, VaArgOffset, true); + SDValue FrameIndex = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); + SDValue Store = DAG.getStore(Chain, DL, ArgValue, FrameIndex, MachinePointerInfo::getFixedStack(MF, FI)); + // Init the mem operand always. + cast(Store.getNode())->getMemOperand()->setValue((Value*)nullptr); + OutChains.push_back(Store); + } + } + + if (!OutChains.empty()) { + OutChains.push_back(Chain); + Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains); + } + + return Chain; +} + +SDValue XVMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, + SmallVectorImpl &InVals) const { + SelectionDAG &DAG = CLI.DAG; + auto &Outs = CLI.Outs; + auto &OutVals = CLI.OutVals; + auto &Ins = CLI.Ins; + SDValue Chain = CLI.Chain; + SDValue Callee = CLI.Callee; + bool &IsTailCall = CLI.IsTailCall; + CallingConv::ID CallConv = CLI.CallConv; + bool IsVarArg = CLI.IsVarArg; + MachineFunction &MF = DAG.getMachineFunction(); + + // XVM target does not support tail call optimization. 
+ IsTailCall = false; + + switch (CallConv) { + default: + report_fatal_error("Unsupported calling convention"); + case CallingConv::Fast: + case CallingConv::C: + break; + } + const XVMRegisterInfo *TRI = Subtarget->getRegisterInfo(); + const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); + assert(Mask && "Missing call preserved mask for calling convention"); + + // Analyze operands of the call, assigning locations to each operand. + SmallVector ArgLocs; + CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); + + CCInfo.AnalyzeCallOperands(Outs, CC_XVM64); + + unsigned NumBytes = CCInfo.getNextStackOffset(); + + for (auto &Arg : Outs) { + ISD::ArgFlagsTy Flags = Arg.Flags; + if (!Flags.isByVal()) + continue; + } + + auto PtrVT = getPointerTy(MF.getDataLayout()); + Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL); + + SmallVector, 6> RegsToPass; + SmallVector MemOpChains; + + SDValue StackPtr = DAG.getCopyFromReg(Chain, CLI.DL, XVM::SP, PtrVT); + + // Walk arg assignments + for (unsigned i = 0, e = Outs.size(); i != e; ++i) { + CCValAssign &VA = ArgLocs[i]; + SDValue Arg = OutVals[i]; + + // Promote the value if needed. 
+ switch (VA.getLocInfo()) { + default: + llvm_unreachable("Unknown loc info"); + case CCValAssign::Full: + break; + case CCValAssign::SExt: + Arg = DAG.getNode(ISD::SIGN_EXTEND, CLI.DL, VA.getLocVT(), Arg); + break; + case CCValAssign::ZExt: + Arg = DAG.getNode(ISD::ZERO_EXTEND, CLI.DL, VA.getLocVT(), Arg); + break; + case CCValAssign::AExt: + Arg = DAG.getNode(ISD::ANY_EXTEND, CLI.DL, VA.getLocVT(), Arg); + break; + } + + // Push arguments into RegsToPass vector + if (VA.isRegLoc()) + RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); + else{ + assert(VA.isMemLoc()); + + int32_t Offset = VA.getLocMemOffset(); + SDValue PtrOff = DAG.getIntPtrConstant(Offset, CLI.DL); + + SDValue DstAddr = DAG.getNode(ISD::ADD, CLI.DL, PtrVT, StackPtr, PtrOff); + MachinePointerInfo DstInfo = MachinePointerInfo::getStack(MF, Offset); + + SDValue Store = DAG.getStore(Chain, CLI.DL, Arg, DstAddr, DstInfo); + MemOpChains.push_back(Store); + } + } + + if (!MemOpChains.empty()) + Chain = DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, MemOpChains); + + SDValue InFlag; + + // Build a sequence of copy-to-reg nodes chained together with token chain and + // flag operands which copy the outgoing args into registers. The InFlag in + // necessary since all emitted instructions must be stuck together. + for (auto &Reg : RegsToPass) { + Chain = DAG.getCopyToReg(Chain, CLI.DL, Reg.first, Reg.second, InFlag); + InFlag = Chain.getValue(1); + } + + // If the callee is a GlobalAddress node (quite common, every direct call is) + // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. + // Likewise ExternalSymbol -> TargetExternalSymbol. 
+ if (GlobalAddressSDNode *G = dyn_cast(Callee)) { + Callee = DAG.getTargetGlobalAddress(G->getGlobal(), CLI.DL, PtrVT, + G->getOffset(), 0); + } else if (ExternalSymbolSDNode *E = dyn_cast(Callee)) { + Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, 0); + fail(CLI.DL, DAG, Twine("A call to built-in function '" + + StringRef(E->getSymbol()) + + "' is not supported.")); + } + + // Returns a chain & a flag for retval copy to use. + SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); + SmallVector Ops; + Ops.push_back(Chain); + Ops.push_back(Callee); + Ops.push_back(DAG.getRegisterMask(Mask)); + + // Add argument registers to the end of the list so that they are + // known live into the call. + for (auto &Reg : RegsToPass) + Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType())); + + if (InFlag.getNode()) + Ops.push_back(InFlag); + + Chain = DAG.getNode(XVMISD::CALL, CLI.DL, NodeTys, Ops); + InFlag = Chain.getValue(1); + + // Create the CALLSEQ_END node. + Chain = DAG.getCALLSEQ_END( + Chain, DAG.getConstant(NumBytes, CLI.DL, PtrVT, true), + DAG.getConstant(0, CLI.DL, PtrVT, true), InFlag, CLI.DL); + InFlag = Chain.getValue(1); + + // Handle result values, copying them out of physregs into vregs that we + // return. + return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, CLI.DL, DAG, + InVals); +} + +SDValue +XVMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, + bool IsVarArg, + const SmallVectorImpl &Outs, + const SmallVectorImpl &OutVals, + const SDLoc &DL, SelectionDAG &DAG) const { + unsigned Opc = XVMISD::RET_FLAG; + + // CCValAssign - represent the assignment of the return value to a location + SmallVector RVLocs; + MachineFunction &MF = DAG.getMachineFunction(); + + // CCState - Info about the registers and stack slot. 
+ CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext()); + + if (MF.getFunction().getReturnType()->isAggregateType()) { + fail(DL, DAG, "only integer returns supported"); + return DAG.getNode(Opc, DL, MVT::Other, Chain); + } + + // Analize return values. + CCInfo.AnalyzeReturn(Outs, RetCC_XVM64); + + SDValue Flag; + SmallVector RetOps(1, Chain); + + // Copy the result values into the output registers. + for (unsigned i = 0; i != RVLocs.size(); ++i) { + CCValAssign &VA = RVLocs[i]; + assert(VA.isRegLoc() && "Can only return in registers!"); + + Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Flag); + + // Guarantee that all emitted copies are stuck together, + // avoiding something bad. + Flag = Chain.getValue(1); + RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); + } + + RetOps[0] = Chain; // Update chain. + + // Add the flag if we have it. + if (Flag.getNode()) + RetOps.push_back(Flag); + + return DAG.getNode(Opc, DL, MVT::Other, RetOps); +} + +SDValue XVMTargetLowering::LowerCallResult( + SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg, + const SmallVectorImpl &Ins, const SDLoc &DL, + SelectionDAG &DAG, SmallVectorImpl &InVals) const { + + MachineFunction &MF = DAG.getMachineFunction(); + // Assign locations to each value returned by this call. + SmallVector RVLocs; + CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext()); + + if (Ins.size() >= 2) { + fail(DL, DAG, "only small returns supported"); + for (unsigned i = 0, e = Ins.size(); i != e; ++i) + InVals.push_back(DAG.getConstant(0, DL, Ins[i].VT)); + return DAG.getCopyFromReg(Chain, DL, 1, Ins[0].VT, InFlag).getValue(1); + } + + CCInfo.AnalyzeCallResult(Ins, RetCC_XVM64); + + // Copy all of the result registers out of their specified physreg. 
+ for (auto &Val : RVLocs) { + Chain = DAG.getCopyFromReg(Chain, DL, Val.getLocReg(), + Val.getValVT(), InFlag).getValue(1); + InFlag = Chain.getValue(2); + InVals.push_back(Chain.getValue(0)); + } + + return Chain; +} + +SDValue XVMTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { + SDValue CondV = Op.getOperand(1); + SDLoc DL(Op); + + if (CondV.getOpcode() == ISD::SETCC && + CondV.getOperand(0).getValueType() == MVT::i64) { + SDValue LHS = CondV.getOperand(0); + SDValue RHS = CondV.getOperand(1); + + SDValue TargetCC = CondV.getOperand(2); + + return DAG.getNode(XVMISD::BR_CC, DL, Op.getValueType(), + Op.getOperand(0), LHS, RHS, TargetCC, Op.getOperand(2)); + } else if (CondV.getOpcode() == ISD::AssertZext && + CondV.getOperand(0).getValueType() == MVT::i64) { + SDValue LHS = CondV.getOperand(0); + SDValue RHS = DAG.getConstant(1, DL, MVT::i64); + + SDValue TargetCC = DAG.getCondCode(ISD::SETEQ); + + return DAG.getNode(XVMISD::BR_CC, DL, Op.getValueType(), + Op.getOperand(0), LHS, RHS, TargetCC, Op.getOperand(2)); + } else if (CondV.getOpcode() == ISD::AND || CondV.getOpcode() == ISD::OR || + CondV.getOpcode() == ISD::XOR || CondV.getOpcode() == ISD::Constant) { + SDValue LHS = CondV; + if (CondV.getNumOperands()>0) { + LHS = CondV.getOperand(0); + } + SDValue RHS = DAG.getConstant(0, DL, MVT::i64); + SDValue TargetCC = DAG.getCondCode(ISD::SETNE); + + return DAG.getNode(XVMISD::BR_CC, DL, Op.getValueType(), + Op.getOperand(0), LHS, RHS, TargetCC, Op.getOperand(2)); + } + //FIXME: complete the lowering for other cases + return Op; +} + +SDValue XVMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { + SDValue LHS = Op.getOperand(0); + SDValue RHS = Op.getOperand(1); + SDValue TrueV = Op.getOperand(2); + SDValue FalseV = Op.getOperand(3); + ISD::CondCode CC = cast(Op.getOperand(4))->get(); + SDLoc DL(Op); + + SDValue TargetCC = DAG.getConstant(CC, DL, LHS.getValueType()); + SDVTList VTs = DAG.getVTList(Op.getValueType(), 
MVT::Glue); + SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV}; + + return DAG.getNode(XVMISD::SELECT_CC, DL, VTs, Ops); +} + +/// VASTART - have three operands: an input chain, +/// pointer, and a SRCVALUE. +SDValue XVMTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { + MachineFunction &MF = DAG.getMachineFunction(); + int VarArgsFrameIndex = MF.getInfo()->GetVarArgsFrameIndex(); + SDValue FrameIndex = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy(MF.getDataLayout())); + + const Value *SV = cast(Op.getOperand(2))->getValue(); + SDValue RetSDValue = DAG.getStore(Op.getOperand(0), SDLoc(Op), FrameIndex, Op.getOperand(1), + MachinePointerInfo() // unsigned AddressSpace = 0, int64_t offset = 0 + ); + return RetSDValue; +} + +/// VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, +/// and the alignment. It returns a pair of values: the vaarg value and a +/// new chain. +SDValue XVMTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { + SDLoc DL(Op); + MachinePointerInfo MPI(0U, 0L); + EVT PointerType = getPointerTy(DAG.getMachineFunction().getDataLayout()); + SDValue LoadPointer = DAG.getLoad(PointerType, DL, Op.getOperand(0), Op.getOperand(1), MPI); + SDValue Offset = DAG.getConstant(8, DL, MVT::i64); + SDValue AddPointer = DAG.getNode(ISD::ADD, DL, PointerType, LoadPointer, Offset); + SDValue StorePointer = DAG.getStore(LoadPointer.getValue(1), DL, AddPointer, Op.getOperand(1), MPI); + SDValue RetSDValue = DAG.getLoad(Op.getValueType(), DL, StorePointer, LoadPointer, MPI); + return RetSDValue; +} + +const char *XVMTargetLowering::getTargetNodeName(unsigned Opcode) const { + switch ((XVMISD::NodeType)Opcode) { + case XVMISD::FIRST_NUMBER: + break; + case XVMISD::RET_FLAG: + return "XVMISD::RET_FLAG"; + case XVMISD::CALL: + return "XVMISD::CALL"; + case XVMISD::SELECT_CC: + return "XVMISD::SELECT_CC"; + case XVMISD::BR_CC: + return "XVMISD::BR_CC"; + case XVMISD::Wrapper: + return "XVMISD::Wrapper"; + case 
XVMISD::MEMCPY: + return "XVMISD::MEMCPY"; + } + return nullptr; +} + +SDValue XVMTargetLowering::LowerGlobalAddress(SDValue Op, + SelectionDAG &DAG) const { + auto N = cast(Op); + assert(N->getOffset() == 0 && "Invalid offset for global address"); + + SDLoc DL(Op); + const GlobalValue *GV = N->getGlobal(); + SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i64); + + return DAG.getNode(XVMISD::Wrapper, DL, MVT::i64, GA); +} + +unsigned +XVMTargetLowering::EmitSubregExt(MachineInstr &MI, MachineBasicBlock *BB, + unsigned Reg, bool isSigned) const { + return 0; +} + +MachineBasicBlock * +XVMTargetLowering::EmitInstrWithCustomInserterSelectCC(MachineInstr &MI, + MachineBasicBlock *BB) const { + const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo(); + DebugLoc DL = MI.getDebugLoc(); + unsigned Opc = MI.getOpcode(); + + bool isSelectRROp = (Opc == XVM::PseudoSelectCC_rr); + +#ifndef NDEBUG + bool isSelectRIOp = (Opc == XVM::PseudoSelectCC_ri); + + assert((isSelectRROp || isSelectRIOp) && "Unexpected instr type to insert"); +#endif + // To "insert" a SELECT instruction, we actually have to insert the diamond + // control-flow pattern. The incoming instruction knows the destination vreg + // to set, the condition code register to branch on, the true/false values to + // select between, and a branch opcode to use. + const BasicBlock *LLVM_BB = BB->getBasicBlock(); + MachineFunction::iterator I = ++BB->getIterator(); + + // ThisMBB: + // ... + // TrueVal = ... + // jmp_XX r1, r2 goto Copy1MBB + // fallthrough --> Copy0MBB + MachineBasicBlock *ThisMBB = BB; + MachineFunction *F = BB->getParent(); + MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *Copy1MBB = F->CreateMachineBasicBlock(LLVM_BB); + + F->insert(I, Copy0MBB); + F->insert(I, Copy1MBB); + // Update machine-CFG edges by transferring all successors of the current + // block to the new block which will contain the Phi node for the select. 
+ Copy1MBB->splice(Copy1MBB->begin(), BB, + std::next(MachineBasicBlock::iterator(MI)), BB->end()); + Copy1MBB->transferSuccessorsAndUpdatePHIs(BB); + // Next, add the true and fallthrough blocks as its successors. + BB->addSuccessor(Copy0MBB); + BB->addSuccessor(Copy1MBB); + + // Insert Branch if Flag + int NewCC = getBranchOpcodeFromSelectCC(MI); + + Register LHS = MI.getOperand(1).getReg(); + if (isSelectRROp) { + Register RHS = MI.getOperand(2).getReg(); + BuildMI(BB, DL, TII.get(NewCC)).addMBB(Copy1MBB).addReg(LHS).addReg(RHS); + } else { + int64_t imm32 = MI.getOperand(2).getImm(); + // Check before we build J*_ri instruction. + assert (isInt<32>(imm32)); + if (is_valid_immediate_size(imm32)) { + BuildMI(BB, DL, TII.get(NewCC)).addMBB(Copy1MBB).addReg(LHS).addImm(imm32); + } else { + Register ScratchReg; + MachineRegisterInfo &MRI = F->getRegInfo(); + ScratchReg = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + uint64_t MostSignificantBits = ShiftAndGet16Bits(imm32, 48); + uint64_t UpperMiddleBits = ShiftAndGet16Bits(imm32, 32); + uint64_t LowerMiddleBits = ShiftAndGet16Bits(imm32, 16); + uint64_t LeastSignificantBits = ShiftAndGet16Bits(imm32, 0); + + Register VRegForMov = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + MachineInstr * MovMI = BuildMI(BB, DL, TII.get(XVM::MOV_ri), VRegForMov) + .addImm(0); + Register PrevReg = VRegForMov; + if (LeastSignificantBits) { + Register VRegForMovk1 = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + BuildMI(BB, DL, TII.get(XVM::MOVK_ri), VRegForMovk1) + .addReg(PrevReg).addImm(LeastSignificantBits).addImm(0); + PrevReg = VRegForMovk1; + } + if (LowerMiddleBits) { + Register VRegForMovk2 = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + BuildMI(BB, DL, TII.get(XVM::MOVK_ri), VRegForMovk2) + .addReg(PrevReg).addImm(LowerMiddleBits).addImm(0); + PrevReg = VRegForMovk2; + } + if (UpperMiddleBits) { + Register VRegForMovk3 = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + BuildMI(BB, DL, 
TII.get(XVM::MOVK_ri), VRegForMovk3) + .addReg(PrevReg).addImm(UpperMiddleBits).addImm(0); + PrevReg = VRegForMovk3; + } + if (MostSignificantBits) { + Register VRegForMovk4 = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + BuildMI(BB, DL, TII.get(XVM::MOVK_ri), VRegForMovk4) + .addReg(PrevReg).addImm(MostSignificantBits).addImm(0); + } + BuildMI(BB, DL, TII.get(NewCC)).addMBB(Copy1MBB).addReg(LHS).addReg(PrevReg); + } + } + + // Copy0MBB: + // %FalseValue = ... + // # fallthrough to Copy1MBB + BB = Copy0MBB; + + // Update machine-CFG edges + BB->addSuccessor(Copy1MBB); + + // Copy1MBB: + // %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ] + // ... + BB = Copy1MBB; + BuildMI(*BB, BB->begin(), DL, TII.get(XVM::PHI), MI.getOperand(0).getReg()) + .addReg(MI.getOperand(5).getReg()) + .addMBB(Copy0MBB) + .addReg(MI.getOperand(4).getReg()) + .addMBB(ThisMBB); + + MI.eraseFromParent(); // The pseudo instruction is gone now. + return BB; +} + +MachineBasicBlock * +XVMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, + MachineBasicBlock *BB) const { + const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo(); + DebugLoc DL = MI.getDebugLoc(); + unsigned Opc = MI.getOpcode(); + switch(Opc) { + case XVM::PseudoSelectCC_rr: + case XVM::PseudoSelectCC_ri: + return EmitInstrWithCustomInserterSelectCC(MI, BB); + } + return BB; +} + +EVT XVMTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &, + EVT VT) const { + return MVT::i64; +} + +MVT XVMTargetLowering::getScalarShiftAmountTy(const DataLayout &DL, + EVT VT) const { + return MVT::i64; +} + +bool XVMTargetLowering::isLegalAddressingMode(const DataLayout &DL, + const AddrMode &AM, Type *Ty, + unsigned AS, + Instruction *I) const { + // No global is ever allowed as a base. + if (AM.BaseGV) + return false; + + switch (AM.Scale) { + case 0: // "r+i" or just "i", depending on HasBaseReg. + break; + case 1: + if (!AM.HasBaseReg) // allow "r+i". 
+ break; + return false; // disallow "r+r" or "r+r+i". + default: + return false; + } + + return true; +} #endif diff --git a/llvm/lib/Target/XVM/XVMInstrInfo.cpp b/llvm/lib/Target/XVM/XVMInstrInfo.cpp index aaeaca5c7e0369d706e6f063d998b013a1bf1721..c05322fd6f5fde6fbc1ac1c543958012659ca51a 100644 --- a/llvm/lib/Target/XVM/XVMInstrInfo.cpp +++ b/llvm/lib/Target/XVM/XVMInstrInfo.cpp @@ -11,6 +11,467 @@ //===----------------------------------------------------------------------===// #ifdef XVM_DYLIB_MODE -// Insert the XVM backend code here + +#include "XVM.h" +#include "XVM_def.h" +#include "XVMInstrInfo.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/IR/DebugLoc.h" +#include "llvm/Support/ErrorHandling.h" +#include +#include + +#define GET_INSTRINFO_CTOR_DTOR +#include "XVMGenInstrInfo.inc" + +using namespace llvm; + +static CondCode getCondFromBranchOpc(unsigned Opc) { + switch (Opc) { + default: + return COND_INVALID; + case XVM::BUEQ_rr: + case XVM::BUEQ_ri: + case XVM::BSEQ_ri: + return COND_EQ; + case XVM::BSNEQ_rr: + case XVM::BSNEQ_ri: + return COND_NE; + case XVM::BSGE_rr: + case XVM::BSGE_ri: + return COND_GE; + case XVM::BUGE_rr: + case XVM::BUGE_ri: + return COND_UGE; + case XVM::BSLE_rr: + case XVM::BSLE_ri: + return COND_LE; + case XVM::BULE_rr: + case XVM::BULE_ri: + return COND_ULE; + case XVM::BSGT_rr: + case XVM::BSGT_ri: + return COND_GT; + case XVM::BUGT_rr: + case XVM::BUGT_ri: + return COND_UGT; + case XVM::BSLT_rr: + case XVM::BSLT_ri: + return COND_LT; + case XVM::BULT_rr: + case XVM::BULT_ri: + return COND_ULT; + } +} + +static unsigned getBranchOpcFromCond(ArrayRef &Cond) { + assert(Cond.size() == 3 && "Expected an operation and 2 operands!"); + assert(Cond[0].isImm() && "Expected an imm for operation!"); + + switch (Cond[0].getImm()) { + default: + //Invalid operation, bail out + return 0; + case COND_EQ: + return Cond[2].isImm() ? 
XVM::BSEQ_ri : XVM::BUEQ_rr; + case COND_NE: + return Cond[2].isImm() ? XVM::BSNEQ_ri : XVM::BSNEQ_rr; + case COND_GE: + return Cond[2].isImm() ? XVM::BSGE_ri : XVM::BSGE_rr; + case COND_UGE: + return Cond[2].isImm() ? XVM::BUGE_ri : XVM::BUGE_rr; + case COND_LE: + return Cond[2].isImm() ? XVM::BSLE_ri : XVM::BSLE_rr; + case COND_ULE: + return Cond[2].isImm() ? XVM::BULE_ri : XVM::BULE_rr; + case COND_GT: + return Cond[2].isImm() ? XVM::BSGT_ri : XVM::BSGT_rr; + case COND_UGT: + return Cond[2].isImm() ? XVM::BUGT_ri : XVM::BUGT_rr; + case COND_LT: + return Cond[2].isImm() ? XVM::BSLT_ri : XVM::BSLT_rr; + case COND_ULT: + return Cond[2].isImm() ? XVM::BULT_ri : XVM::BULT_rr; + } +} + +XVMInstrInfo::XVMInstrInfo() + : XVMGenInstrInfo(XVM::ADJCALLSTACKDOWN, XVM::ADJCALLSTACKUP) {} + +void XVMInstrInfo::copyPhysReg(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + const DebugLoc &DL, MCRegister DestReg, + MCRegister SrcReg, bool KillSrc) const { + if (XVM::XVMGPRRegClass.contains(DestReg, SrcReg) || + (XVM::XVMRRRegClass.contains(SrcReg)) && XVM::XVMGPRRegClass.contains(DestReg)) + BuildMI(MBB, I, DL, get(XVM::MOV_rr), DestReg) + .addReg(SrcReg, getKillRegState(KillSrc)); + else + llvm_unreachable("To-be-extended: reg-to-reg copy"); + + return; + +} + +static int ShiftAndGet16Bits(uint64_t num, int n) { + return (num >> n) & 0xFFFF; +} + +static inline void replace_imm_with_movk(MachineBasicBlock *BB, + MachineBasicBlock::iterator MI, + DebugLoc dl) +{ + const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo(); + uint64_t imm = MI->getOperand(2).getImm(); + uint64_t most_significant_bits = ShiftAndGet16Bits(imm, 48); + uint64_t upper_significant_bits = ShiftAndGet16Bits(imm, 32); + uint64_t lower_significant_bits = ShiftAndGet16Bits(imm, 16); + uint64_t least_significant_bits = ShiftAndGet16Bits(imm, 0); + if (least_significant_bits) { + BuildMI(*BB, MI, dl, TII.get(XVM::MOVK_ri)) + .addReg(XVM::R2, 
RegState::Define).addImm(0).addImm(least_significant_bits).addImm(0); + } + if (lower_significant_bits) { + BuildMI(*BB, MI, dl, TII.get(XVM::MOVK_ri)) + .addReg(XVM::R2, RegState::Define).addImm(0).addImm(lower_significant_bits).addImm(1); + } + if (upper_significant_bits) { + BuildMI(*BB, MI, dl, TII.get(XVM::MOVK_ri)) + .addReg(XVM::R2, RegState::Define).addImm(0).addImm(upper_significant_bits).addImm(2); + } + if (most_significant_bits) { + BuildMI(*BB, MI, dl, TII.get(XVM::MOVK_ri)) + .addReg(XVM::R2, RegState::Define).addImm(0).addImm(most_significant_bits).addImm(3); + } + +} + +void XVMInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MI) const { + int func_num = GetFuncIndex("memcpy"); + MachineBasicBlock *BB = MI->getParent(); + Register DstReg = MI->getOperand(0).getReg(); + Register SrcReg = MI->getOperand(1).getReg(); + DebugLoc dl = MI->getDebugLoc(); + + BuildMI(*BB, MI, dl, get(XVM::LDD)) + .addReg(XVM::R0, RegState::Define).addReg(DstReg).addImm(0); + BuildMI(*BB, MI, dl, get(XVM::LDD)) + .addReg(XVM::R1, RegState::Define).addReg(SrcReg).addImm(0); + if (MI->getOpcode() == XVM::MEMCPY_ri) { + replace_imm_with_movk(BB, MI, dl); + } else { + Register CopyLen = MI->getOperand(2).getReg(); + BuildMI(*BB, MI, dl, get(XVM::LDD)) + .addReg(XVM::R2, RegState::Define).addReg(CopyLen).addImm(0); + } + + BuildMI(*BB, MI, dl, get(XVM::CALL_IMM)).addImm(func_num); + BB->erase(MI); +} + +void XVMInstrInfo::expandMEMMOVE(MachineBasicBlock::iterator MI) const { + int func_num = GetFuncIndex("memmove"); + MachineBasicBlock *BB = MI->getParent(); + Register DstReg = MI->getOperand(0).getReg(); + Register SrcReg = MI->getOperand(1).getReg(); + DebugLoc dl = MI->getDebugLoc(); + + BuildMI(*BB, MI, dl, get(XVM::LDD)) + .addReg(XVM::R0, RegState::Define).addReg(DstReg).addImm(0); + BuildMI(*BB, MI, dl, get(XVM::LDD)) + .addReg(XVM::R1, RegState::Define).addReg(SrcReg).addImm(0); + if (MI->getOpcode() == XVM::MEMMOV_ri) { + replace_imm_with_movk(BB, MI, dl); + } else { 
+ Register CopyLen = MI->getOperand(2).getReg(); + BuildMI(*BB, MI, dl, get(XVM::LDD)) + .addReg(XVM::R2, RegState::Define).addReg(CopyLen).addImm(0); + } + + BuildMI(*BB, MI, dl, get(XVM::CALL_IMM)).addImm(func_num); + BB->erase(MI); +} + +void XVMInstrInfo::expandMEMSET(MachineBasicBlock::iterator MI) const { + int func_num = GetFuncIndex("memset"); + MachineBasicBlock *BB = MI->getParent(); + Register DstReg = MI->getOperand(0).getReg(); + Register SrcReg = MI->getOperand(1).getReg(); + DebugLoc dl = MI->getDebugLoc(); + + BuildMI(*BB, MI, dl, get(XVM::LDD)) + .addReg(XVM::R0, RegState::Define).addReg(DstReg).addImm(0); + BuildMI(*BB, MI, dl, get(XVM::LDD)) + .addReg(XVM::R1, RegState::Define).addReg(SrcReg).addImm(0); + if (MI->getOpcode() == XVM::MEMSET_ri) { + replace_imm_with_movk(BB, MI, dl); + } else { + Register CopyLen = MI->getOperand(2).getReg(); + BuildMI(*BB, MI, dl, get(XVM::LDD)) + .addReg(XVM::R2, RegState::Define).addReg(CopyLen).addImm(0); + } + + BuildMI(*BB, MI, dl, get(XVM::CALL_IMM)).addImm(func_num); + BB->erase(MI); +} + + +bool XVMInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { + switch (MI.getOpcode()) { + default: + return false; + case XVM::MEMCPY_ri: + case XVM::MEMCPY_rr: + expandMEMCPY(MI); + return true; + case XVM::MEMMOV_ri: + case XVM::MEMMOV_rr: + expandMEMMOVE(MI); + return true; + case XVM::MEMSET_ri: + case XVM::MEMSET_rr: + expandMEMSET(MI); + return true; + } + + return false; +} + +void XVMInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + Register SrcReg, bool IsKill, int FI, + const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI) const { + DebugLoc DL; + if (I != MBB.end()) + DL = I->getDebugLoc(); + + if (RC == &XVM::XVMGPRRegClass) + BuildMI(MBB, I, DL, get(XVM::STD)) + .addReg(SrcReg, getKillRegState(IsKill)) + .addFrameIndex(FI) + .addImm(0); + else + llvm_unreachable("Can't store this register to stack slot"); + return; +} + +void 
XVMInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + Register DestReg, int FI, + const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI) const { + DebugLoc DL; + if (I != MBB.end()) + DL = I->getDebugLoc(); + + if (RC == &XVM::XVMGPRRegClass) + BuildMI(MBB, I, DL, get(XVM::LDD), DestReg).addFrameIndex(FI).addImm(0); + else + llvm_unreachable("Can't load this register from stack slot"); + return; +} + +bool XVMInstrInfo::analyzeBranch(MachineBasicBlock &MBB, + MachineBasicBlock *&TBB, + MachineBasicBlock *&FBB, + SmallVectorImpl &Cond, + bool AllowModify) const { + //TODO: consider the case when the CFG is stackified. + // Maybe need to implement removeBranch and insertBranch + + bool HaveCond = false; + for (MachineInstr &MI : MBB.terminators()) { + switch (MI.getOpcode()) { + default: + // Unhandled instruction; bail out. + return true; + case XVM::BR : + if (!HaveCond) + TBB = MI.getOperand(0).getMBB(); + else + FBB = MI.getOperand(0).getMBB(); + break; + case XVM::BUEQ_rr : + case XVM::BSNEQ_rr : + case XVM::BSGE_rr : + case XVM::BUGE_rr : + case XVM::BSLE_rr : + case XVM::BULE_rr : + case XVM::BSGT_rr : + case XVM::BUGT_rr : + case XVM::BSLT_rr : + case XVM::BULT_rr : + case XVM::BSEQ_ri : + case XVM::BUEQ_ri : + case XVM::BSNEQ_ri : + case XVM::BUNEQ_ri : + case XVM::BSGE_ri : + case XVM::BUGE_ri : + case XVM::BSLE_ri : + case XVM::BULE_ri : + case XVM::BSGT_ri : + case XVM::BUGT_ri : + case XVM::BSLT_ri : + case XVM::BULT_ri : + if (HaveCond) + return true; + CondCode CC = getCondFromBranchOpc(MI.getOpcode()); + Cond.push_back(MachineOperand::CreateImm(CC)); + Cond.push_back(MI.getOperand(1)); + Cond.push_back(MI.getOperand(2)); + TBB = MI.getOperand(0).getMBB(); + HaveCond = true; + break; + } + } + + return false; +} + +unsigned XVMInstrInfo::insertBranch(MachineBasicBlock &MBB, + MachineBasicBlock *TBB, + MachineBasicBlock *FBB, + ArrayRef Cond, + const DebugLoc &DL, + int *BytesAdded) const { + 
assert(!BytesAdded && "code size not handled"); + + if (Cond.empty()) { + if (!TBB) + return 0; + + BuildMI(&MBB, DL, get(XVM::BR)).addMBB(TBB); + return 1; + } + + assert(Cond.size() == 3 && "Expected 2 operands and an operation!"); + + BuildMI(&MBB, DL, get(getBranchOpcFromCond(Cond))).addMBB(TBB) \ + .add(Cond[1]) \ + .add(Cond[2]); + if (!FBB) + return 1; + BuildMI(&MBB, DL, get(XVM::BR)).addMBB(FBB); + return 2; +} + +unsigned XVMInstrInfo::removeBranch(MachineBasicBlock &MBB, + int *BytesRemoved) const { + assert(!BytesRemoved && "code size not handled"); + + MachineBasicBlock::instr_iterator I = MBB.instr_end(); + unsigned Count = 0; + + while (I != MBB.instr_begin()) { + --I; + if (I->isDebugInstr()) + continue; + if (!I->isTerminator()) + break; + // Remove the branch. + I->eraseFromParent(); + I = MBB.instr_end(); + ++Count; + } + + return Count; +} + +bool XVMInstrInfo::isCondBranchProcessed(const MachineInstr *MI) const{ + switch (MI->getOpcode()) { + case XVM::LOOP_BUEQ_rr : + case XVM::LOOP_BSNEQ_rr : + case XVM::LOOP_BSGE_rr : + case XVM::LOOP_BUGE_rr : + case XVM::LOOP_BSLE_rr : + case XVM::LOOP_BULE_rr : + case XVM::LOOP_BSGT_rr : + case XVM::LOOP_BUGT_rr : + case XVM::LOOP_BSLT_rr : + case XVM::LOOP_BULT_rr : + case XVM::LOOP_BSEQ_ri : + case XVM::LOOP_BUEQ_ri : + case XVM::LOOP_BSNEQ_ri : + case XVM::LOOP_BUNEQ_ri : + case XVM::LOOP_BSGE_ri : + case XVM::LOOP_BUGE_ri : + case XVM::LOOP_BSLE_ri : + case XVM::LOOP_BULE_ri : + case XVM::LOOP_BSGT_ri : + case XVM::LOOP_BUGT_ri : + case XVM::LOOP_BSLT_ri : + case XVM::LOOP_BULT_ri : + return true; + default : + return false; + } +} +bool XVMInstrInfo::isCondBranch(const MachineInstr *MI) const { + switch (MI->getOpcode()) { + case XVM::BUEQ_rr : + case XVM::BSNEQ_rr : + case XVM::BSGE_rr : + case XVM::BUGE_rr : + case XVM::BSLE_rr : + case XVM::BULE_rr : + case XVM::BSGT_rr : + case XVM::BUGT_rr : + case XVM::BSLT_rr : + case XVM::BULT_rr : + case XVM::BSEQ_ri : + case XVM::BUEQ_ri : + case 
XVM::BSNEQ_ri : + case XVM::BUNEQ_ri : + case XVM::BSGE_ri : + case XVM::BUGE_ri : + case XVM::BSLE_ri : + case XVM::BULE_ri : + case XVM::BSGT_ri : + case XVM::BUGT_ri : + case XVM::BSLT_ri : + case XVM::BULT_ri : + return true; + default : + return false; + } + return false; +} + +bool XVMInstrInfo::isUnCondBranch(const MachineInstr *MI) const{ + return MI->getOpcode() == XVM::BR; +} + +void XVMInstrInfo::negateCondBranch(MachineInstr *MI) const{ + assert(isCondBranch(MI)); + switch (MI->getOpcode()) { + case XVM::BUEQ_rr : MI->setDesc(get(XVM::BSNEQ_rr)); break; + case XVM::BSNEQ_rr : MI->setDesc(get(XVM::BUEQ_rr)); break; + case XVM::BSGE_rr : MI->setDesc(get(XVM::BSLT_rr)); break; + case XVM::BUGE_rr : MI->setDesc(get(XVM::BULT_rr)); break; + case XVM::BSLE_rr : MI->setDesc(get(XVM::BSGT_rr)); break; + case XVM::BULE_rr : MI->setDesc(get(XVM::BUGT_rr)); break; + case XVM::BSGT_rr : MI->setDesc(get(XVM::BSLE_rr)); break; + case XVM::BUGT_rr : MI->setDesc(get(XVM::BULE_rr)); break; + case XVM::BSLT_rr : MI->setDesc(get(XVM::BSGE_rr)); break; + case XVM::BULT_rr : MI->setDesc(get(XVM::BUGE_rr)); break; + case XVM::BSEQ_ri : MI->setDesc(get(XVM::BSNEQ_ri)); break; + case XVM::BUEQ_ri : MI->setDesc(get(XVM::BUNEQ_ri)); break; + case XVM::BSNEQ_ri : MI->setDesc(get(XVM::BSEQ_ri)); break; + case XVM::BUNEQ_ri : MI->setDesc(get(XVM::BUEQ_ri)); break; + case XVM::BSGE_ri : MI->setDesc(get(XVM::BSLT_ri)); break; + case XVM::BUGE_ri : MI->setDesc(get(XVM::BULT_ri)); break; + case XVM::BSLE_ri : MI->setDesc(get(XVM::BSGT_ri)); break; + case XVM::BULE_ri : MI->setDesc(get(XVM::BUGT_ri)); break; + case XVM::BSGT_ri : MI->setDesc(get(XVM::BSLE_ri)); break; + case XVM::BUGT_ri : MI->setDesc(get(XVM::BULE_ri)); break; + case XVM::BSLT_ri : MI->setDesc(get(XVM::BSGE_ri)); break; + case XVM::BULT_ri : MI->setDesc(get(XVM::BUGE_ri)); break; + default : + llvm_unreachable("Unknown Branch Opcode Existing in isCondBranch but missing in negateCondBranch"); + } +} #endif diff --git 
a/llvm/lib/Target/XVM/XVMMCInstLower.cpp b/llvm/lib/Target/XVM/XVMMCInstLower.cpp index 6414a7de94eaa7d7c5b8be68a693c82f5c42a6ea..4be8597b00af6a1b7c4b87a9844e4d583f2dc664 100644 --- a/llvm/lib/Target/XVM/XVMMCInstLower.cpp +++ b/llvm/lib/Target/XVM/XVMMCInstLower.cpp @@ -11,6 +11,73 @@ // //===----------------------------------------------------------------------===// #ifdef XVM_DYLIB_MODE -// Insert the XVM backend code here +#include "XVMMCInstLower.h" +#include "llvm/CodeGen/AsmPrinter.h" +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/MC/MCAsmInfo.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInst.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +using namespace llvm; + +MCSymbol * +XVMMCInstLower::GetGlobalAddressSymbol(const MachineOperand &MO) const { + return Printer.getSymbol(MO.getGlobal()); +} + +MCSymbol * +XVMMCInstLower::GetExternalSymbolSymbol(const MachineOperand &MO) const { + return Printer.GetExternalSymbolSymbol(MO.getSymbolName()); +} + +MCOperand XVMMCInstLower::LowerSymbolOperand(const MachineOperand &MO, + MCSymbol *Sym) const { + + const MCExpr *Expr = MCSymbolRefExpr::create(Sym, Ctx); + + if (!MO.isJTI() && MO.getOffset()) + llvm_unreachable("unknown symbol op"); + + return MCOperand::createExpr(Expr); +} + +void XVMMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const { + OutMI.setOpcode(MI->getOpcode()); + + for (const MachineOperand &MO : MI->operands()) { + MCOperand MCOp; + switch (MO.getType()) { + default: + MI->print(errs()); + llvm_unreachable("unknown operand type"); + case MachineOperand::MO_Register: + // Ignore all implicit register operands. 
+ if (MO.isImplicit()) + continue; + MCOp = MCOperand::createReg(MO.getReg()); + break; + case MachineOperand::MO_Immediate: + MCOp = MCOperand::createImm(MO.getImm()); + break; + case MachineOperand::MO_MachineBasicBlock: + MCOp = MCOperand::createExpr( + MCSymbolRefExpr::create(MO.getMBB()->getSymbol(), Ctx)); + break; + case MachineOperand::MO_RegisterMask: + continue; + case MachineOperand::MO_ExternalSymbol: + MCOp = LowerSymbolOperand(MO, GetExternalSymbolSymbol(MO)); + break; + case MachineOperand::MO_GlobalAddress: + MCOp = LowerSymbolOperand(MO, GetGlobalAddressSymbol(MO)); + break; + } + + OutMI.addOperand(MCOp); + } +} #endif diff --git a/llvm/lib/Target/XVM/XVMRegisterInfo.cpp b/llvm/lib/Target/XVM/XVMRegisterInfo.cpp index 2be5295ec007ef7e50f14873a0da6e4bec7c64c6..de1f49848acfc8a893b4d458cf3fc112474e1cbd 100644 --- a/llvm/lib/Target/XVM/XVMRegisterInfo.cpp +++ b/llvm/lib/Target/XVM/XVMRegisterInfo.cpp @@ -10,6 +10,141 @@ // //===----------------------------------------------------------------------===// #ifdef XVM_DYLIB_MODE -// Insert the XVM backend code here +#include "XVMRegisterInfo.h" +#include "XVM.h" +#include "XVMSubtarget.h" +#include "MCTargetDesc/XVMInstPrinter.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/RegisterScavenging.h" +#include "llvm/CodeGen/TargetFrameLowering.h" +#include "llvm/IR/DiagnosticInfo.h" +#include "llvm/Support/ErrorHandling.h" + +#define GET_REGINFO_TARGET_DESC +#include "XVMGenRegisterInfo.inc" +using namespace llvm; +#include +#define XVM_STACK_SIZE_LIMIT 1024 + +XVMRegisterInfo::XVMRegisterInfo() + : XVMGenRegisterInfo(XVM::R0) {} + +const MCPhysReg * +XVMRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { + return CSR_SaveList; +} + +BitVector XVMRegisterInfo::getReservedRegs(const MachineFunction &MF) const { + BitVector Reserved(getNumRegs()); + return Reserved; +} + +static void 
WarnSize(int Offset, MachineFunction &MF, DebugLoc& DL) +{ + if (Offset <= -512) { + const Function &F = MF.getFunction(); + std::ostringstream OSS; + OSS << "Looks like the XVM stack limit of " << XVM_STACK_SIZE_LIMIT << " bytes is exceeded. " + << "Please move large on stack variables into XVM maps.\n"; + std::string StrCopy = OSS.str(); + DiagnosticInfoUnsupported DiagStackSize(F, StrCopy, DL); + F.getContext().diagnose(DiagStackSize); + } +} + +void XVMRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, + int SPAdj, unsigned FIOperandNum, + RegScavenger *RS) const { + assert(SPAdj == 0 && "Unexpected"); + unsigned i = 0; + MachineInstr &MI = *II; + MachineBasicBlock &MBB = *MI.getParent(); + MachineFunction &MF = *MBB.getParent(); + DebugLoc DL = MI.getDebugLoc(); + + if (!DL) + /* try harder to get some debug loc */ + for (auto &I : MBB) + if (I.getDebugLoc()) { + DL = I.getDebugLoc(); + break; + } + + while (!MI.getOperand(i).isFI()) { + ++i; + assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!"); + } + + Register FrameReg = getFrameRegister(MF); + int FrameIndex = MI.getOperand(i).getIndex(); + const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); + + /* Offset from frame index are offsets from FP, + * which is pointing at current function's stack bottom (larger address) + * Note that this stack bottom does not include saved registers in prologue + * i.e. the bottom is above the registers + * + * We don't have FP, so we use SP to locate data on stack. 
+ * SP has been pre-determined in the prologue, + * it is pointing at (FP - StackSize), and it is fixed during function execution + * Therefore, we use SP+(Stacksize + FP_offset) to mimic (FP + FP_offset) + */ + auto &MFI = MF.getFrameInfo(); + uint64_t StackSize = MFI.getStackSize(); + + if (MI.getOpcode() == XVM::MOV_rr) { + int Offset = MF.getFrameInfo().getObjectOffset(FrameIndex); + WarnSize(Offset, MF, DL); + Register reg = MI.getOperand(i - 1).getReg(); + BuildMI(MBB, ++II, DL, TII.get(XVM::MOV_rr), reg).addReg(XVM::SP); + if(StackSize + Offset) { + BuildMI(MBB, II, DL, TII.get(XVM::AddRef_ri), reg).addReg(reg).addImm(StackSize + Offset); + } + MI.eraseFromParent(); + return; + } + + int Offset = MF.getFrameInfo().getObjectOffset(FrameIndex) + + MI.getOperand(i + 1).getImm(); + + if (!isInt<32>(Offset)) + llvm_unreachable("bug in frame offset"); + + WarnSize(Offset, MF, DL); + + if (MI.getOpcode() == XVM::FI_ri) { + // TODO: to be tested and modified + // architecture does not really support FI_ri, replace it with + // MOV_rr , frame_reg + // ADD_ri , imm + Register reg = MI.getOperand(i - 1).getReg(); + BuildMI(MBB, ++II, DL, TII.get(XVM::MOV_rr), reg).addReg(FrameReg); + BuildMI(MBB, II, DL, TII.get(XVM::AddRef_ri), reg).addReg(reg).addImm(StackSize + Offset); + + // Remove FI_ri instruction + MI.eraseFromParent(); + } else { + MI.getOperand(i).ChangeToRegister(FrameReg, false); + MI.getOperand(i + 1).ChangeToImmediate(StackSize + Offset); + } +} + +Register XVMRegisterInfo::getFrameRegister(const MachineFunction &MF) const { + return XVM::SP; +} + +const TargetRegisterClass * +XVMRegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind) const { + assert(Kind == 0 && "Only one kind of pointer on XVM"); + return &XVM::XVMGPRRegClass; +} + +const uint32_t * +XVMRegisterInfo::getCallPreservedMask(const MachineFunction &MF, + CallingConv::ID CC) const { + return CSR_RegMask; +} #endif diff --git a/llvm/lib/Target/XVM/XVMSelectionDAGInfo.cpp 
b/llvm/lib/Target/XVM/XVMSelectionDAGInfo.cpp index a681b7bea5b19c46eaf07765318248a5feae26e1..b828765449d709359ad54975bcc8d6d941ba2d91 100644 --- a/llvm/lib/Target/XVM/XVMSelectionDAGInfo.cpp +++ b/llvm/lib/Target/XVM/XVMSelectionDAGInfo.cpp @@ -10,6 +10,66 @@ // //===----------------------------------------------------------------------===// #ifdef XVM_DYLIB_MODE -// Insert the XVM backend code here +#include "XVMTargetMachine.h" +#include "llvm/CodeGen/SelectionDAG.h" +#include "llvm/IR/DerivedTypes.h" +using namespace llvm; + +#define DEBUG_TYPE "XVM-selectiondag-info" + +SDValue XVMSelectionDAGInfo::EmitTargetCodeForMemcpy( + SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, + SDValue Size, Align Alignment, bool isVolatile, bool AlwaysInline, + MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const { + SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); + ConstantSDNode *ConstantSize = dyn_cast(Size); + if (ConstantSize) { + uint64_t CopyLen = ConstantSize->getZExtValue(); + Dst = DAG.getNode(XVMISD::MEMCPY, dl, VTs, Chain, Dst, Src, + DAG.getConstant(CopyLen, dl, MVT::i64)); + } else { + Dst = DAG.getNode(XVMISD::MEMCPY, dl, VTs, Chain, Dst, Src, + Size); + } + return Dst.getValue(0); +} + +SDValue XVMSelectionDAGInfo::EmitTargetCodeForMemmove( + SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, + SDValue Op1, SDValue Op2, SDValue Op3, + Align Alignment, bool isVolatile, + MachinePointerInfo DstPtrInfo, + MachinePointerInfo SrcPtrInfo) const { + SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); + ConstantSDNode *ConstantSize = dyn_cast(Op3); + if (ConstantSize) { + uint64_t CopyLen = ConstantSize->getZExtValue(); + Op1 = DAG.getNode(XVMISD::MEMMOV, dl, VTs, Chain, Op1, Op2, + DAG.getConstant(CopyLen, dl, MVT::i64)); + } else { + Op1 = DAG.getNode(XVMISD::MEMMOV, dl, VTs, Chain, Op1, Op2, + Op3); + } + return Op1.getValue(0); +} + +SDValue XVMSelectionDAGInfo::EmitTargetCodeForMemset( + SelectionDAG &DAG, const SDLoc 
&DL, + SDValue Chain, SDValue Op1, SDValue Op2, + SDValue Op3, Align Alignment, bool IsVolatile, + bool AlwaysInline, + MachinePointerInfo DstPtrInfo) const { + SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); + ConstantSDNode *ConstantSize = dyn_cast(Op3); + if (ConstantSize) { + uint64_t CopyLen = ConstantSize->getZExtValue(); + Op1 = DAG.getNode(XVMISD::MEMSET, DL, VTs, Chain, Op1, DAG.getAnyExtOrTrunc(Op2, DL, MVT::i64), + DAG.getZExtOrTrunc(Op3, DL, MVT::i64)); + } else { + Op1 = DAG.getNode(XVMISD::MEMSET, DL, VTs, Chain, Op1, DAG.getAnyExtOrTrunc(Op2, DL, MVT::i64), + Op3); + } + return Op1.getValue(0); +} #endif diff --git a/llvm/lib/Target/XVM/XVMSortRegion.cpp b/llvm/lib/Target/XVM/XVMSortRegion.cpp index dc79f12b0bc003091707bc4de050f8378c4a63c7..617823d229b2de53c0cace451fb973c411b70d69 100644 --- a/llvm/lib/Target/XVM/XVMSortRegion.cpp +++ b/llvm/lib/Target/XVM/XVMSortRegion.cpp @@ -1,4 +1,49 @@ #ifdef XVM_DYLIB_MODE -// Insert the XVM backend code here + +#include "XVMSortRegion.h" +#include "llvm/CodeGen/MachineLoopInfo.h" + +using namespace llvm; +using namespace XVM; + +namespace llvm { +namespace XVM { +template <> +bool ConcreteSortRegion::isLoop() const { + return true; +} +} // end namespace XVM +} // end namespace llvm + +const SortRegion *SortRegionInfo::getRegionFor(const MachineBasicBlock *MBB) { + const auto *ML = MLI.getLoopFor(MBB); + if (!ML) + return nullptr; + // We determine subregion relationship by domination of their headers, i.e., + // if region A's header dominates region B's header, B is a subregion of A. 
+ if (ML) { + // If the smallest region containing MBB is a loop + if (LoopMap.count(ML)) + return LoopMap[ML].get(); + LoopMap[ML] = std::make_unique<ConcreteSortRegion<MachineLoop>>(ML); + return LoopMap[ML].get(); + } + return nullptr; +} + +MachineBasicBlock *SortRegionInfo::getBottom(const SortRegion *R) { + if (R->isLoop()) + return getBottom(MLI.getLoopFor(R->getHeader())); + return nullptr; +} + +MachineBasicBlock *SortRegionInfo::getBottom(const MachineLoop *ML) { + MachineBasicBlock *Bottom = ML->getHeader(); + for (MachineBasicBlock *MBB : ML->blocks()) { + if (MBB->getNumber() > Bottom->getNumber()) + Bottom = MBB; + } + return Bottom; +} #endif \ No newline at end of file diff --git a/llvm/lib/Target/XVM/XVMSubtarget.cpp b/llvm/lib/Target/XVM/XVMSubtarget.cpp index 63b5b636d7a3bbbd366d450831d08793938d736b..4b2844c6103b8d79f86389a456b966f19e86c65a 100644 --- a/llvm/lib/Target/XVM/XVMSubtarget.cpp +++ b/llvm/lib/Target/XVM/XVMSubtarget.cpp @@ -10,6 +10,32 @@ // //===----------------------------------------------------------------------===// #ifdef XVM_DYLIB_MODE -// Insert the XVM backend code here + +#include "XVMSubtarget.h" +#include "XVM.h" +#include "llvm/MC/TargetRegistry.h" +#include "llvm/Support/Host.h" + +using namespace llvm; + +#define DEBUG_TYPE "xvm-subtarget" + +#define GET_SUBTARGETINFO_TARGET_DESC +#define GET_SUBTARGETINFO_CTOR +#include "XVMGenSubtargetInfo.inc" + +void XVMSubtarget::anchor() {} + +XVMSubtarget &XVMSubtarget::initializeSubtargetDependencies(StringRef CPU, + StringRef FS) { + ParseSubtargetFeatures(CPU, /*TuneCPU*/ CPU, FS); + return *this; +} + +XVMSubtarget::XVMSubtarget(const Triple &TT, const std::string &CPU, + const std::string &FS, const TargetMachine &TM) + : XVMGenSubtargetInfo(TT, CPU, /*TuneCPU*/ CPU, FS), + FrameLowering(initializeSubtargetDependencies(CPU, FS)), + TLInfo(TM, *this) { isCommon = true; } #endif diff --git a/llvm/lib/Target/XVM/XVMTargetMachine.cpp b/llvm/lib/Target/XVM/XVMTargetMachine.cpp index
0320e7a770ba8b3e8d08c38897c1989a2053e938..e4982da3bb5c8ca36c2341f1ac44791cf9dd5eb2 100644 --- a/llvm/lib/Target/XVM/XVMTargetMachine.cpp +++ b/llvm/lib/Target/XVM/XVMTargetMachine.cpp @@ -11,9 +11,136 @@ //===----------------------------------------------------------------------===// #ifdef XVM_DYLIB_MODE -// Insert the XVM backend code here -#include "llvm/Support/Compiler.h" + +#include "XVMTargetMachine.h" +#include "XVM.h" +#include "XVMTargetTransformInfo.h" +#include "MCTargetDesc/XVMMCAsmInfo.h" +#include "TargetInfo/XVMTargetInfo.h" +#include "llvm/CodeGen/Passes.h" +#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" +#include "llvm/CodeGen/TargetPassConfig.h" +#include "llvm/IR/LegacyPassManager.h" +#include "llvm/IR/PassManager.h" +#include "llvm/MC/TargetRegistry.h" +#include "llvm/Passes/PassBuilder.h" +#include "llvm/Support/FormattedStream.h" +#include "llvm/Target/TargetOptions.h" +#include "llvm/Transforms/IPO/PassManagerBuilder.h" +#include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Scalar/SimplifyCFG.h" +#include "llvm/Transforms/Utils/SimplifyCFGOptions.h" +#include "llvm/Transforms/Utils.h" +using namespace llvm; + + extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeXVMTarget() { + // Register the target. 
+ RegisterTargetMachine X(getTheXVMleTarget()); + RegisterTargetMachine Y(getTheXVMTarget()); + + // Register backend passes + auto &PR = *PassRegistry::getPassRegistry(); + initializeXVMCFGSortPass(PR); + initializeXVMCFGStackifyPass(PR); + initializeXVMCFGStructurePass(PR); + initializeXVMUpdateRefInstrForMIPass(PR); +} + +extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeXVMTargetCalledInDylib() { + LLVMInitializeXVMTarget(); +} + +// DataLayout: little endian +// Stack frame alignment: 128 +// integer size & alignment: 64 +static std::string computeDataLayout(const Triple &TT) { + return "e-m:e-p:64:64-i64:64-n64-S128"; +} + +static Reloc::Model getEffectiveRelocModel(Optional RM) { + return RM.value_or(Reloc::PIC_); +} + +XVMTargetMachine::XVMTargetMachine(const Target &T, const Triple &TT, + StringRef CPU, StringRef FS, + const TargetOptions &Options, + Optional RM, + Optional CM, + CodeGenOpt::Level OL, bool JIT) + : LLVMTargetMachine(T, computeDataLayout(TT), TT, CPU, FS, Options, + getEffectiveRelocModel(RM), + getEffectiveCodeModel(CM, CodeModel::Small), OL), + TLOF(std::make_unique()), + Subtarget(TT, std::string(CPU), std::string(FS), *this) { + initAsmInfo(); + this->Options.EmitAddrsig = false; + + XVMMCAsmInfo *MAI = + static_cast(const_cast(AsmInfo.get())); + setRequiresStructuredCFG(true); +} + +namespace { +// XVM Code Generator Pass Configuration Options. 
+class XVMPassConfig : public TargetPassConfig { +public: + XVMPassConfig(XVMTargetMachine &TM, PassManagerBase &PM) + : TargetPassConfig(TM, PM) {} + + XVMTargetMachine &getXVMTargetMachine() const { + return getTM(); + } + + void addIRPasses() override; + bool addInstSelector() override; + void addPreEmitPass() override; + void addPreRegAlloc() override; + bool addPreISel() override; +}; +} + +bool XVMPassConfig::addPreISel() { + addPass(createFlattenCFGPass()); + addPass(createFixIrreduciblePass()); + return false; +} + +TargetPassConfig *XVMTargetMachine::createPassConfig(PassManagerBase &PM) { + return new XVMPassConfig(*this, PM); +} + +void XVMPassConfig::addIRPasses() { +// addPass(createXVMCheckAndAdjustIR()); + TargetPassConfig::addIRPasses(); +} + +TargetTransformInfo +XVMTargetMachine::getTargetTransformInfo(const Function &F) const { + return TargetTransformInfo(XVMTTIImpl(this, F)); +} + +// Install an instruction selector pass using +// the ISelDag to gen XVM code. +bool XVMPassConfig::addInstSelector() { + addPass(createXVMISelDag(getXVMTargetMachine())); + + return false; +} + +void XVMPassConfig::addPreRegAlloc() { + // addPass(createXVMExpandPseudoPass()); + addPass(createXVMUpdateRefInstrForMIPass()); +} + + +void XVMPassConfig::addPreEmitPass() { + // Sort the blocks of the CFG into topological order, + // a prerequisite for BLOCK and LOOP markers. + // Currently, the algorithm is from WebAssembly. 
+ addPass(createXVMCFGSort()); + addPass(createXVMCFGStackify()); + // addPass(createXVMCFGStructure()); } #else diff --git a/llvm/lib/Target/XVM/XVMUpdateRefInstrForMI.cpp b/llvm/lib/Target/XVM/XVMUpdateRefInstrForMI.cpp index 9427a2f4b6b7f8a4c06f8fc54319d1e556c3f159..99b5f8baa8e533af5fbc10997383da3dfdcc6579 100644 --- a/llvm/lib/Target/XVM/XVMUpdateRefInstrForMI.cpp +++ b/llvm/lib/Target/XVM/XVMUpdateRefInstrForMI.cpp @@ -1,4 +1,1001 @@ #ifdef XVM_DYLIB_MODE -// Insert the XVM backend code here + +#include "XVM.h" +#include "XVM_def.h" +#include "XVMInstrInfo.h" +#include "XVMTargetMachine.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "MCTargetDesc/XVMInstPrinter.h" +#include "llvm/CodeGen/MachineModuleInfo.h" + +#define DEBUG_TYPE "xvm-ref-trace" + +using namespace llvm; + +#define XVM_REF_DETERMINE_NAME "XVM pseudo instruction ref determine pass" +#define XVM_SYM_REG_NON_REF 0b00000000 +#define XVM_SYM_REG_REF 0b00000001 + +namespace { +class XVMUpdateRefInstrForMI : public MachineFunctionPass { +public: + static char ID; + const XVMInstrInfo * TII = nullptr; + XVMUpdateRefInstrForMI() : MachineFunctionPass(ID) { + initializeXVMUpdateRefInstrForMIPass(*PassRegistry::getPassRegistry()); + } + bool runOnMachineFunction(MachineFunction &MF) override; + StringRef getPassName() const override { return XVM_REF_DETERMINE_NAME; } +private: + bool updateRefInfoInMBB(MachineBasicBlock &MBB); + void updatePtrRefInMBB(MachineBasicBlock &MBB); + void FindNonRefRegInFunc(const MachineFunction &MF); +}; + char XVMUpdateRefInstrForMI::ID = 0; +} + +static std::map<Register, unsigned char> MapRefRegInFunc; +static std::map<Register, unsigned int> MapPtrRegInFunc; +static std::set<Register> SetNonRefRegInFunc; + +static void CheckFunctionReturn(Function & F) { + Type *Ty = F.getReturnType(); + /* Return is always r0 for xvm */ + if (auto *PTy = dyn_cast<PointerType>(Ty)) { + MapRefRegInFunc.insert(std::pair<Register, unsigned char>(XVM::R0, XVM_SYM_REG_REF)); + // } else if (Ty->isIntegerTy(64) ||
Ty->isIntegerTy(32) || Ty->isIntegerTy(16) || Ty->isIntegerTy(8) || Ty->isIntegerTy(1)) { + } else if (Ty->isIntegerTy()) { + MapRefRegInFunc.insert(std::pair(XVM::R0, XVM_SYM_REG_NON_REF)); + //} else if (Ty->isVoidTy() || Ty->isIntegerTy()){ + } else if (Ty->isVoidTy()){ + ; + } else { + llvm_unreachable("Invalid return type"); + } +} + +static Register inline getPhysicalRegister(unsigned index) { + switch(index) { + case 0: return XVM::R0; + case 1: return XVM::R1; + case 2: return XVM::R2; + case 3: return XVM::R3; + case 4: return XVM::R4; + case 5: return XVM::R5; + case 6: return XVM::R6; + case 7: return XVM::R7; + case 8: return XVM::R8; + case 9: return XVM::R9; + case 10: return XVM::R10; + case 11: return XVM::R11; + case 12: return XVM::R12; + case 13: return XVM::R13; + case 14: return XVM::R14; + case 15: return XVM::R15; + case 16: return XVM::R16; + case 17: return XVM::R17; + default: + llvm_unreachable("Invalid physical register index"); + } +} + +static void CheckFunctionArgs(Function & F) { + int idx = 0; + /* Here we assume r0, ..., r5 are the registers for input parameters + * We only need to check the register ones: for others in stack, + * it is handled with check/update steps followed. 
+ */ + for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) { + if (idx > 5) { + break; + } + Type *Ty = I->getType(); + std::string regName = "r" + itostr(idx); + if (auto *PTy = dyn_cast<PointerType>(Ty)) { + LLVM_DEBUG(dbgs() << "arg[" << idx << "]=" << I->getName().data() << " is ref.\n"); + if (!I->hasAttribute(Attribute::StructRet)) { + MapRefRegInFunc.insert(std::pair<Register, unsigned char>(getPhysicalRegister(idx), XVM_SYM_REG_REF)); + } else { + // R6 is used to pass the sret return + MapRefRegInFunc.insert(std::pair<Register, unsigned char>(getPhysicalRegister(6), XVM_SYM_REG_REF)); + } + } else if (Ty->isIntegerTy()) { + MapRefRegInFunc.insert(std::pair<Register, unsigned char>(getPhysicalRegister(idx), XVM_SYM_REG_NON_REF)); + LLVM_DEBUG(dbgs() << "arg[" << idx << "]=" << I->getName().data() << " is not ref.\n"); + } else { + llvm_unreachable("Invalid param type"); + } + idx++; + } +} + +static void setRefFlagFor2Ops(MachineOperand &MO_def, MachineOperand &MO_use) { + /* verify def */ + if (MO_def.isReg()) { + if (!MO_def.isDef() && !MO_def.isImplicit() && !MO_def.isKill()) { + llvm_unreachable("unhandled Operand!!"); + } + } + /* check use */ + if (MO_use.isReg()) { + if (!MO_def.isDef() && !MO_def.isImplicit() && !MO_def.isKill()) { + llvm_unreachable("unhandled Operand!!"); + } + Register regNo = MO_use.getReg(); + std::map<Register, unsigned char>::iterator I = MapRefRegInFunc.find(regNo); + if (I == MapRefRegInFunc.end()) { + MapRefRegInFunc.insert(std::pair<Register, unsigned char>(MO_def.getReg(), XVM_SYM_REG_NON_REF)); + } else { + MapRefRegInFunc.insert(std::pair<Register, unsigned char>(MO_def.getReg(), I->second)); + } + } +} + +static inline void updateRefMapForRefInst(MachineOperand &MO_use, unsigned char flag) { + std::map<Register, unsigned char>::iterator I1 = MapRefRegInFunc.find(MO_use.getReg()); + if (I1 == MapRefRegInFunc.end()) { + MapRefRegInFunc.insert(std::pair<Register, unsigned char>(MO_use.getReg(), flag)); + } + else + I1->second = flag; +} + +static void checkSimpleMIWithRef(MachineInstr &MI) { + if (MI.getOpcode() == XVM::COPY) { + assert(MI.getNumOperands() == 2); + MachineOperand &MO_def =
MI.getOperand(0); + MachineOperand &MO_use = MI.getOperand(1); + setRefFlagFor2Ops(MO_def, MO_use); + } +} + +static bool updateCopyMIWithRef(MachineInstr &MI, const XVMInstrInfo * TII) { + /* No update for Copy */ + return false; +} + +static void checkMovMIWithRef(MachineInstr &MI) { + if (MI.getOpcode() == XVM::MOV_rr) { + assert(MI.getNumOperands() == 2); + MachineOperand &MO_def = MI.getOperand(0); + MachineOperand &MO_use = MI.getOperand(1); + setRefFlagFor2Ops(MO_def, MO_use); + } + if (MI.getOpcode() == XVM::MOV_ri) { + assert(MI.getNumOperands() == 2); + MachineOperand &MO_def = MI.getOperand(0); + MapRefRegInFunc.insert(std::pair(MO_def.getReg(), XVM_SYM_REG_NON_REF)); + } +} + +static bool updateMovMIWithRef(MachineInstr &MI, const XVMInstrInfo * TII) { + /* No update for Move */ + return false; +} + +static void checkLoadMIWithRef(MachineInstr &MI) { + if (MI.getOpcode() == XVM::LDB || MI.getOpcode() == XVM::LDH || MI.getOpcode() == XVM::LDW || + MI.getOpcode() == XVM::LDB_z || MI.getOpcode() == XVM::LDH_z || MI.getOpcode() == XVM::LDW_z || + MI.getOpcode() == XVM::LDD) { + assert(MI.getNumOperands() == 3); + MachineOperand &MO_def = MI.getOperand(0); + MachineOperand &MO_use = MI.getOperand(1); + /* verify def */ + if (MO_def.isReg()) { + if (!MO_def.isDef() && !MO_def.isImplicit() && !MO_def.isKill()) { + llvm_unreachable("unhandled Operand!!"); + } + } + /* check use */ + if (MO_use.isReg()) { + if (!MO_def.isDef() && !MO_def.isImplicit() && !MO_def.isKill()) { + llvm_unreachable("unhandled Operand!!"); + } + // always be ref + MapRefRegInFunc.insert(std::pair(MO_use.getReg(), XVM_SYM_REG_REF)); + } + } + + if (MI.getOpcode() == XVM::LD_global_imm64) { + assert(MI.getNumOperands() == 2); + MachineOperand &MO_def = MI.getOperand(0); + MachineOperand &MO_use = MI.getOperand(1); + if (MO_use.isGlobal()) { + LLVM_DEBUG(dbgs() << "Global:" << MO_use.getGlobal()->getName().data() << ' to load.\n'); + MapRefRegInFunc.insert(std::pair(MO_def.getReg(), 
XVM_SYM_REG_REF)); + } + } +} + +static bool updateLoadMIWithRef(MachineInstr &MI, const XVMInstrInfo * TII) { + if (MI.getOpcode() == XVM::LDD) { + assert(MI.getNumOperands() == 3); + MachineOperand &MO_def = MI.getOperand(0); + /* if MO_def is a ref (it is determined when it is used somewhere), then it should be ldrref */ + Register regNo = MO_def.getReg(); + std::map::iterator I = MapRefRegInFunc.find(regNo); + if (I != MapRefRegInFunc.end()) { + if (I->second == XVM_SYM_REG_REF && + SetNonRefRegInFunc.find(regNo) == SetNonRefRegInFunc.end()) { + MI.setDesc(TII->get(XVM::LoadRef_ri)); + return true; + } + } + } + return false; +} + +static void checkAddSubMIWithRef(MachineInstr &MI) { + if (MI.getOpcode() == XVM::ADD_ri || MI.getOpcode() == XVM::ADD_rr || + MI.getOpcode() == XVM::SUB_ri || MI.getOpcode() == XVM::SUB_rr ) { + assert(MI.getNumOperands() == 3); + MachineOperand &MO_def = MI.getOperand(0); + MachineOperand &MO_use = MI.getOperand(1); + setRefFlagFor2Ops(MO_def, MO_use); + return; + } +} + +static void checkOrXorAndMIWithRef(MachineInstr &MI) { + if (MI.getOpcode() == XVM::OR_ri || + MI.getOpcode() == XVM::XOR_ri|| + MI.getOpcode() == XVM::AND_ri) { + assert(MI.getNumOperands() == 3); + MachineOperand &MO_def = MI.getOperand(0); + MachineOperand &MO_use = MI.getOperand(1); + setRefFlagFor2Ops(MO_def, MO_use); + return; + } + if (MI.getOpcode() == XVM::OR_rr || + MI.getOpcode() == XVM::XOR_rr|| + MI.getOpcode() == XVM::AND_rr) { + assert(MI.getNumOperands() == 3); + MachineOperand &MO_def = MI.getOperand(0); + MachineOperand &MO_use = MI.getOperand(1); + setRefFlagFor2Ops(MO_def, MO_use); + // MO_use2 should not be ref; + MachineOperand &MO_use2 = MI.getOperand(2); + updateRefMapForRefInst(MO_use2, XVM_SYM_REG_NON_REF); + return; + } +} + +static inline bool updateAddSubWithSubAddForImm(MachineInstr &MI, const XVMInstrInfo * TII) { + if (MI.getOpcode() == XVM::ADD_ri || MI.getOpcode() == XVM::SUB_ri) { + assert(MI.getOperand(2).isImm()); + int64_t imm 
= MI.getOperand(2).getImm(); + if (imm < 0) { + imm = -imm; + MI.getOperand(2).setImm(imm); + if (MI.getOpcode() == XVM::ADD_ri) { + MI.setDesc(TII->get(XVM::SUB_ri)); + return true; + } else { + MI.setDesc(TII->get(XVM::ADD_ri)); + return true; + } + } + } + return false; +} + +static inline void updateAddRiWithRef(MachineInstr &MI, const XVMInstrInfo * TII) { + assert(MI.getOperand(2).isImm()); + int64_t imm = MI.getOperand(2).getImm(); + if (imm < 0) { + imm = -imm; + MI.getOperand(2).setImm(imm); + MI.setDesc(TII->get(XVM::SubRef_ri)); + } else { + MI.setDesc(TII->get(XVM::AddRef_ri)); + } +} + +static inline bool updateAddMIWithRef(MachineInstr &MI, const XVMInstrInfo * TII) { + assert(MI.getNumOperands() == 3); + MachineOperand &MO_def = MI.getOperand(0); + MachineOperand &MO_use = MI.getOperand(1); + /* if MO_def is a ref (it is determined when it is used somewhere), then it should be addref */ + Register regNo = MO_def.getReg(); + std::map::iterator I = MapRefRegInFunc.find(regNo); + // Update instr if MO_def is ref + if (I != MapRefRegInFunc.end()) { + if (I->second == XVM_SYM_REG_REF && + SetNonRefRegInFunc.find(regNo) == SetNonRefRegInFunc.end()) { + // Update MO_use to be ref if MO_def is ref + updateRefMapForRefInst(MO_use, XVM_SYM_REG_REF); + if(MI.getOpcode() == XVM::ADD_ri) + updateAddRiWithRef(MI, TII); + else + MI.setDesc(TII->get(XVM::AddRef_rr)); + return true; + } + } + // Update instr if MO_use if ref + regNo = MO_use.getReg(); + I = MapRefRegInFunc.find(regNo); + if (I != MapRefRegInFunc.end()) { + if (I->second == XVM_SYM_REG_REF && + SetNonRefRegInFunc.find(regNo) == SetNonRefRegInFunc.end()) { + if(MI.getOpcode() == XVM::ADD_ri) + updateAddRiWithRef(MI, TII); + else + MI.setDesc(TII->get(XVM::AddRef_rr)); + return true; + } + } + return false; +} + +static inline bool updateSubriMIWithRef(MachineInstr &MI, const XVMInstrInfo * TII) { + assert(MI.getNumOperands() == 3); + MachineOperand &MO_def = MI.getOperand(0); + MachineOperand &MO_use = 
MI.getOperand(1); + /* if MO_def is a ref (it is determined when it is used somewhere), then it should be subref */ + Register regNo = MO_def.getReg(); + std::map::iterator I = MapRefRegInFunc.find(regNo); + if (I != MapRefRegInFunc.end()) { + if (I->second == XVM_SYM_REG_REF && + SetNonRefRegInFunc.find(regNo) == SetNonRefRegInFunc.end()) { + updateRefMapForRefInst(MO_use, XVM_SYM_REG_REF); + if (MI.getOpcode() == XVM::SUB_ri) + MI.setDesc(TII->get(XVM::SubRef_ri)); + else + MI.setDesc(TII->get(XVM::SubRef_rr)); + return true; + } + } + regNo = MO_use.getReg(); + I = MapRefRegInFunc.find(regNo); + if (I != MapRefRegInFunc.end()) { + if (I->second == XVM_SYM_REG_REF && + SetNonRefRegInFunc.find(regNo) == SetNonRefRegInFunc.end()) { + if(MI.getOpcode() == XVM::SUB_ri) + MI.setDesc(TII->get(XVM::SubRef_ri)); + else + MI.setDesc(TII->get(XVM::SubRef_rr)); + return true; + } + } + return false; +} + +static inline bool updateSubrrMIWithRef(MachineInstr &MI, const XVMInstrInfo * TII) { + assert(MI.getNumOperands() == 3); + MachineOperand &MO_def = MI.getOperand(0); + MachineOperand &MO_use1 = MI.getOperand(1); + MachineOperand &MO_use2 = MI.getOperand(2); + /* if MO_def is a ref (it is determined when it is used somewhere), then it should be subref */ + Register regNo = MO_def.getReg(); + std::map::iterator I = MapRefRegInFunc.find(regNo); + if (I != MapRefRegInFunc.end()) { + if (I->second == XVM_SYM_REG_REF && + SetNonRefRegInFunc.find(regNo) == SetNonRefRegInFunc.end()) { + updateRefMapForRefInst(MO_use1, XVM_SYM_REG_REF); + I = MapRefRegInFunc.find(MO_use2.getReg()); + if (I != MapRefRegInFunc.end() && I->second == XVM_SYM_REG_REF) { + MI.setDesc(TII->get(XVM::DifRef_rr)); + } else { + MI.setDesc(TII->get(XVM::SubRef_rr)); + } + return true; + } + } + regNo = MO_use1.getReg(); + I = MapRefRegInFunc.find(regNo); + if (I != MapRefRegInFunc.end()) { + if (I->second == XVM_SYM_REG_REF && + SetNonRefRegInFunc.find(regNo) == SetNonRefRegInFunc.end()) { + if 
(MO_use2.isReg()) { + I = MapRefRegInFunc.find(MO_use2.getReg()); + if (I != MapRefRegInFunc.end() && I->second == XVM_SYM_REG_REF) { + MI.setDesc(TII->get(XVM::DifRef_rr)); + } else { + MI.setDesc(TII->get(XVM::SubRef_rr)); + } + return true; + } + } + } + return false; +} + // Update add/sub with sub/add if negative imm + + +static bool updateAddSubMIWithRef(MachineInstr &MI, const XVMInstrInfo * TII) { + bool Modified = false; + switch (MI.getOpcode()) { + case XVM::ADD_ri: + case XVM::ADD_rr: + Modified = updateAddMIWithRef(MI, TII); + return Modified | updateAddSubWithSubAddForImm(MI, TII); + case XVM::SUB_ri: + Modified = updateSubriMIWithRef(MI, TII); + return Modified | updateAddSubWithSubAddForImm(MI, TII); + case XVM::SUB_rr: + return updateSubrrMIWithRef(MI, TII); + default: + return false; + } +} + +static inline bool updateOrMIWithRef(MachineInstr &MI, const XVMInstrInfo * TII) { + assert(MI.getNumOperands() == 3); + MachineOperand &MO_def = MI.getOperand(0); + MachineOperand &MO_use = MI.getOperand(1); + /* if MO_def is a ref (it is determined when it is used somewhere), then it should be orref */ + Register regNo = MO_def.getReg(); + std::map::iterator I = MapRefRegInFunc.find(regNo); + // Update instr if MO_def is ref + if (I != MapRefRegInFunc.end()) { + if (I->second == XVM_SYM_REG_REF && + SetNonRefRegInFunc.find(regNo) == SetNonRefRegInFunc.end()) { + // Update MO_use to be ref if MO_def is ref + updateRefMapForRefInst(MO_use, XVM_SYM_REG_REF); + if(MI.getOpcode() == XVM::OR_ri) + MI.setDesc(TII->get(XVM::OrRef_ri)); + else + MI.setDesc(TII->get(XVM::OrRef_rr)); + return true; + } + } + // Update instr if MO_use if ref + regNo = MO_use.getReg(); + I = MapRefRegInFunc.find(regNo); + if (I != MapRefRegInFunc.end()) { + if (I->second == XVM_SYM_REG_REF && + SetNonRefRegInFunc.find(regNo) == SetNonRefRegInFunc.end()) { + if(MI.getOpcode() == XVM::OR_ri) + MI.setDesc(TII->get(XVM::OrRef_ri)); + else + MI.setDesc(TII->get(XVM::OrRef_rr)); + return 
true; + } + } + return false; +} + +static inline bool updateXorMIWithRef(MachineInstr &MI, const XVMInstrInfo * TII) { + assert(MI.getNumOperands() == 3); + MachineOperand &MO_def = MI.getOperand(0); + MachineOperand &MO_use = MI.getOperand(1); + /* if MO_def is a ref (it is determined when it is used somewhere), then it should be xorref */ + Register regNo = MO_def.getReg(); + std::map::iterator I = MapRefRegInFunc.find(regNo); + // Update instr if MO_def is ref + if (I != MapRefRegInFunc.end()) { + if (I->second == XVM_SYM_REG_REF && + SetNonRefRegInFunc.find(regNo) == SetNonRefRegInFunc.end()) { + // Update MO_use to be ref if MO_def is ref + updateRefMapForRefInst(MO_use, XVM_SYM_REG_REF); + if(MI.getOpcode() == XVM::XOR_ri) + MI.setDesc(TII->get(XVM::XorRef_ri)); + else + MI.setDesc(TII->get(XVM::XorRef_rr)); + return true; + } + } + // Update instr if MO_use if ref + regNo = MO_use.getReg(); + I = MapRefRegInFunc.find(regNo); + if (I != MapRefRegInFunc.end() && + SetNonRefRegInFunc.find(regNo) == SetNonRefRegInFunc.end()) { + if (I->second == XVM_SYM_REG_REF) { + if(MI.getOpcode() == XVM::XOR_ri) + MI.setDesc(TII->get(XVM::XorRef_ri)); + else + MI.setDesc(TII->get(XVM::XorRef_rr)); + return true; + } + } + return false; +} + +static inline bool updateAndMIWithRef(MachineInstr &MI, const XVMInstrInfo * TII) { + assert(MI.getNumOperands() == 3); + MachineOperand &MO_def = MI.getOperand(0); + MachineOperand &MO_use = MI.getOperand(1); + /* if MO_def is a ref (it is determined when it is used somewhere), then it should be andref */ + Register regNo = MO_def.getReg(); + std::map::iterator I = MapRefRegInFunc.find(regNo); + // Update instr if MO_def is ref + if (I != MapRefRegInFunc.end()) { + if (I->second == XVM_SYM_REG_REF && + SetNonRefRegInFunc.find(regNo) == SetNonRefRegInFunc.end()) { + // Update MO_use to be ref if MO_def is ref + updateRefMapForRefInst(MO_use, XVM_SYM_REG_REF); + if(MI.getOpcode() == XVM::AND_ri) + MI.setDesc(TII->get(XVM::AndRef_ri)); + 
else + MI.setDesc(TII->get(XVM::AndRef_rr)); + return true; + } + } + // Update instr if MO_use if ref + regNo = MO_use.getReg(); + I = MapRefRegInFunc.find(regNo); + if (I != MapRefRegInFunc.end() && + SetNonRefRegInFunc.find(regNo) == SetNonRefRegInFunc.end()) { + if (I->second == XVM_SYM_REG_REF) { + if(MI.getOpcode() == XVM::AND_ri) + MI.setDesc(TII->get(XVM::AndRef_ri)); + else + MI.setDesc(TII->get(XVM::AndRef_rr)); + return true; + } + } + return false; +} + +static bool updateOrXorAndMIWithRef(MachineInstr &MI, const XVMInstrInfo * TII) { + switch (MI.getOpcode()) + { + case XVM::OR_ri: + case XVM::OR_rr: + return updateOrMIWithRef(MI, TII); + case XVM::XOR_ri: + case XVM::XOR_rr: + return updateXorMIWithRef(MI, TII); + case XVM::AND_ri: + case XVM::AND_rr: + return updateAndMIWithRef(MI, TII); + default: + return false; + } +} + +static std::map MachineOperandRegisterReplacementMap; +static std::set MachineInstrExceptionSet; +/** mov rd #simm (16 bits) + * movk rd, #uimm (16 bits), #shift (0:no 1:16bits 2:32bits 3:48bits) + * addref rd, rn, #uimm (14bits) + */ +#define NUM_OF_IMM_BITS_ADDREF 14 +#define NUM_OF_IMM_BITS_MOV 16 +#define NUM_OF_IMM_BITS_MOVK1 32 +#define NUM_OF_IMM_BITS_MOVK2 48 +#define NUM_OF_IMM_BITS_MOVK3 64 + +static void handleOffsetWithInstr(MachineInstr &MI, const char * GlobalName) { + uint64_t SubSecOffset = GetSubSecOffsetForGlobal(GlobalName); + MachineBasicBlock &MB = *MI.getParent(); + MachineBasicBlock::iterator II = MI.getIterator(); + MachineFunction * MF = MB.getParent(); + DebugLoc DL = MI.getDebugLoc(); + const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + + MachineOperand &MO_def = MI.getOperand(0); + if (SubSecOffset > 0) { + if (SubSecOffset < ((1 << NUM_OF_IMM_BITS_ADDREF))) { + /* Addref */ + Register VRegForAddref = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + MachineInstr * AddrefMI = BuildMI(MB, ++II, DL, TII->get(XVM::AddRef_ri), VRegForAddref) + 
.addReg(MO_def.getReg()).addImm(SubSecOffset); + MachineInstrExceptionSet.insert(AddrefMI); + MachineOperandRegisterReplacementMap.insert(std::pair(MO_def.getReg(), &AddrefMI->getOperand(0))); + } else if (SubSecOffset < ((1 << NUM_OF_IMM_BITS_MOV) -1)) { + /* Mov */ + Register VRegForMov = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + MachineInstr * MovMI = BuildMI(MB, ++II, DL, TII->get(XVM::MOV_ri), VRegForMov) + .addImm(SubSecOffset); + MachineInstrExceptionSet.insert(MovMI); + /* Addref */ + Register VRegForAddref = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + MachineInstr * AddrefMI = BuildMI(MB, II, DL, TII->get(XVM::AddRef_rr), VRegForAddref) + .addReg(MO_def.getReg()).addReg(VRegForMov); + MachineInstrExceptionSet.insert(AddrefMI); + MachineOperandRegisterReplacementMap.insert(std::pair(MO_def.getReg(), &AddrefMI->getOperand(0))); + } else if (SubSecOffset < ((1 << NUM_OF_IMM_BITS_MOVK1) -1)) { + /* Mov */ + unsigned int imm1 = SubSecOffset & 0X000000000000FFFF; + Register VRegForMov = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + MachineInstr * MovMI = BuildMI(MB, ++II, DL, TII->get(XVM::MOV_ri), VRegForMov) + .addImm(imm1); + MachineInstrExceptionSet.insert(MovMI); + /* Movk */ + unsigned int imm2 = (SubSecOffset & 0X00000000FFFF0000)>>16; + Register VRegForMovk1 = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + MachineInstr * MovkMI = BuildMI(MB, II, DL, TII->get(XVM::MOVK_ri), VRegForMovk1) + .addReg(VRegForMov).addImm(imm2).addImm(1); + MachineInstrExceptionSet.insert(MovkMI); + /* Addref only*/ + Register VRegForAddref = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + MachineInstr * AddrefMI = BuildMI(MB, II, DL, TII->get(XVM::AddRef_rr), VRegForAddref) + .addReg(MO_def.getReg()).addReg(VRegForMovk1); + MachineInstrExceptionSet.insert(AddrefMI); + MachineOperandRegisterReplacementMap.insert(std::pair(MO_def.getReg(), &AddrefMI->getOperand(0))); + } else if (SubSecOffset < ((1 << NUM_OF_IMM_BITS_MOVK2) -1)) { + /* Mov */ + 
unsigned int imm1 = SubSecOffset & 0X000000000000FFFF; + Register VRegForMov = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + MachineInstr * MovMI = BuildMI(MB, ++II, DL, TII->get(XVM::MOV_ri), VRegForMov) + .addImm(imm1); + MachineInstrExceptionSet.insert(MovMI); + /* Movk */ + unsigned int imm2 = (SubSecOffset & 0X00000000FFFF0000)>>16; + Register VRegForMovk1 = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + MachineInstr * Movk1MI = BuildMI(MB, II, DL, TII->get(XVM::MOVK_ri), VRegForMovk1) + .addReg(VRegForMov).addImm(imm2).addImm(1); + MachineInstrExceptionSet.insert(Movk1MI); + /* Movk */ + unsigned int imm3 = (SubSecOffset & 0X0000FFFF00000000)>>32; + Register VRegForMovk2 = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + MachineInstr * Movk2MI = BuildMI(MB, II, DL, TII->get(XVM::MOVK_ri), VRegForMovk2) + .addReg(VRegForMovk1).addImm(imm3).addImm(2); + MachineInstrExceptionSet.insert(Movk2MI); + /* Addref only*/ + Register VRegForAddref = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + MachineInstr * AddrefMI = BuildMI(MB, II, DL, TII->get(XVM::AddRef_rr), VRegForAddref) + .addReg(MO_def.getReg()).addReg(VRegForMovk2); + MachineInstrExceptionSet.insert(AddrefMI); + MachineOperandRegisterReplacementMap.insert(std::pair(MO_def.getReg(), &AddrefMI->getOperand(0))); + } else if (SubSecOffset < ((1 << NUM_OF_IMM_BITS_MOVK3) -1)) { + /* Mov */ + unsigned int imm1 = SubSecOffset & 0X000000000000FFFF; + Register VRegForMov = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + MachineInstr * MovMI = BuildMI(MB, ++II, DL, TII->get(XVM::MOV_ri), VRegForMov) + .addImm(imm1); + MachineInstrExceptionSet.insert(MovMI); + /* Movk */ + unsigned int imm2 = (SubSecOffset & 0X00000000FFFF0000)>>16; + Register VRegForMovk1 = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + MachineInstr * Movk1MI = BuildMI(MB, II, DL, TII->get(XVM::MOVK_ri), VRegForMovk1) + .addReg(VRegForMov).addImm(imm2).addImm(1); + MachineInstrExceptionSet.insert(Movk1MI); + /* Movk */ + unsigned 
int imm3 = (SubSecOffset & 0X0000FFFF00000000)>>32; + Register VRegForMovk2 = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + MachineInstr * Movk2MI = BuildMI(MB, II, DL, TII->get(XVM::MOVK_ri), VRegForMovk2) + .addReg(VRegForMovk1).addImm(imm3).addImm(2); + MachineInstrExceptionSet.insert(Movk2MI); + /* Movk */ + unsigned int imm4 = (SubSecOffset & 0XFFFF000000000000)>>48; + Register VRegForMovk3 = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + MachineInstr * Movk3MI = BuildMI(MB, II, DL, TII->get(XVM::MOVK_ri), VRegForMovk3) + .addReg(VRegForMovk2).addImm(imm4).addImm(3); + MachineInstrExceptionSet.insert(Movk3MI); + /* Addref only*/ + Register VRegForAddref = MRI.createVirtualRegister(&XVM::XVMGPRRegClass); + MachineInstr * AddrefMI = BuildMI(MB, II, DL, TII->get(XVM::AddRef_rr), VRegForAddref) + .addReg(MO_def.getReg()).addReg(VRegForMovk3); + MachineInstrExceptionSet.insert(AddrefMI); + MachineOperandRegisterReplacementMap.insert(std::pair(MO_def.getReg(), &AddrefMI->getOperand(0))); + } + } +} + +static void updatePtrRegRefBasedGlobals(MachineInstr &MI) { + switch(MI.getOpcode()) { + case XVM::LD_global_imm64: { + assert(MI.getNumOperands() >= 2); + MachineOperand &MO_def = MI.getOperand(0); + MachineOperand &MO_use = MI.getOperand(1); + if (MO_use.isGlobal()) { + const char * GlobalName = MO_use.getGlobal()->getName().data(); + LLVM_DEBUG(dbgs() << "Global:" << GlobalName << ' to load.\n'); + MapRefRegInFunc.insert(std::pair(MO_def.getReg(), XVM_SYM_REG_REF)); + unsigned int ptrLevel = GetPtrRegisterLevelBasedOnName(GlobalName); + if (ptrLevel > 0) { + MapPtrRegInFunc.insert(std::pair(MO_def.getReg(), ptrLevel)); + } + // get the offset and add instructions of mov/movk/movk addref + handleOffsetWithInstr(MI, GlobalName); + MachineInstrExceptionSet.insert(&MI); + } + break; + } + case XVM::LDD: { + assert(MI.getNumOperands() >= 2); + MachineOperand &MO_def = MI.getOperand(0); + MachineOperand &MO_use = MI.getOperand(1); + if (MO_use.isReg()) { + 
std::map::iterator I = MapPtrRegInFunc.find(MO_use.getReg()); + if (I != MapPtrRegInFunc.end() && I->second >= 1) { + MapRefRegInFunc.insert(std::pair(MO_def.getReg(), XVM_SYM_REG_REF)); + + // check the flags to see if the def is a ref + MachineOperand &MO_imm = MI.getOperand(2); + if (MO_imm.isImm()) { + int64_t imm = MO_imm.getImm(); + if (imm == 0) { + std::map::iterator I1 = MapPtrRegInFunc.find(MO_use.getReg()); + if (I1 != MapPtrRegInFunc.end() && I1->second >= 1) { + MapPtrRegInFunc.insert(std::pair(MO_def.getReg(), I1->second -1)); + } + } + } + } + } + break; + } + case XVM::STD: { + assert(MI.getNumOperands() >= 2); + MachineOperand &MO_use1 = MI.getOperand(0); + MachineOperand &MO_use2 = MI.getOperand(1); + if (MO_use2.isReg()) { + std::map::iterator I = MapPtrRegInFunc.find(MO_use2.getReg()); + if (I != MapPtrRegInFunc.end() && I->second >= 1) { + MapRefRegInFunc.insert(std::pair(MO_use1.getReg(), XVM_SYM_REG_REF)); + } + } + break; + } + } +} + +static void checkStoreMIWithRef(MachineInstr &MI) { + if (MI.getOpcode() == XVM::STB || MI.getOpcode() == XVM::STH || + MI.getOpcode() == XVM::STW || MI.getOpcode() == XVM::STD) { + assert(MI.getNumOperands() == 3); + MachineOperand &MO_use1 = MI.getOperand(0); + MachineOperand &MO_use2 = MI.getOperand(1); + assert(MO_use1.isUse()); + if (MO_use2.isReg()) { + // STW killed %48:xvmgpr, killed %54:xvmgpr, 0 :: (store (s32) into %ir.arrayidx5) + assert(MO_use2.isUse()); + // always be ref + MapRefRegInFunc.insert(std::pair(MO_use2.getReg(), XVM_SYM_REG_REF)); + } else if (MO_use2.isFI()) { + /* FIXME: we might need a fix for FI scenario: + STB killed %6:xvmgpr, %stack.2.atomic-temp, 0 :: (store (s8) into %ir.atomic-temp) + It will be handled in eliminateFrameIndex. 
+ */ + ; + } + return; + } +} + +static bool updateStoreMIWithRef(MachineInstr &MI, const XVMInstrInfo * TII) { + if (MI.getOpcode() == XVM::STD) { + assert(MI.getNumOperands() == 3); + MachineOperand &MO_use1 = MI.getOperand(0); + Register regNo = MO_use1.getReg(); + std::map::iterator I = MapRefRegInFunc.find(regNo); + if (I != MapRefRegInFunc.end()) { + if (I->second == XVM_SYM_REG_REF && + SetNonRefRegInFunc.find(regNo) == SetNonRefRegInFunc.end()) { + MI.setDesc(TII->get(XVM::StoreRef_ri)); + return true; + } + } + } + return false; +} + +static void checkPhiMIWithRef(MachineInstr &MI) { + if (MI.getOpcode() == XVM::PHI) { + unsigned numOfFrom = MI.getNumOperands() / 2; + assert(numOfFrom * 2 + 1 == MI.getNumOperands()); + MachineOperand &MO_def = MI.getOperand(0); + for(unsigned idx = 0; idx < numOfFrom; idx++) { + MachineOperand &MO_use = MI.getOperand(idx*2+1); + setRefFlagFor2Ops(MO_def, MO_use); + } + return; + } +} + +static bool updatePhiMIWithRef(MachineInstr &MI, const XVMInstrInfo * TII) { + /* No update for Phi*/ + return false; +} + +static bool updateRegistersInMI(MachineInstr &MI, const XVMInstrInfo * TII) { + SmallVector OperandsInMI; + bool replaceOperand = false; + if ( MachineInstrExceptionSet.find(&MI) == MachineInstrExceptionSet.end()) { + for(unsigned int i = 0; i < MI.getNumOperands(); i++) { + MachineOperand &MO = MI.getOperand(i); + if (MO.isReg()) { + std::map::iterator I = MachineOperandRegisterReplacementMap.find(MO.getReg()); + if (I == MachineOperandRegisterReplacementMap.end()) { + OperandsInMI.push_back(MO); + } else { + bool isDef = false; + if (MO.isDef()) + isDef = true; + MachineOperand NewMO = MachineOperand::CreateReg((I->second)->getReg(), isDef); + OperandsInMI.push_back(NewMO); + replaceOperand = true; + } + } else { + OperandsInMI.push_back(MO); + } + } + if (replaceOperand) { + MachineBasicBlock &MB = *MI.getParent(); + MachineBasicBlock::iterator II = MI.getIterator(); + MachineFunction * MF = MB.getParent(); + 
DebugLoc DL = MI.getDebugLoc(); + const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + MachineInstr * ReplaceMI = BuildMI(MB, II, DL, TII->get(MI.getOpcode())); + for( MachineOperand pMO: OperandsInMI) { + ReplaceMI->addOperand(pMO); + } + MB.remove_instr(&MI); + MRI.verifyUseLists(); + } + } + return replaceOperand; +} + +/** + * Propogate the non ref registers via COPY and MOV statements + * + * */ +static void propogateNonRefInfo(const MachineBasicBlock &MBB) { + MachineBasicBlock::const_iterator MBBI = MBB.begin(), E = MBB.end(); + while(MBBI != E) { + MachineBasicBlock::const_iterator NMBBI = std::next(MBBI); + const MachineInstr &MI = *MBBI; + MBBI = NMBBI; + // if MO_use is in SetNonRefRegInFunc, then MO_def should be in SetNonRefRegInFunc + if (MI.getOpcode() == XVM::COPY || MI.getOpcode() == XVM::MOV_rr) { + assert(MI.getNumOperands() == 2); + const MachineOperand &MO_def = MI.getOperand(0); + const MachineOperand &MO_use = MI.getOperand(1); + if (MO_def.isReg() && MO_use.isReg()) { + if (SetNonRefRegInFunc.find(MO_use.getReg()) != SetNonRefRegInFunc.end()) { + SetNonRefRegInFunc.insert(MO_def.getReg()); + } + } + } + } +} + +/** + * Update SetNonRefRegInFunc based on function calls + * + * */ +static void updateNonRefInfoViaCalls(const MachineBasicBlock &MBB, + std::set &FuncSet) { + MachineBasicBlock::const_iterator MBBI = MBB.begin(), E = MBB.end(); + while(MBBI != E) { + MachineBasicBlock::const_iterator NMBBI = std::next(MBBI); + const MachineInstr &MI = *MBBI; + MBBI = NMBBI; + if (MI.getOpcode() == XVM::CALL_IMM) { + assert(MI.getNumOperands() >= 3); + const MachineOperand &MO_0 = MI.getOperand(0); + const MachineOperand &MO_2 = MI.getOperand(2); + if (!MO_0.isReg() && MO_0.isGlobal() && + MO_2.isReg() && MO_2.isImplicit() && MO_2.isDef() && !MO_2.isDead()) { + // Function without ptr as return + if (FuncSet.find(MO_0.getGlobal()->getName().str()) == FuncSet.end()) { + if (MBBI == E) { + 
return; + } + const MachineInstr &NextMI = *MBBI; + if (NextMI.getOpcode() == XVM::ADJCALLSTACKUP) { + // skip call stack up + MBBI = std::next(MBBI); + if (MBBI == E) { + return; + } + } + // save va reg from the copy with r0 + const MachineInstr &NextNextMI =*MBBI; + if (NextNextMI.getOpcode() == XVM::COPY || + NextNextMI.getOpcode() == XVM::MOV_rr) { + assert(NextNextMI.getNumOperands() == 2); + const MachineOperand &MO_def = NextNextMI.getOperand(0); + const MachineOperand &MO_use = NextNextMI.getOperand(1); + if (MO_def.isReg() && MO_use.isReg()) { + if (MO_use.getReg() == XVM::R0) { + SetNonRefRegInFunc.insert(MO_def.getReg()); + } + } + } + } + } + } + } +} + +/** + * Find non ref registers via function calls + * */ +void XVMUpdateRefInstrForMI::FindNonRefRegInFunc(const MachineFunction &MF) { + MachineModuleInfo &MMI = MF.getMMI(); + const Module *M = MMI.getModule(); + std::set FuncSetWithRetPtr; + + if (M != NULL) { + for (const Function &F : M->getFunctionList()) { + Type *Ty = F.getReturnType(); + if (auto *PTy = dyn_cast(Ty)) { + FuncSetWithRetPtr.insert(F.getName().str()); + } + } + } + for (auto &MBB : MF) { + updateNonRefInfoViaCalls(MBB, FuncSetWithRetPtr); + } + for (auto &MBB : MF) { + propogateNonRefInfo(MBB); + } +} + +bool XVMUpdateRefInstrForMI::runOnMachineFunction(MachineFunction &MF) { + TII = MF.getSubtarget().getInstrInfo(); + LLVM_DEBUG(dbgs() << "Check/update refs in fun:" << MF.getFunction().getName().data() << '.\n'); + + MapRefRegInFunc.clear(); + MapPtrRegInFunc.clear(); + MachineOperandRegisterReplacementMap.clear(); + MachineInstrExceptionSet.clear(); + SetNonRefRegInFunc.clear(); + CheckFunctionArgs(MF.getFunction()); + CheckFunctionReturn(MF.getFunction()); + FindNonRefRegInFunc(MF); + + bool Modified = false; + for (auto &MBB : MF) { + Modified |= updateRefInfoInMBB(MBB); + } + return Modified; +} + +void XVMUpdateRefInstrForMI::updatePtrRefInMBB(MachineBasicBlock &MBB) { + MachineBasicBlock::iterator MBBI = MBB.begin(); + int 
InstNumber = std::distance(MBB.begin(), MBB.end()); + for (int i = 0; i < InstNumber; i++) { + MachineBasicBlock::iterator NMBBI = std::next(MBBI); + MachineInstr &MI = *MBBI; + updatePtrRegRefBasedGlobals(MI); + MBBI = NMBBI; + } +} + +bool XVMUpdateRefInstrForMI::updateRefInfoInMBB(MachineBasicBlock &MBB) { + int InstNumber = 0; + bool Modified = false; + updatePtrRefInMBB(MBB); + + /* FIXME: the two passes may be merged for efficiency */ + MachineBasicBlock::reverse_iterator MBBI = MBB.rbegin(), E = MBB.rend(); + InstNumber = std::distance(MBB.begin(), MBB.end()); + for (int i = 0; i < InstNumber; i++) { + MachineBasicBlock::reverse_iterator NMBBI = std::next(MBBI); + MachineInstr &MI = *MBBI; + checkSimpleMIWithRef(MI); + checkMovMIWithRef(MI); + checkLoadMIWithRef(MI); + checkStoreMIWithRef(MI); + checkAddSubMIWithRef(MI); + checkOrXorAndMIWithRef(MI); + checkPhiMIWithRef(MI); + MBBI = NMBBI; + } + /* update the instructions */ + MBBI = MBB.rbegin(), E = MBB.rend(); + InstNumber = std::distance(MBB.begin(), MBB.end()); + for (int i = 0; i < InstNumber; i++) { + MachineBasicBlock::reverse_iterator NMBBI = std::next(MBBI); + MachineInstr &MI = *MBBI; + Modified |= updateCopyMIWithRef(MI, TII); + Modified |= updateMovMIWithRef(MI, TII); + Modified |= updateLoadMIWithRef(MI, TII); + Modified |= updateStoreMIWithRef(MI, TII); + Modified |= updateAddSubMIWithRef(MI, TII); + Modified |= updateOrXorAndMIWithRef(MI, TII); + Modified |= updatePhiMIWithRef(MI, TII); + Modified |= updateRegistersInMI(MI, TII); + MBBI = NMBBI; + } + return Modified; +} + +INITIALIZE_PASS(XVMUpdateRefInstrForMI, "xvm-Ref-Determine-pseudo", + XVM_REF_DETERMINE_NAME, false, false) +namespace llvm { + +FunctionPass *createXVMUpdateRefInstrForMIPass() { return new XVMUpdateRefInstrForMI(); } + +} #endif