diff --git a/OAT.xml b/OAT.xml
new file mode 100644
index 0000000000000000000000000000000000000000..77785abdecf26d02e913b7ba012171935e64d85d
--- /dev/null
+++ b/OAT.xml
@@ -0,0 +1,222 @@
+
+
+
+
+
+ llvm/LICENSE.TXT|clang/LICENSE.TXT|libcxx/LICENSE.TXT|lldb/LICENSE.TXT|clang-tools-extra/LICENSE.TXT|llvm/utils/lit/LICENSE.TXT|llvm/tools/msbuild/license.txt|llvm/tools/msbuild/license.txt|clang-tools-extra/clang-tidy/cert/LICENSE.TXT|clang-tools-extra/clang-tidy/hicpp/LICENSE.TXT|libclc/LICENSE.TXT|llvm/include/llvm/Support/LICENSE.TXT
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/README.OpenSource b/README.OpenSource
new file mode 100644
index 0000000000000000000000000000000000000000..ca0e7c99245741cb8d0977a234255654401f66a8
--- /dev/null
+++ b/README.OpenSource
@@ -0,0 +1,11 @@
+[
+ {
+ "Name": "LLVM",
+ "License": "Apache License v2.0 with LLVM Exceptions",
+ "License File": "./libunwind/LICENSE.TXT ./lld/LICENSE.TXT ./polly/tools/GPURuntime/LICENSE.TXT ./libcxxabi/LICENSE.TXT ./llvm/include/llvm/Support/LICENSE.TXT ./llvm/utils/unittest/googletest/LICENSE.TXT ./llvm/LICENSE.TXT ./compiler-rt/LICENSE.TXT ./clang-tools-extra/clang-tidy/cert/LICENSE.TXT ./clang-tools-extra/clang-tidy/hicpp/LICENSE.TXT ./clang-tools-extra/LICENSE.TXT ./lldb/LICENSE.TXT ./llgo/LICENSE.TXT ./libcxx/LICENSE.TXT ./clang/LICENSE.TXT ./libclc/LICENSE.TXT ./parallel-libs/acxxel/LICENSE.TXT",
+ "Version Number": "10.0.1",
+ "Owner": "sunqiang13@huawei.com",
+ "Upstream URL": "http://llvm.org/",
+ "Description": "The LLVM Project is a collection of modular and reusable compiler and toolchain technologies. Despite its name, LLVM has little to do with traditional virtual machines. The name \"LLVM\" itself is not an acronym; it is the full name of the project."
+ }
+]
diff --git a/README_zh.md b/README_zh.md
new file mode 100644
index 0000000000000000000000000000000000000000..f58d9cd8a0316cb5ef65c4d792dbf05f6e7001c6
--- /dev/null
+++ b/README_zh.md
@@ -0,0 +1,70 @@
+# The LLVM Compiler Infrastructure
+
+This directory and its sub-directories contain the source code for LLVM, a toolkit for building highly optimized compilers, optimizers, and run-time environments.
+
+This README briefly describes how to get started with building LLVM. For more information on how to contribute to the LLVM project, please take a look at the [Contributing to LLVM](https://llvm.org/docs/Contributing.html) guide.
+
+## Getting Started with the LLVM System
+
+Taken from [https://llvm.org/docs/GettingStarted.html](https://llvm.org/docs/GettingStarted.html).
+
+### Overview
+
+Welcome to the LLVM project!
+
+The LLVM project has multiple components. The core of the project is itself called "LLVM". It contains all of the tools, libraries, and header files needed to process intermediate representations and convert them into object files. The tools include an assembler, a disassembler, a bitcode analyzer, and a bitcode optimizer. It also contains basic regression tests.
+
+C-like languages use the [Clang](https://clang.llvm.org/) front end. This component compiles C, C++, Objective-C, and Objective-C++ code into LLVM bitcode, and from there into object files using LLVM.
+
+Other components include the [libc++ C++ standard library](https://libcxx.llvm.org/), the [LLD linker](https://lld.llvm.org/), and more.
+
+### Getting the Source Code and Building LLVM
+
+The LLVM Getting Started documentation may be out of date. The [Getting Started with Clang](https://clang.llvm.org/get_started.html) page might have more accurate information.
+
+This is an example workflow and configuration to get and build the LLVM source:
+
+1. Checkout LLVM (including related subprojects like Clang):
+
+   - `git clone https://github.com/llvm/llvm-project.git`
+   - Or, on Windows, `git clone --config core.autocrlf=false https://github.com/llvm/llvm-project.git`
+
+2. Configure and build LLVM and Clang:
+
+   - `cd llvm-project`
+
+   - `mkdir build`
+
+   - `cd build`
+
+   - `cmake -G <generator> [options] ../llvm`
+
+     Some common build system generators are:
+
+     - `Ninja` --- for generating [Ninja](https://ninja-build.org/) build files. Most LLVM developers use Ninja.
+     - `Unix Makefiles` --- for generating make-compatible parallel makefiles.
+     - `Visual Studio` --- for generating Visual Studio projects and solutions.
+     - `Xcode` --- for generating Xcode projects.
+
+     Some common options:
+
+     - `-DLLVM_ENABLE_PROJECTS='...'` --- a semicolon-separated list of the LLVM subprojects you would like to additionally build. It can include any of: clang, clang-tools-extra, libcxx, libcxxabi, libunwind, lldb, compiler-rt, lld, polly, or debuginfo-tests.
+
+       For example, to build LLVM, Clang, libcxx, and libcxxabi, use `-DLLVM_ENABLE_PROJECTS="clang;libcxx;libcxxabi"`.
+
+     - `-DCMAKE_INSTALL_PREFIX=directory` --- specify for *directory* the full path name of where you want the LLVM tools and libraries to be installed (default `/usr/local`).
+
+     - `-DCMAKE_BUILD_TYPE=type` --- valid options for *type* are Debug, Release, RelWithDebInfo, and MinSizeRel. The default is Debug.
+
+     - `-DLLVM_ENABLE_ASSERTIONS=On` --- compile with assertion checks enabled (the default is Yes for Debug builds and No for all other build types).
+
+   - `cmake --build . [-- [options] <target>]`, or invoke the build system specified above directly.
+
+     - The default target (i.e. `ninja` or `make`) will build all of LLVM.
+     - The `check-all` target (i.e. `ninja check-all`) will run the regression tests to ensure everything is in working order.
+     - CMake will generate targets for each tool and library, and most LLVM sub-projects generate their own `check-<project>` target.
+     - Running a serial build will be **slow**. To improve speed, try running a parallel build. Ninja does this by default; for `make`, use the option `-j NNN`, where `NNN` is the number of parallel jobs, e.g. the number of CPUs you have.
+
+   - For more information, see [CMake](https://llvm.org/docs/CMake.html).
+
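+As a minimal sketch of the build steps above (the generator, project list, and install prefix here are just example choices; adjust them for your environment):
+
+```shell
+cd llvm-project && mkdir build && cd build
+cmake -G Ninja \
+      -DLLVM_ENABLE_PROJECTS="clang;lld" \
+      -DCMAKE_BUILD_TYPE=Release \
+      -DLLVM_ENABLE_ASSERTIONS=On \
+      -DCMAKE_INSTALL_PREFIX=$HOME/llvm-install \
+      ../llvm
+ninja             # build everything
+ninja check-all   # run the regression tests
+```
+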
+See the [Getting Started with LLVM](https://llvm.org/docs/GettingStarted.html#getting-started-with-llvm) page for detailed information on configuring and compiling LLVM. You can visit [Directory Layout](https://llvm.org/docs/GettingStarted.html#directory-layout) to learn about the layout of the source code tree.
\ No newline at end of file
diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt
index 28ccef34d8fc74a28edaab7752319882e1523e12..b7b5bc247899732530eb8fa6cb6d6008c45aaecf 100644
--- a/llvm/CMakeLists.txt
+++ b/llvm/CMakeLists.txt
@@ -545,6 +545,12 @@ option(LLVM_BUILD_RUNTIME
"Build the LLVM runtime libraries." ON)
option(LLVM_BUILD_EXAMPLES
"Build the LLVM example programs. If OFF, just generate build targets." OFF)
+option(BUILD_ARK_GC_SUPPORT
+ "ARK support GC. If ON, support GC." OFF)
+if(BUILD_ARK_GC_SUPPORT)
+ add_definitions(-DARK_GC_SUPPORT)
+endif(BUILD_ARK_GC_SUPPORT)
+
option(LLVM_INCLUDE_EXAMPLES "Generate build targets for the LLVM examples" ON)
if(LLVM_BUILD_EXAMPLES)
diff --git a/llvm/include/llvm-c/Core.h b/llvm/include/llvm-c/Core.h
index 2901ab715810fa43b9a6604db41b94bd909a4fda..4a84a1a4abab489d105545c8b3dc78194ebfc909 100644
--- a/llvm/include/llvm-c/Core.h
+++ b/llvm/include/llvm-c/Core.h
@@ -3946,6 +3946,10 @@ LLVMValueRef LLVMBuildCall(LLVMBuilderRef, LLVMValueRef Fn,
LLVMValueRef LLVMBuildCall2(LLVMBuilderRef, LLVMTypeRef, LLVMValueRef Fn,
LLVMValueRef *Args, unsigned NumArgs,
const char *Name);
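+/**
+ * Like LLVMBuildCall2, but additionally attaches a "deopt" operand bundle,
+ * built from the NumVals values in deoptVals, to the created call. Added for
+ * ArkCompiler GC support; the implementation is only compiled in when
+ * ARK_GC_SUPPORT is defined.
+ */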
+LLVMValueRef LLVMBuildCall3(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
+ LLVMValueRef *Args, unsigned NumArgs,
+ const char *Name, LLVMValueRef *deoptVals,
+ int NumVals);
LLVMValueRef LLVMBuildSelect(LLVMBuilderRef, LLVMValueRef If,
LLVMValueRef Then, LLVMValueRef Else,
const char *Name);
diff --git a/llvm/include/llvm-c/ExecutionEngine.h b/llvm/include/llvm-c/ExecutionEngine.h
index c5fc9bdb4d07f62462c65924e6ae8faf75748dec..8f7be471c8146f92f361aca2cb731029a9a28177 100644
--- a/llvm/include/llvm-c/ExecutionEngine.h
+++ b/llvm/include/llvm-c/ExecutionEngine.h
@@ -42,6 +42,7 @@ typedef struct LLVMOpaqueMCJITMemoryManager *LLVMMCJITMemoryManagerRef;
struct LLVMMCJITCompilerOptions {
unsigned OptLevel;
+ LLVMRelocMode RelMode;
LLVMCodeModel CodeModel;
LLVMBool NoFramePointerElim;
LLVMBool EnableFastISel;
diff --git a/llvm/include/llvm/CodeGen/TargetFrameLowering.h b/llvm/include/llvm/CodeGen/TargetFrameLowering.h
index 792452f6e81d6ea18b6a0de3d479672f65b1bf70..304711e38001580db5b16aceda39feabd0526637 100644
--- a/llvm/include/llvm/CodeGen/TargetFrameLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetFrameLowering.h
@@ -16,6 +16,9 @@
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/Support/TypeSize.h"
#include <vector>
+#ifdef ARK_GC_SUPPORT
+#include "llvm/ADT/Triple.h"
+#endif
namespace llvm {
class BitVector;
@@ -208,6 +211,27 @@ public:
MachineBasicBlock &MBB) const = 0;
virtual void emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const = 0;
+#ifdef ARK_GC_SUPPORT
+
+template <class T>
+constexpr T RoundUp(T x, size_t n) const
+{
+ static_assert(std::is_integral<T>::value, "T must be integral");
+ return (static_cast<size_t>(x) + n - 1U) & (-n);
+}
+
+virtual Triple::ArchType GetArkSupportTarget() const
+{
+ return Triple::UnknownArch;
+}
+
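+// Position of the saved FP, in slots, relative to the frame record: a
+// positive value counts from the head of the frame (x86-64 style), a
+// negative value counts from the tail of the callee-save area (AArch64
+// style), and 0 means the FP is not saved. See the frame layout comments in
+// PrologEpilogInserter.cpp.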
+virtual int GetFixedFpPosition() const
+{
+ return 2;
+}
+
+virtual int GetFrameReserveSize(MachineFunction &MF) const;
+#endif
/// With basic block sections, emit callee saved frame moves for basic blocks
/// that are in a different section.
diff --git a/llvm/include/llvm/Target/CodeGenCWrappers.h b/llvm/include/llvm/Target/CodeGenCWrappers.h
index a995463570535d04ccb0c378639c076760b88c73..a0a533a715346288c761114fd89f8238b86a29fa 100644
--- a/llvm/include/llvm/Target/CodeGenCWrappers.h
+++ b/llvm/include/llvm/Target/CodeGenCWrappers.h
@@ -59,6 +59,36 @@ inline LLVMCodeModel wrap(CodeModel::Model Model) {
}
llvm_unreachable("Bad CodeModel!");
}
+
+inline Reloc::Model unwrap(LLVMRelocMode Model) {
+ switch (Model) {
+ case LLVMRelocDefault:
+ case LLVMRelocStatic:
+ return Reloc::Static;
+ case LLVMRelocPIC:
+ return Reloc::PIC_;
+ case LLVMRelocDynamicNoPic:
+ return Reloc::DynamicNoPIC;
+ }
+ llvm_unreachable("Invalid LLVMRelocMode!");
+}
+
+inline LLVMRelocMode wrap(Reloc::Model Model) {
+ switch (Model) {
+ case Reloc::Static:
+ return LLVMRelocStatic;
+ case Reloc::PIC_:
+ return LLVMRelocPIC;
+ case Reloc::DynamicNoPIC:
+ return LLVMRelocDynamicNoPic;
+ case Reloc::ROPI:
+ case Reloc::RWPI:
+ case Reloc::ROPI_RWPI:
+ break;
+ }
+ llvm_unreachable("Invalid Reloc::Model!");
+}
+
} // namespace llvm
#endif
diff --git a/llvm/lib/CodeGen/PrologEpilogInserter.cpp b/llvm/lib/CodeGen/PrologEpilogInserter.cpp
index 378aaba2a65f15164eea97efd1127d094e053289..2a42bd5af90227470d06bd7a3903ba3822875a13 100644
--- a/llvm/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/llvm/lib/CodeGen/PrologEpilogInserter.cpp
@@ -69,6 +69,11 @@
#include <limits>
#include <utility>
#include <vector>
+#include
+
+#ifdef ARK_GC_SUPPORT
+#include
+#endif
using namespace llvm;
@@ -123,6 +128,9 @@ private:
void calculateCallFrameInfo(MachineFunction &MF);
void calculateSaveRestoreBlocks(MachineFunction &MF);
+#ifdef ARK_GC_SUPPORT
+ void RecordCalleeSaveRegisterAndOffset(MachineFunction &MF, std::vector<CalleeSavedInfo> &CSI);
+#endif
void spillCalleeSavedRegs(MachineFunction &MF);
void calculateFrameObjectOffsets(MachineFunction &MF);
@@ -589,6 +597,26 @@ static void insertCSRRestores(MachineBasicBlock &RestoreBlock,
}
}
+#ifdef ARK_GC_SUPPORT
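+// For every callee-saved register spilled by this function, record its DWARF
+// register number and frame offset as a "DwarfReg<N>" => "<offset>" string
+// attribute on the IR function, for use by the ArkCompiler runtime when
+// walking frames.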
+void PEI::RecordCalleeSaveRegisterAndOffset(MachineFunction &MF, std::vector<CalleeSavedInfo> &CSI)
+{
+ MachineModuleInfo &MMI = MF.getMMI();
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
+ Function &func = const_cast<Function &>(MF.getFunction());
+ for (std::vector<CalleeSavedInfo>::const_iterator
+ I = CSI.begin(), E = CSI.end(); I != E; ++I) {
+ int64_t Offset = MFI.getObjectOffset(I->getFrameIdx());
+ unsigned Reg = I->getReg();
+ unsigned DwarfRegNum = MRI->getDwarfRegNum(Reg, true);
+ std::string key = std::string("DwarfReg") + std::to_string(DwarfRegNum);
+ std::string value = std::to_string(Offset);
+ Attribute attr = Attribute::get(func.getContext(), key.c_str(), value.c_str());
+ func.addAttribute(AttributeList::FunctionIndex, attr);
+ }
+}
+#endif
+
void PEI::spillCalleeSavedRegs(MachineFunction &MF) {
// We can't list this requirement in getRequiredProperties because some
// targets (WebAssembly) use virtual registers past this point, and the pass
@@ -629,6 +657,9 @@ void PEI::spillCalleeSavedRegs(MachineFunction &MF) {
for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
insertCSRRestores(*RestoreBlock, CSI);
}
+#ifdef ARK_GC_SUPPORT
+ RecordCalleeSaveRegisterAndOffset(MF, CSI);
+#endif
}
}
@@ -877,6 +908,62 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &MF) {
int64_t FixedCSEnd = Offset;
Align MaxAlign = MFI.getMaxAlign();
+#ifdef ARK_GC_SUPPORT
+ int CalleeSavedFrameSize = 0;
+ Triple::ArchType archType = TFI.GetArkSupportTarget();
+ if (archType != Triple::UnknownArch && TFI.hasFP(MF)) {
+ int fpPosition = TFI.GetFixedFpPosition();
+ int slotSize = sizeof(uint64_t);
+ int fpToCallerSpDelta = 0;
+ // 0:not exist +:count from head -:count from tail
+ // for x86-64
+ // +--------------------------+
+ // | caller Frame |
+ // +--------------------------+---
+ // | returnAddr | ^
+ // +--------------------------+ 2 slot(fpToCallerSpDelta)
+ // | Fp | V fpPosition = 2
+ // +--------------------------+---
+ // | type |
+ // +--------------------------+
+ // | ReServeSize |
+ // +--------------------------+
+ // | R14 |
+ // +--------------------------+
+ // | R13 |
+ // +--------------------------+
+ // | R12 |
+ // +--------------------------+
+ // | RBX |
+ // +--------------------------+
+ // for ARM64
+ // +--------------------------+
+ // | caller Frame |
+ // +--------------------------+---
+ // | callee save registers | ^
+ // | (exclude Fp) | |
+ // | | callee save registers size(fpToCallerSpDelta)
+ // +--------------------------+ |
+ // | Fp | V fpPosition = -1
+ // +--------------------------+--- FixedCSEnd
+ // | type |
+ // +--------------------------+
+ // | ReServeSize |
+ // +--------------------------+
+ if (fpPosition >= 0) {
+ fpToCallerSpDelta = fpPosition * slotSize;
+ } else {
+ fpToCallerSpDelta = FixedCSEnd + (fpPosition + 1) * slotSize;
+ }
+ Function &func = const_cast<Function &>(MF.getFunction());
+ Attribute attr = Attribute::get(func.getContext(), "fpToCallerSpDelta", std::to_string(fpToCallerSpDelta).c_str());
+ func.addAttribute(AttributeList::FunctionIndex, attr);
+
+ CalleeSavedFrameSize = TFI.GetFrameReserveSize(MF);
+ Offset += CalleeSavedFrameSize;
+ }
+#endif
+
// Make sure the special register scavenging spill slot is closest to the
// incoming stack pointer if a frame pointer is required and is closer
// to the incoming rather than the final stack pointer.
diff --git a/llvm/lib/CodeGen/StackMaps.cpp b/llvm/lib/CodeGen/StackMaps.cpp
index faf07e90c39cce215ec749e71796375a259a1c85..5152bbfda8326d93e121343ff5f6b035c8b8bc3e 100644
--- a/llvm/lib/CodeGen/StackMaps.cpp
+++ b/llvm/lib/CodeGen/StackMaps.cpp
@@ -29,6 +29,9 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
+#ifdef ARK_GC_SUPPORT
+#include "llvm/Target/TargetMachine.h"
+#endif
#include <algorithm>
#include <cassert>
#include <cstdint>
@@ -599,10 +602,11 @@ void StackMaps::emitFunctionFrameRecords(MCStreamer &OS) {
// Function Frame records.
LLVM_DEBUG(dbgs() << WSMP << "functions:\n");
for (auto const &FR : FnInfos) {
- LLVM_DEBUG(dbgs() << WSMP << "function addr: " << FR.first
- << " frame size: " << FR.second.StackSize
- << " callsite count: " << FR.second.RecordCount << '\n');
- OS.emitSymbolValue(FR.first, 8);
+ #ifdef ARK_GC_SUPPORT
+ OS.emitSymbolValue(FR.first, AP.TM.getProgramPointerSize());
+ #else
+ OS.emitSymbolValue(FR.first, 8);
+ #endif
OS.emitIntValue(FR.second.StackSize, 8);
OS.emitIntValue(FR.second.RecordCount, 8);
}
diff --git a/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp b/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp
index b0594ec086b28f69d7b1ee0eb2c2294ad7d3ccd1..5d2bedfe3600b4d4b1a806394bc8b301875067c6 100644
--- a/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp
+++ b/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp
@@ -162,3 +162,17 @@ TargetFrameLowering::getDwarfFrameBase(const MachineFunction &MF) const {
const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
return DwarfFrameBase{DwarfFrameBase::Register, {RI->getFrameRegister(MF)}};
}
+
+#ifdef ARK_GC_SUPPORT
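+// Default implementation: read the reserved frame size in bytes from the
+// function's "frame-reserved-slots" string attribute (parsed base-10).
+// Targets such as X86 override this to apply additional alignment.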
+int TargetFrameLowering::GetFrameReserveSize(MachineFunction &MF) const
+{
+ int64_t marker = 0x0;
+ MF.getFunction()
+ .getFnAttribute("frame-reserved-slots")
+ .getValueAsString()
+ .getAsInteger(10, marker);
+ return marker;
+}
+#endif
diff --git a/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp b/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
index addec6871fa1001e1e737f2f9af1062d0f2619ec..789f40fd11079a0e156b328b7aca751e8674c072 100644
--- a/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
+++ b/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
@@ -198,6 +198,7 @@ LLVMBool LLVMCreateMCJITCompilerForModule(
builder.setEngineKind(EngineKind::JIT)
.setErrorStr(&Error)
.setOptLevel((CodeGenOpt::Level)options.OptLevel)
+ .setRelocationModel(unwrap(options.RelMode))
.setTargetOptions(targetOptions);
bool JIT;
if (Optional<CodeModel::Model> CM = unwrap(options.CodeModel, JIT))
diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp
index 039b34ace6abe85795fa7c1baa1c01ed49c74e35..937134d9494f8fa9ebe2a3b2a399b5faedd5dffd 100644
--- a/llvm/lib/IR/Core.cpp
+++ b/llvm/lib/IR/Core.cpp
@@ -3914,6 +3914,25 @@ LLVMValueRef LLVMBuildCall2(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
makeArrayRef(unwrap(Args), NumArgs), Name));
}
+#ifdef ARK_GC_SUPPORT
+LLVMValueRef LLVMBuildCall3(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
+ LLVMValueRef *Args, unsigned NumArgs,
+ const char *Name, LLVMValueRef *deoptVals,
+ int NumVals) {
+ FunctionType *FTy = unwrap<FunctionType>(Ty);
+ std::vector<Value *> vals;
+ for (int i = 0; i < NumVals; i++) {
+ vals.push_back(unwrap(deoptVals[i]));
+ }
+ OperandBundleDefT<Value *> deoptBundle("deopt", vals);
+
+ return wrap(unwrap(B)->CreateCall(FTy, unwrap(Fn),
+ makeArrayRef(unwrap(Args), NumArgs), // Args
+ {deoptBundle}, // ArrayRef<OperandBundleDef>
+ Name));
+}
+#endif
+
LLVMValueRef LLVMBuildSelect(LLVMBuilderRef B, LLVMValueRef If,
LLVMValueRef Then, LLVMValueRef Else,
const char *Name) {
diff --git a/llvm/lib/Target/AArch64/AArch64CallingConvention.td b/llvm/lib/Target/AArch64/AArch64CallingConvention.td
index fdcc890bf58927b90a26db426a7da60c3bda226e..0f2af98ee9abff8cc173d8ef3ec015839a3bbc0b 100644
--- a/llvm/lib/Target/AArch64/AArch64CallingConvention.td
+++ b/llvm/lib/Target/AArch64/AArch64CallingConvention.td
@@ -359,7 +359,7 @@ def CC_AArch64_GHC : CallingConv<[
CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
// Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim
- CCIfType<[i64], CCAssignToReg<[X19, X20, X21, X22, X23, X24, X25, X26, X27, X28]>>
+ CCIfType<[i64], CCAssignToReg<[X19, FP, X20, X21, X22, X23, X24, X25, X26, X27, X28]>>
]>;
// The order of the callee-saves in this file is important, because the
@@ -377,9 +377,10 @@ def CC_AArch64_GHC : CallingConv<[
// vreg on entry, use it in RET & tail call generation; make that vreg def if we
// end up saving LR as part of a call frame). Watch this space...
def CSR_AArch64_AAPCS : CalleeSavedRegs<(add X19, X20, X21, X22, X23, X24,
- X25, X26, X27, X28, LR, FP,
+ X25, X26, X27, X28,
D8, D9, D10, D11,
- D12, D13, D14, D15)>;
+ D12, D13, D14, D15,
+ LR, FP)>;
// A variant for treating X18 as callee saved, when interfacing with
// code that needs X18 to be preserved.
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 65ee5016042cc3d6951c77c65531acaf7d5c0027..4f14434b7393c88c10dbc8f36f13e0aa9c5d77e6 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -1023,6 +1023,19 @@ static bool IsSVECalleeSave(MachineBasicBlock::iterator I) {
}
}
+#ifdef ARK_GC_SUPPORT
+Triple::ArchType AArch64FrameLowering::GetArkSupportTarget() const
+{
+ return Triple::aarch64;
+}
+
+int AArch64FrameLowering::GetFixedFpPosition() const
+{
+ return -1;
+}
+
+#endif
+
void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.begin();
@@ -1072,8 +1085,11 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
// All calls are tail calls in GHC calling conv, and functions have no
// prologue/epilogue.
+#ifndef ARK_GC_SUPPORT
if (MF.getFunction().getCallingConv() == CallingConv::GHC)
return;
+#endif
+ // With ARK GC support, GHC-convention (asm interpreter) code calls WebKit-convention functions, so we still need to push registers to the stack.
// Set tagged base pointer to the requested stack slot.
// Ideally it should match SP value after prologue.
@@ -1558,8 +1574,11 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
// All calls are tail calls in GHC calling conv, and functions have no
// prologue/epilogue.
+ #ifndef ARK_GC_SUPPORT
if (MF.getFunction().getCallingConv() == CallingConv::GHC)
return;
+ #endif
+ // With ARK GC support, GHC-convention (asm interpreter) code calls WebKit-convention functions, so we still need to push registers to the stack.
// Initial and residual are named for consistency with the prologue. Note that
// in the epilogue, the residual adjustment is executed first.
@@ -2542,8 +2561,11 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
RegScavenger *RS) const {
// All calls are tail calls in GHC calling conv, and functions have no
// prologue/epilogue.
+ #ifndef ARK_GC_SUPPORT
if (MF.getFunction().getCallingConv() == CallingConv::GHC)
return;
+ #endif
+ // With ARK GC support, GHC-convention (asm interpreter) code calls WebKit-convention functions, so we still need to push registers to the stack.
TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.h b/llvm/lib/Target/AArch64/AArch64FrameLowering.h
index 80079a9d9836d969496b595229b9817a299ccca6..7b660c6c5b04be3729d9413a5d7f0affb50aed14 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.h
@@ -15,6 +15,9 @@
#include "llvm/Support/TypeSize.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
+#ifdef ARK_GC_SUPPORT
+#include "llvm/ADT/Triple.h"
+#endif
namespace llvm {
@@ -38,6 +41,10 @@ public:
/// the function.
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+#ifdef ARK_GC_SUPPORT
+ Triple::ArchType GetArkSupportTarget() const override;
+ int GetFixedFpPosition() const override;
+#endif
bool canUseAsPrologue(const MachineBasicBlock &MBB) const override;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index c522ee76626d2b1cdf148143413096d53988a988..a53a719cba72e0eabf6383a606e131c376e52ac9 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -4987,7 +4987,11 @@ SDValue AArch64TargetLowering::LowerCallResult(
/// Return true if the calling convention is one that we can guarantee TCO for.
static bool canGuaranteeTCO(CallingConv::ID CC) {
+#ifdef ARK_GC_SUPPORT
+ return (CC == CallingConv::GHC) || (CC == CallingConv::Fast);
+#else
return CC == CallingConv::Fast;
+#endif
}
/// Return true if we might ever do TCO for calls with this calling convention.
@@ -5183,7 +5187,11 @@ SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
bool TailCallOpt) const {
+#ifdef ARK_GC_SUPPORT
+ return (CallCC == CallingConv::GHC || (CallCC == CallingConv::Fast)) && TailCallOpt;
+#else
return CallCC == CallingConv::Fast && TailCallOpt;
+#endif
}
/// LowerCall - Lower a call to a callseq_start + CALL + callseq_end chain,
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
index f90856d14b2fe7616774edc0024de12bc57fedc0..3f1a9d4e8c9f82b284d007b23d8c0bd48f678566 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -312,6 +312,17 @@ AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
if (TFI->hasFP(MF) || TT.isOSDarwin())
markSuperRegs(Reserved, AArch64::W29);
+#ifdef ARK_GC_SUPPORT
+ if (MF.getFunction().getCallingConv() == CallingConv::GHC) {
+ markSuperRegs(Reserved, AArch64::W29);
+ markSuperRegs(Reserved, AArch64::W30);
+ }
+ if ((MF.getFunction().getCallingConv() == CallingConv::WebKit_JS) ||
+ (MF.getFunction().getCallingConv() == CallingConv::C)) {
+ markSuperRegs(Reserved, AArch64::W30);
+ }
+#endif
+
for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
index 0f8b1d6584b1a09660033451ca56131e0be97db8..9dcb2e35e3e225a0a5fd1c8837808f4221a6d784 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
@@ -231,7 +231,11 @@ struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler {
} // namespace
static bool doesCalleeRestoreStack(CallingConv::ID CallConv, bool TailCallOpt) {
- return CallConv == CallingConv::Fast && TailCallOpt;
+ #ifdef ARK_GC_SUPPORT
+ return (CallConv == CallingConv::GHC || (CallConv == CallingConv::Fast)) && TailCallOpt;
+ #else
+ return CallConv == CallingConv::Fast && TailCallOpt;
+ #endif
}
void AArch64CallLowering::splitToValueTypes(
@@ -522,7 +526,11 @@ bool AArch64CallLowering::lowerFormalArguments(
/// Return true if the calling convention is one that we can guarantee TCO for.
static bool canGuaranteeTCO(CallingConv::ID CC) {
+#ifdef ARK_GC_SUPPORT
+ return (CC == CallingConv::GHC) || (CC == CallingConv::Fast);
+#else
return CC == CallingConv::Fast;
+#endif
}
/// Return true if we might ever do TCO for calls with this calling convention.
diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
index 04e21867d57112d4b5fc97e82a8acfba261133a9..9dcf08bb82ba963d5c9c5941c59ced6855f0de93 100644
--- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -55,7 +55,11 @@ using namespace llvm;
ARMAsmPrinter::ARMAsmPrinter(TargetMachine &TM,
std::unique_ptr<MCStreamer> Streamer)
: AsmPrinter(TM, std::move(Streamer)), Subtarget(nullptr), AFI(nullptr),
+#ifdef ARK_GC_SUPPORT
+ MCP(nullptr), InConstantPool(false), OptimizationGoals(-1), SM(*this) {}
+#else
MCP(nullptr), InConstantPool(false), OptimizationGoals(-1) {}
+#endif
void ARMAsmPrinter::emitFunctionBodyEnd() {
// Make sure to terminate any constant pools that were at the end
@@ -577,6 +581,9 @@ void ARMAsmPrinter::emitEndOfAsmFile(Module &M) {
OptimizationGoals = -1;
ATS.finishAttributeSection();
+#ifdef ARK_GC_SUPPORT
+ SM.serializeToStackMapSection();
+#endif
}
//===----------------------------------------------------------------------===//
@@ -2180,6 +2187,14 @@ void ARMAsmPrinter::emitInstruction(const MachineInstr *MI) {
case ARM::PATCHABLE_TAIL_CALL:
LowerPATCHABLE_TAIL_CALL(*MI);
return;
+ #ifdef ARK_GC_SUPPORT
+ case TargetOpcode::STACKMAP:
+ return LowerSTACKMAP(*OutStreamer, SM, *MI);
+ case TargetOpcode::PATCHPOINT:
+ return LowerPATCHPOINT(*OutStreamer, SM, *MI);
+ case TargetOpcode::STATEPOINT:
+ return LowerSTATEPOINT(*OutStreamer, SM, *MI);
+ #endif
case ARM::SpeculationBarrierISBDSBEndBB: {
// Print DSB SYS + ISB
MCInst TmpInstDSB;
@@ -2230,6 +2245,76 @@ void ARMAsmPrinter::emitInstruction(const MachineInstr *MI) {
EmitToStreamer(*OutStreamer, TmpInst);
}
+#ifdef ARK_GC_SUPPORT
+static unsigned roundUpTo4ByteAligned(unsigned n) {
+ unsigned mask = 3;
+ unsigned rev = ~3;
+ n = (n & rev) + (((n & mask) + mask) & rev);
+ return n;
+}
+
+void ARMAsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
+ const MachineInstr &MI) {
+ llvm_unreachable("Stackmap lowering is not implemented");
+}
+
+void ARMAsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
+ const MachineInstr &MI) {
+ llvm_unreachable("Patchpoint lowering is not implemented");
+}
+
+void ARMAsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
+ const MachineInstr &MI) {
+ assert(!AFI->isThumbFunction());
+
+ StatepointOpers SOpers(&MI);
+ MCInst Noop;
+ Subtarget->getInstrInfo()->getNoop(Noop);
+ if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
+ unsigned NumBytes = roundUpTo4ByteAligned(PatchBytes);
+ unsigned EncodedBytes = 0;
+ assert(NumBytes >= EncodedBytes &&
+ "Statepoint can't request size less than the length of a call.");
+ assert((NumBytes - EncodedBytes) % 4 == 0 &&
+ "Invalid number of NOP bytes requested!");
+ for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
+ EmitToStreamer(OutStreamer, Noop);
+ } else {
+ const MachineOperand &CallTarget = SOpers.getCallTarget();
+ MCOperand CallTargetMCOp;
+ unsigned CallOpcode;
+ switch (CallTarget.getType()) {
+ case MachineOperand::MO_GlobalAddress:
+ case MachineOperand::MO_ExternalSymbol:
+ ARMAsmPrinter::lowerOperand(CallTarget, CallTargetMCOp);
+ CallOpcode = ARM::BL;
+ break;
+ case MachineOperand::MO_Immediate:
+ CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
+ CallOpcode = ARM::BL;
+ break;
+ case MachineOperand::MO_Register:
+ CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
+ CallOpcode = ARM::BLX;
+ break;
+ default:
+ llvm_unreachable("Unsupported operand type in statepoint call target");
+ break;
+ }
+
+ EmitToStreamer(OutStreamer,
+ MCInstBuilder(CallOpcode).addOperand(CallTargetMCOp));
+ }
+
+ auto &Ctx = OutStreamer.getContext();
+ MCSymbol *MILabel = Ctx.createTempSymbol();
+ OutStreamer.emitLabel(MILabel);
+ SM.recordStatepoint(*MILabel, MI);
+}
+#endif
+
//===----------------------------------------------------------------------===//
// Target Registry Stuff
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.h b/llvm/lib/Target/ARM/ARMAsmPrinter.h
index f8ff047a1d068c6073bc6d16fbb76948d3158dbe..c8593f107c06b3b93d9fa7ad26620b5a87aa37d2 100644
--- a/llvm/lib/Target/ARM/ARMAsmPrinter.h
+++ b/llvm/lib/Target/ARM/ARMAsmPrinter.h
@@ -11,6 +11,9 @@
#include "ARMSubtarget.h"
#include "llvm/CodeGen/AsmPrinter.h"
+#ifdef ARK_GC_SUPPORT
+#include "llvm/CodeGen/StackMaps.h"
+#endif
#include "llvm/Target/TargetMachine.h"
namespace llvm {
@@ -65,6 +68,10 @@ class LLVM_LIBRARY_VISIBILITY ARMAsmPrinter : public AsmPrinter {
/// debug info can link properly.
SmallPtrSet<const GlobalValue *, 2> EmittedPromotedGlobalLabels;
+#ifdef ARK_GC_SUPPORT
+ StackMaps SM;
+#endif
+
public:
explicit ARMAsmPrinter(TargetMachine &TM,
std::unique_ptr<MCStreamer> Streamer);
@@ -129,6 +136,17 @@ private:
bool emitPseudoExpansionLowering(MCStreamer &OutStreamer,
const MachineInstr *MI);
+#ifdef ARK_GC_SUPPORT
+ void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
+ const MachineInstr &MI);
+
+ void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
+ const MachineInstr &MI);
+
+ void LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
+ const MachineInstr &MI);
+#endif
+
public:
unsigned getISAEncoding() override {
// ARM/Darwin adds ISA to the DWARF info for each function.
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index e418d53b56a45e6bbe88d70a88ca51d7d93d3ac0..a576734972f9514b737cbd37c771853e5b24506c 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -763,6 +763,11 @@ unsigned ARMBaseInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
return 0;
case TargetOpcode::BUNDLE:
return getInstBundleLength(MI);
+#ifdef ARK_GC_SUPPORT
+ case TargetOpcode::PATCHPOINT:
+ case TargetOpcode::STATEPOINT:
+ return MI.getOperand(1).getImm();
+#endif
case ARM::MOVi16_ga_pcrel:
case ARM::MOVTi16_ga_pcrel:
case ARM::t2MOVi16_ga_pcrel:
diff --git a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index 1a264dabeeb5153a9240f96ad47f5fcf218b29ea..e8f0378de71e674b8417e2db0de9e83eaaafcaeb 100644
--- a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -198,6 +198,12 @@ getReservedRegs(const MachineFunction &MF) const {
markSuperRegs(Reserved, ARM::APSR_NZCV);
if (TFI->hasFP(MF))
markSuperRegs(Reserved, getFramePointerReg(STI));
+#ifdef ARK_GC_SUPPORT
+ if (MF.getFunction().getCallingConv() == CallingConv::GHC) {
+ markSuperRegs(Reserved, ARM::R11);
+ markSuperRegs(Reserved, ARM::LR);
+ }
+#endif
if (hasBasePointer(MF))
markSuperRegs(Reserved, BasePtr);
// Some targets reserve R9.
diff --git a/llvm/lib/Target/ARM/ARMCallingConv.td b/llvm/lib/Target/ARM/ARMCallingConv.td
index 3517274e4c5c03c8406ac893e4551ed7fdaaa312..79c1645f4557febfa43a81055947840f62794a77 100644
--- a/llvm/lib/Target/ARM/ARMCallingConv.td
+++ b/llvm/lib/Target/ARM/ARMCallingConv.td
@@ -119,7 +119,7 @@ def CC_ARM_APCS_GHC : CallingConv<[
CCIfType<[i8, i16], CCPromoteToType<i32>>,
// Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, SpLim
- CCIfType<[i32], CCAssignToReg<[R4, R5, R6, R7, R8, R9, R10, R11]>>
+ CCIfType<[i32], CCAssignToReg<[R4, R11, R5, R6, R7, R8, R9, R10]>>
]>;
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index 630490f6f9149edd871960115f83970377caa1f3..8882b0c8173c97475a8fcd7430a86fed26df7a2c 100644
--- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
initializeFunctionInfo(const std::vector<MachineInstr *> &CPEMIs) {
NegOk = true;
break;
}
+ // Shrink the usable offset range by one bit here to work around
+ // out-of-range pc-relative fixup value errors seen during MCJIT compilation.
+ if (Bits > 0) {
+ Bits--;
+ }
// Remember that this is a user of a CP entry.
unsigned CPI = I.getOperand(op).getIndex();
diff --git a/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
index 9eeb7f20dc8d32da67e4b922e0d7591a399679af..0d270789579809aa4737e99fa6019a996edc4a80 100644
--- a/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -1588,6 +1588,7 @@ static unsigned estimateRSStackSizeLimit(MachineFunction &MF,
Limit = std::min(Limit, ((1U << 7) - 1) * 4);
break;
default:
+ break;
llvm_unreachable("Unhandled addressing mode in stack size limit calculation");
}
break; // At most one FI per instruction
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 598062672a56449be04d21f327f0e9471f213565..c7bd6885d102329319c3ae0dab76ff4e2f83a216 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -10991,6 +10991,14 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
llvm_unreachable("Unexpected instr type to insert");
}
+#ifdef ARK_GC_SUPPORT
+ case TargetOpcode::STATEPOINT:
+ case TargetOpcode::STACKMAP:
+ case TargetOpcode::PATCHPOINT:
+ MI.addOperand(*BB->getParent(), MachineOperand::CreateReg(ARM::LR, true, true));
+ return emitPatchPoint(MI, BB);
+#endif
+
// Thumb1 post-indexed loads are really just single-register LDMs.
case ARM::tLDR_postidx: {
MachineOperand Def(MI.getOperand(1));
diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index 866f1136400477ea5f2cb3ac302567ba38765ec9..4ca41ece19f3f95edee635ecd7310509590b49a0 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -1286,6 +1286,37 @@ bool X86FrameLowering::has128ByteRedZone(const MachineFunction& MF) const {
- for 32-bit code, substitute %e?? registers for %r??
*/
+#ifdef ARK_GC_SUPPORT
+Triple::ArchType X86FrameLowering::GetArkSupportTarget() const
+{
+ return Is64Bit ? Triple::x86_64 : Triple::x86;
+}
+
+int X86FrameLowering::GetFixedFpPosition() const
+{
+ return 2;
+}
+
+int X86FrameLowering::GetFrameReserveSize(MachineFunction &MF) const
+{
+ int slotSize = sizeof(uint64_t);
+ if (!Is64Bit) {
+ slotSize = sizeof(uint32_t);
+ }
+ int reserveSize = 0;
+ MF.getFunction()
+ .getFnAttribute("frame-reserved-slots")
+ .getValueAsString()
+ .getAsInteger(10, reserveSize);
+
+ // x86-64 stack frames should be 16-byte aligned.
+ if (Is64Bit) {
+ return RoundUp(reserveSize, 2 * sizeof(uint64_t));
+ }
+ return reserveSize;
+}
+#endif
+
void X86FrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
@@ -1486,6 +1517,20 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
else
MFI.setOffsetAdjustment(-StackSize);
}
+#ifdef ARK_GC_SUPPORT
+ // push marker
+ if (MF.getFunction().hasFnAttribute("frame-reserved-slots"))
+ {
+ unsigned StackPtr = TRI->getStackRegister();
+ int reserveSize = GetFrameReserveSize(MF);
+ const unsigned SUBOpc =
+ getSUBriOpcode(Uses64BitFramePtr, reserveSize);
+ BuildMI(MBB, MBBI, DL, TII.get(SUBOpc), StackPtr)
+ .addReg(StackPtr)
+ .addImm(reserveSize)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+#endif
// For EH funclets, only allocate enough space for outgoing calls. Save the
// NumBytes value that we would've used for the parent frame.
@@ -1959,6 +2004,21 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
// AfterPop is the position to insert .cfi_restore.
MachineBasicBlock::iterator AfterPop = MBBI;
if (HasFP) {
+#ifdef ARK_GC_SUPPORT
+ if (MF.getFunction().hasFnAttribute("frame-reserved-slots"))
+ {
+ int slotSize = sizeof(uint32_t);
+ if (Is64Bit) {
+ slotSize = sizeof(uint64_t);
+ }
+ int reserveSize = GetFrameReserveSize(MF);
+ for (unsigned i = 0; i < reserveSize / slotSize; i++) {
+ BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
+ MachineFramePtr)
+ .setMIFlag(MachineInstr::FrameDestroy);
+ }
+ }
+#endif
// Pop EBP.
BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
MachineFramePtr)
diff --git a/llvm/lib/Target/X86/X86FrameLowering.h b/llvm/lib/Target/X86/X86FrameLowering.h
index 26e80811af2e5aab3320a62e7ed41cb68eddb78c..81be7fb94bb9f506071739393a226747f6cb79de 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.h
+++ b/llvm/lib/Target/X86/X86FrameLowering.h
@@ -70,6 +70,11 @@ public:
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
+#ifdef ARK_GC_SUPPORT
+ Triple::ArchType GetArkSupportTarget() const override;
+ int GetFixedFpPosition() const override;
+ int GetFrameReserveSize(MachineFunction &MF) const override;
+#endif
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 1e2407c7e7f6b8b29cc964409a1274f71faeaf31..f20b78aa918a4d5a243ed480a4e9cd5226707d1a 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -3958,8 +3958,13 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
++NumTailCalls;
}
+#ifdef ARK_GC_SUPPORT
+ assert(!(isVarArg && canGuaranteeTCO(CallConv) && (CallConv != CallingConv::GHC)) &&
+ "Var args not supported with calling convention fastcc, ghc or hipe");
+#else
assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
"Var args not supported with calling convention fastcc, ghc or hipe");
+#endif
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.cpp b/llvm/lib/Target/X86/X86RegisterInfo.cpp
index d90b4e7bdc7ea1549b51b333a8665fbc724f40f9..00d30e503f120471d23f6d81d39af01fff4f491a 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -541,6 +541,12 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
for (const MCPhysReg &SubReg : subregs_inclusive(X86::RBP))
Reserved.set(SubReg);
}
+#ifdef ARK_GC_SUPPORT
+ if (MF.getFunction().getCallingConv() == CallingConv::GHC) {
+ for (const MCPhysReg &SubReg : subregs_inclusive(X86::RBP))
+ Reserved.set(SubReg);
+ }
+#endif
// Set the base-pointer register and its aliases as reserved if needed.
if (hasBasePointer(MF)) {
diff --git a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index b7830555bf737b3e6a5faf80a684db104c5c7e34..fb6ee8278c0201642b157215944e425d5b5136b7 100644
--- a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -521,6 +521,16 @@ static BaseDefiningValueResult findBaseDefiningValue(Value *I) {
return BaseDefiningValueResult(
ConstantPointerNull::get(cast<PointerType>(I->getType())), true);
}
+#ifdef ARK_GC_SUPPORT
+ // inttoptrs in an integral address space are currently ill-defined. We
+ // treat them as defining base pointers here for consistency with the
+ // constant rule above and because we don't really have a better semantic
+ // to give them. Note that the optimizer is always free to insert undefined
+ // behavior on dynamically dead paths as well.
+ // issue:https://gitee.com/openharmony/arkcompiler_ets_runtime/issues/I5L2AP?from=project-issue
+ if (isa<IntToPtrInst>(I))
+ return BaseDefiningValueResult(I, true);
+#endif
if (CastInst *CI = dyn_cast<CastInst>(I)) {
Value *Def = CI->stripPointerCasts();