
[AArch64] [ARM] Make a target-independent llvm.thread.pointer intrinsic.

Both AArch64 and ARM support llvm.<arch>.thread.pointer intrinsics that
just return the thread pointer.  I have a pending patch that does the same
for SystemZ (D19054), and there are many more targets that could benefit
from one.

This patch merges the ARM and AArch64 intrinsics into a single
target-independent one that will also be used by subsequent targets.
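
As a sketch (the calls below are illustrative, not taken from this
patch's tests), bitcode that used to read the thread pointer through a
per-target intrinsic:

    %tp = call i8* @llvm.arm.thread.pointer()

now uses the single generic form on every target that supports it:

    %tp = call i8* @llvm.thread.pointer()

Old declarations and calls keep working: the auto-upgrade path below
rewrites them to the new intrinsic.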

Differential Revision: http://reviews.llvm.org/D19098

llvm-svn: 266818
Marcin Koscielnicki 2016-04-19 20:51:05 +00:00
parent 1607a81238
commit 0919e582e6
12 changed files with 71 additions and 18 deletions


@@ -9634,6 +9634,33 @@ pass will generate the appropriate data structures and replace the
 ``llvm.instrprof_value_profile`` intrinsic with the call to the profile
 runtime library with proper arguments.
+
+'``llvm.thread.pointer``' Intrinsic
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+::
+
+      declare i8* @llvm.thread.pointer()
+
+Overview:
+"""""""""
+
+The '``llvm.thread.pointer``' intrinsic returns the value of the thread
+pointer.
+
+Semantics:
+""""""""""
+
+The '``llvm.thread.pointer``' intrinsic returns a pointer to the TLS area
+for the current thread. The exact semantics of this value are target
+specific: it may point to the start of TLS area, to the end, or somewhere
+in the middle. Depending on the target, this intrinsic may read a register,
+call a helper function, read from an alternate memory space, or perform
+other operations necessary to locate the TLS area. Not all targets support
+this intrinsic.
+
 Standard C Library Intrinsics
 -----------------------------
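
A minimal usage sketch (not part of the patch; the function name and the
40-byte offset are illustrative, chosen to match the 0x28 stack-guard
slot that the AArch64 changes below use):

    declare i8* @llvm.thread.pointer()

    define i8** @stack_guard_slot() {
      ; Read the raw thread pointer for the current thread.
      %tp = call i8* @llvm.thread.pointer()
      ; Address a target-defined slot relative to it (0x28 bytes here).
      %slot = getelementptr i8, i8* %tp, i32 40
      %typed = bitcast i8* %slot to i8**
      ret i8** %typed
    }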


@@ -306,6 +306,9 @@ def int_stackrestore : Intrinsic<[], [llvm_ptr_ty]>,
 def int_get_dynamic_area_offset : Intrinsic<[llvm_anyint_ty]>;
+
+def int_thread_pointer : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>,
+                         GCCBuiltin<"__builtin_thread_pointer">;
 // IntrReadWriteArgMem is more pessimistic than strictly necessary for prefetch,
 // however it does conveniently prevent the prefetch from being reordered
 // with respect to nearby accesses to the same memory.


@@ -13,9 +13,6 @@
 let TargetPrefix = "aarch64" in {
-
-def int_aarch64_thread_pointer : GCCBuiltin<"__builtin_thread_pointer">,
-                                 Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
 def int_aarch64_ldxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty]>;
 def int_aarch64_ldaxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty]>;
 def int_aarch64_stxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty]>;


@@ -17,9 +17,6 @@
 let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
-
-def int_arm_thread_pointer : GCCBuiltin<"__builtin_thread_pointer">,
-                             Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
 // A space-consuming intrinsic primarily for testing ARMConstantIslands. The
 // first argument is the number of bytes this "instruction" takes up, the second
 // and return value are essentially chains, used to force ordering during ISel.


@@ -126,6 +126,10 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
         StoreLaneInts[fArgs.size() - 5], Tys);
       return true;
     }
+    if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") {
+      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer);
+      return true;
+    }
     break;
   }

@@ -799,6 +803,12 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
     CI->eraseFromParent();
     return;
   }
+
+  case Intrinsic::thread_pointer: {
+    CI->replaceAllUsesWith(Builder.CreateCall(NewFn, {}));
+    CI->eraseFromParent();
+    return;
+  }
   }
 }


@@ -2326,7 +2326,7 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
   SDLoc dl(Op);
   switch (IntNo) {
   default: return SDValue();    // Don't custom lower most intrinsics.
-  case Intrinsic::aarch64_thread_pointer: {
+  case Intrinsic::thread_pointer: {
     EVT PtrVT = getPointerTy(DAG.getDataLayout());
     return DAG.getNode(AArch64ISD::THREAD_POINTER, dl, PtrVT);
   }

@@ -10265,7 +10265,7 @@ Value *AArch64TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
   const unsigned TlsOffset = 0x28;
   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
   Function *ThreadPointerFunc =
-      Intrinsic::getDeclaration(M, Intrinsic::aarch64_thread_pointer);
+      Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
   return IRB.CreatePointerCast(
       IRB.CreateConstGEP1_32(IRB.CreateCall(ThreadPointerFunc), TlsOffset),
       Type::getInt8PtrTy(IRB.getContext())->getPointerTo(0));

@@ -10281,7 +10281,7 @@ Value *AArch64TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) cons
   const unsigned TlsOffset = 0x48;
   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
   Function *ThreadPointerFunc =
-      Intrinsic::getDeclaration(M, Intrinsic::aarch64_thread_pointer);
+      Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
   return IRB.CreatePointerCast(
       IRB.CreateConstGEP1_32(IRB.CreateCall(ThreadPointerFunc), TlsOffset),
       Type::getInt8PtrTy(IRB.getContext())->getPointerTo(0));


@@ -2929,7 +2929,7 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
            "RBIT intrinsic must have i32 type!");
     return DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, Op.getOperand(1));
   }
-  case Intrinsic::arm_thread_pointer: {
+  case Intrinsic::thread_pointer: {
     EVT PtrVT = getPointerTy(DAG.getDataLayout());
     return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
   }


@@ -0,0 +1,19 @@
+; Test autoupgrade of arch-specific thread pointer intrinsics
+; RUN: llvm-as < %s | llvm-dis | FileCheck %s
+
+declare i8* @llvm.aarch64.thread.pointer()
+declare i8* @llvm.arm.thread.pointer()
+
+define i8* @test1() {
+; CHECK: test1()
+; CHECK: call i8* @llvm.thread.pointer()
+  %1 = call i8* @llvm.aarch64.thread.pointer()
+  ret i8* %1
+}
+
+define i8* @test2() {
+; CHECK: test2()
+; CHECK: call i8* @llvm.thread.pointer()
+  %1 = call i8* @llvm.arm.thread.pointer()
+  ret i8* %1
+}


@@ -1,11 +1,11 @@
 ; RUN: llc < %s -march=aarch64 -mtriple=aarch64-linux-gnu | FileCheck %s
 
 ; Function Attrs: nounwind readnone
-declare i8* @llvm.aarch64.thread.pointer() #1
+declare i8* @llvm.thread.pointer() #1
 
 define i8* @thread_pointer() {
 ; CHECK: thread_pointer:
 ; CHECK: mrs {{x[0-9]+}}, TPIDR_EL0
-  %1 = tail call i8* @llvm.aarch64.thread.pointer()
+  %1 = tail call i8* @llvm.thread.pointer()
   ret i8* %1
 }


@@ -3,8 +3,8 @@
 define i8* @test() {
 entry:
-  %tmp1 = call i8* @llvm.arm.thread.pointer( )    ; <i8*> [#uses=0]
+  %tmp1 = call i8* @llvm.thread.pointer( )    ; <i8*> [#uses=0]
   ret i8* %tmp1
 }
 
-declare i8* @llvm.arm.thread.pointer()
+declare i8* @llvm.thread.pointer()


@@ -3,7 +3,7 @@
 define void @foo() nounwind uwtable safestack {
 entry:
-; CHECK: %[[TP:.*]] = call i8* @llvm.aarch64.thread.pointer()
+; CHECK: %[[TP:.*]] = call i8* @llvm.thread.pointer()
 ; CHECK: %[[SPA0:.*]] = getelementptr i8, i8* %[[TP]], i32 72
 ; CHECK: %[[SPA:.*]] = bitcast i8* %[[SPA0]] to i8**
 ; CHECK: %[[USP:.*]] = load i8*, i8** %[[SPA]]


@@ -3,10 +3,10 @@
 define void @foo() nounwind uwtable safestack sspreq {
 entry:
-; The first @llvm.aarch64.thread.pointer is for the unsafe stack pointer, skip it.
-; TLS: call i8* @llvm.aarch64.thread.pointer()
+; The first @llvm.thread.pointer is for the unsafe stack pointer, skip it.
+; TLS: call i8* @llvm.thread.pointer()
-; TLS: %[[TP2:.*]] = call i8* @llvm.aarch64.thread.pointer()
+; TLS: %[[TP2:.*]] = call i8* @llvm.thread.pointer()
 ; TLS: %[[B:.*]] = getelementptr i8, i8* %[[TP2]], i32 40
 ; TLS: %[[C:.*]] = bitcast i8* %[[B]] to i8**
 ; TLS: %[[StackGuard:.*]] = load i8*, i8** %[[C]]