mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-11-22 18:54:02 +01:00
Add support for lowering 32-bit/64-bit pointers
Summary: This follows a previous patch that changes the X86 datalayout to represent mixed size pointers (32-bit sext, 32-bit zext, and 64-bit) with address spaces (https://reviews.llvm.org/D64931) This patch implements the address space cast lowering to the corresponding sign extension, zero extension, or truncate instructions. Related to https://bugs.llvm.org/show_bug.cgi?id=42359 Reviewers: rnk, craig.topper, RKSimon Subscribers: hiraditya, llvm-commits Tags: #llvm Differential Revision: https://reviews.llvm.org/D69639
This commit is contained in:
parent
794b22ce6e
commit
dfe74f4b3c
@ -410,8 +410,8 @@ unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
|
||||
else if (isa<ConstantPointerNull>(V))
|
||||
// Translate this as an integer zero so that it can be
|
||||
// local-CSE'd with actual integer zeros.
|
||||
Reg = getRegForValue(
|
||||
Constant::getNullValue(DL.getIntPtrType(V->getContext())));
|
||||
Reg =
|
||||
getRegForValue(Constant::getNullValue(DL.getIntPtrType(V->getType())));
|
||||
else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
|
||||
if (CF->isNullValue())
|
||||
Reg = fastMaterializeFloatZero(CF);
|
||||
|
@ -150,6 +150,18 @@ void initializeX86ExpandPseudoPass(PassRegistry &);
|
||||
void initializeX86FlagsCopyLoweringPassPass(PassRegistry &);
|
||||
void initializeX86OptimizeLEAPassPass(PassRegistry &);
|
||||
void initializeX86SpeculativeLoadHardeningPassPass(PassRegistry &);
|
||||
|
||||
/// X86 address spaces with target-specific lowering semantics.
/// 256-258 select segment registers; 270-272 encode mixed-size pointers
/// (see the X86 datalayout: p270:32:32-p271:32:32-p272:64:64).
namespace X86AS {
enum : unsigned {
  GS = 256,         // %gs segment-relative addressing.
  FS = 257,         // %fs segment-relative addressing.
  SS = 258,         // %ss segment-relative addressing.
  PTR32_SPTR = 270, // 32-bit pointer, sign-extended when widened to 64 bits.
  PTR32_UPTR = 271, // 32-bit pointer, zero-extended when widened to 64 bits.
  PTR64 = 272       // Plain 64-bit pointer.
};
} // End X86AS namespace
|
||||
|
||||
} // End llvm namespace
|
||||
|
||||
#endif
|
||||
|
@ -2224,12 +2224,11 @@ bool X86DAGToDAGISel::selectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
|
||||
AM.Scale = cast<ConstantSDNode>(Mgs->getScale())->getZExtValue();
|
||||
|
||||
unsigned AddrSpace = cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
|
||||
// AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
|
||||
if (AddrSpace == 256)
|
||||
if (AddrSpace == X86AS::GS)
|
||||
AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
|
||||
if (AddrSpace == 257)
|
||||
if (AddrSpace == X86AS::FS)
|
||||
AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
|
||||
if (AddrSpace == 258)
|
||||
if (AddrSpace == X86AS::SS)
|
||||
AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
|
||||
|
||||
SDLoc DL(N);
|
||||
|
@ -277,6 +277,10 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
|
||||
setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
|
||||
}
|
||||
|
||||
// Handle address space casts between mixed sized pointers.
|
||||
setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
|
||||
setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
|
||||
|
||||
// TODO: when we have SSE, these could be more efficient, by using movd/movq.
|
||||
if (!X86ScalarSSEf64) {
|
||||
setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
|
||||
@ -2443,6 +2447,10 @@ bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
|
||||
unsigned DestAS) const {
|
||||
assert(SrcAS != DestAS && "Expected different address spaces!");
|
||||
|
||||
const TargetMachine &TM = getTargetMachine();
|
||||
if (TM.getPointerSize(SrcAS) != TM.getPointerSize(DestAS))
|
||||
return false;
|
||||
|
||||
return SrcAS < 256 && DestAS < 256;
|
||||
}
|
||||
|
||||
@ -27762,6 +27770,29 @@ static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
|
||||
return DAG.getMergeValues({Extract, NewGather.getValue(2)}, dl);
|
||||
}
|
||||
|
||||
// Lower ISD::ADDRSPACECAST between X86's mixed-size pointer address spaces
// to the matching integer conversion:
//  - widening to i64 zero-extends for PTR32_UPTR sources and sign-extends
//    for every other 32-bit source,
//  - narrowing to i32 truncates,
//  - anything else is a fatal lowering error.
static SDValue LowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  SDValue Src = Op.getOperand(0);
  MVT DstVT = Op.getSimpleValueType();

  const auto *CastN = cast<AddrSpaceCastSDNode>(Op.getNode());
  unsigned SrcAS = CastN->getSrcAddressSpace();

  assert(SrcAS != CastN->getDestAddressSpace() &&
         "addrspacecast must be between different address spaces");

  // 32-bit -> 64-bit: extension kind depends on the source address space.
  if (DstVT == MVT::i64)
    return DAG.getNode(SrcAS == X86AS::PTR32_UPTR ? ISD::ZERO_EXTEND
                                                  : ISD::SIGN_EXTEND,
                       dl, DstVT, Src);

  // 64-bit -> 32-bit: drop the high half.
  if (DstVT == MVT::i32)
    return DAG.getNode(ISD::TRUNCATE, dl, DstVT, Src);

  report_fatal_error("Bad address space in addrspacecast");
}
|
||||
|
||||
SDValue X86TargetLowering::LowerGC_TRANSITION_START(SDValue Op,
|
||||
SelectionDAG &DAG) const {
|
||||
// TODO: Eventually, the lowering of these nodes should be informed by or
|
||||
@ -27948,6 +27979,8 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
|
||||
case ISD::GC_TRANSITION_START:
|
||||
return LowerGC_TRANSITION_START(Op, DAG);
|
||||
case ISD::GC_TRANSITION_END: return LowerGC_TRANSITION_END(Op, DAG);
|
||||
case ISD::ADDRSPACECAST:
|
||||
return LowerADDRSPACECAST(Op, DAG);
|
||||
}
|
||||
}
|
||||
|
||||
@ -28737,6 +28770,28 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
|
||||
Results.push_back(Res.getValue(1));
|
||||
return;
|
||||
}
|
||||
case ISD::ADDRSPACECAST: {
|
||||
SDValue Src = N->getOperand(0);
|
||||
EVT DstVT = N->getValueType(0);
|
||||
AddrSpaceCastSDNode *CastN = cast<AddrSpaceCastSDNode>(N);
|
||||
unsigned SrcAS = CastN->getSrcAddressSpace();
|
||||
|
||||
assert(SrcAS != CastN->getDestAddressSpace() &&
|
||||
"addrspacecast must be between different address spaces");
|
||||
|
||||
SDValue Res;
|
||||
if (SrcAS == X86AS::PTR32_UPTR && DstVT == MVT::i64)
|
||||
Res = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Src);
|
||||
else if (DstVT == MVT::i64)
|
||||
Res = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Src);
|
||||
else if (DstVT == MVT::i32)
|
||||
Res = DAG.getNode(ISD::TRUNCATE, dl, DstVT, Src);
|
||||
else
|
||||
report_fatal_error("Unrecognized addrspacecast type legalization");
|
||||
|
||||
Results.push_back(Res);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
128
test/CodeGen/X86/mixed-ptr-sizes.ll
Normal file
128
test/CodeGen/X86/mixed-ptr-sizes.ll
Normal file
@ -0,0 +1,128 @@
|
||||
; RUN: llc < %s | FileCheck --check-prefixes=CHECK %s
|
||||
; RUN: llc -O0 < %s | FileCheck --check-prefixes=CHECK %s
|
||||
|
||||
; Source to regenerate:
|
||||
; struct Foo {
|
||||
; int * __ptr32 p32;
|
||||
; int * __ptr64 p64;
|
||||
; __attribute__((address_space(9))) int *p_other;
|
||||
; };
|
||||
; void use_foo(Foo *f);
|
||||
; void test_sign_ext(Foo *f, int * __ptr32 __sptr i) {
|
||||
; f->p64 = i;
|
||||
; use_foo(f);
|
||||
; }
|
||||
; void test_zero_ext(Foo *f, int * __ptr32 __uptr i) {
|
||||
; f->p64 = i;
|
||||
; use_foo(f);
|
||||
; }
|
||||
; void test_trunc(Foo *f, int * __ptr64 i) {
|
||||
; f->p32 = i;
|
||||
; use_foo(f);
|
||||
; }
|
||||
; void test_noop1(Foo *f, int * __ptr32 i) {
|
||||
; f->p32 = i;
|
||||
; use_foo(f);
|
||||
; }
|
||||
; void test_noop2(Foo *f, int * __ptr64 i) {
|
||||
; f->p64 = i;
|
||||
; use_foo(f);
|
||||
; }
|
||||
; void test_null_arg(Foo *f, int * __ptr32 i) {
|
||||
; test_noop1(f, 0);
|
||||
; }
|
||||
; void test_unrecognized(Foo *f, __attribute__((address_space(14))) int *i) {
|
||||
; f->p32 = (int * __ptr32)i;
|
||||
; use_foo(f);
|
||||
; }
|
||||
;
|
||||
; $ clang -cc1 -triple x86_64-windows-msvc -fms-extensions -O2 -S t.cpp
|
||||
|
||||
target datalayout = "e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
|
||||
target triple = "x86_64-unknown-windows-msvc"
|
||||
|
||||
%struct.Foo = type { i32 addrspace(270)*, i32*, i32 addrspace(9)* }
|
||||
declare dso_local void @use_foo(%struct.Foo*)
|
||||
|
||||
; addrspace(270) is a 32-bit sign-extended pointer (p270:32:32 in the
; datalayout; MSVC __ptr32 __sptr), so widening it to a default 64-bit
; pointer must lower to a sign extension (movslq).
define dso_local void @test_sign_ext(%struct.Foo* %f, i32 addrspace(270)* %i) {
; CHECK-LABEL: test_sign_ext
; CHECK: movslq %edx, %rax
entry:
  %0 = addrspacecast i32 addrspace(270)* %i to i32*
  %p64 = getelementptr inbounds %struct.Foo, %struct.Foo* %f, i64 0, i32 1
  store i32* %0, i32** %p64, align 8
  tail call void @use_foo(%struct.Foo* %f)
  ret void
}
|
||||
|
||||
; addrspace(271) is a 32-bit zero-extended pointer (p271:32:32; __ptr32
; __uptr), so widening to 64 bits must zero-extend — a 32-bit movl to %eax
; implicitly clears the upper 32 bits of %rax.
define dso_local void @test_zero_ext(%struct.Foo* %f, i32 addrspace(271)* %i) {
; CHECK-LABEL: test_zero_ext
; CHECK: movl %edx, %eax
entry:
  %0 = addrspacecast i32 addrspace(271)* %i to i32*
  %p64 = getelementptr inbounds %struct.Foo, %struct.Foo* %f, i64 0, i32 1
  store i32* %0, i32** %p64, align 8
  tail call void @use_foo(%struct.Foo* %f)
  ret void
}
|
||||
|
||||
; Narrowing a default 64-bit pointer to the 32-bit addrspace(270) lowers to
; a truncate, so only the low 32 bits are stored (movl to memory).
define dso_local void @test_trunc(%struct.Foo* %f, i32* %i) {
; CHECK-LABEL: test_trunc
; CHECK: movl %edx, (%rcx)
entry:
  %0 = addrspacecast i32* %i to i32 addrspace(270)*
  %p32 = getelementptr inbounds %struct.Foo, %struct.Foo* %f, i64 0, i32 0
  store i32 addrspace(270)* %0, i32 addrspace(270)** %p32, align 8
  tail call void @use_foo(%struct.Foo* %f)
  ret void
}
|
||||
|
||||
; No cast involved: a 32-bit pointer argument is stored directly into a
; 32-bit pointer field, so a plain 32-bit store is expected.
define dso_local void @test_noop1(%struct.Foo* %f, i32 addrspace(270)* %i) {
; CHECK-LABEL: test_noop1
; CHECK: movl %edx, (%rcx)
entry:
  %p32 = getelementptr inbounds %struct.Foo, %struct.Foo* %f, i64 0, i32 0
  store i32 addrspace(270)* %i, i32 addrspace(270)** %p32, align 8
  tail call void @use_foo(%struct.Foo* %f)
  ret void
}
|
||||
|
||||
; No cast involved: a 64-bit pointer stored into the 64-bit p64 field
; (offset 8 in %struct.Foo) is a plain 64-bit store.
define dso_local void @test_noop2(%struct.Foo* %f, i32* %i) {
; CHECK-LABEL: test_noop2
; CHECK: movq %rdx, 8(%rcx)
entry:
  %p64 = getelementptr inbounds %struct.Foo, %struct.Foo* %f, i64 0, i32 1
  store i32* %i, i32** %p64, align 8
  tail call void @use_foo(%struct.Foo* %f)
  ret void
}
|
||||
|
||||
; Test that null can be passed as a 32-bit pointer.
|
||||
; Regression check for FastISel: a null constant must be materializable as
; a 32-bit (addrspace 270) pointer argument without crashing.
define dso_local void @test_null_arg(%struct.Foo* %f) {
entry:
  call void @test_noop1(%struct.Foo* %f, i32 addrspace(270)* null)
  ret void
}
|
||||
|
||||
; Test casts between unrecognized address spaces.
|
||||
; addrspace(14) has no X86-specific meaning; lowering falls back to the
; destination-size rule. 14 uses the default pointer width (presumably
; 64-bit — the datalayout gives no explicit pN entry for it), so casting to
; the 32-bit addrspace(270) truncates.
define void @test_unrecognized(%struct.Foo* %f, i32 addrspace(14)* %i) {
; CHECK-LABEL: test_unrecognized
; CHECK: movl %edx, (%rcx)
entry:
  %0 = addrspacecast i32 addrspace(14)* %i to i32 addrspace(270)*
  %p32 = getelementptr inbounds %struct.Foo, %struct.Foo* %f, i64 0, i32 0
  store i32 addrspace(270)* %0, i32 addrspace(270)** %p32, align 8
  tail call void @use_foo(%struct.Foo* %f)
  ret void
}
|
||||
|
||||
; Cast from the 32-bit unsigned addrspace(271) to the unrecognized
; addrspace(9) (default width, presumably 64-bit): the PTR32_UPTR source
; forces a zero extension, hence a 32-bit movl that clears the upper half.
define void @test_unrecognized2(%struct.Foo* %f, i32 addrspace(271)* %i) {
; CHECK-LABEL: test_unrecognized2
; CHECK: movl %edx, %eax
entry:
  %0 = addrspacecast i32 addrspace(271)* %i to i32 addrspace(9)*
  %p32 = getelementptr inbounds %struct.Foo, %struct.Foo* %f, i64 0, i32 2
  store i32 addrspace(9)* %0, i32 addrspace(9)** %p32, align 8
  tail call void @use_foo(%struct.Foo* %f)
  ret void
}
|
Loading…
Reference in New Issue
Block a user