
[NVPTX] added match.{any,all}.sync instructions, intrinsics & builtins.

Differential Revision: https://reviews.llvm.org/D38191

llvm-svn: 314223
commit 66a9be35a9 (parent 0bba282e33)
Author: Artem Belevich
Date:   2017-09-26 17:07:23 +00:00

6 changed files with 237 additions and 0 deletions
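As a usage sketch only (not part of this commit): assuming clang's CUDA front end together with the companion clang change that exposes the __nvvm_match_any_sync_i32 builtin named in the GCCBuiltin mapping below, device code could reach the new instruction roughly like this. The kernel name and the full-warp mask constant are illustrative assumptions.

// Illustrative CUDA device code; names are made up for the example.
__global__ void match_any_demo(unsigned *out, unsigned v) {
  unsigned mask = 0xffffffffu;  // assume the whole warp participates here
  // Set of lanes in `mask` whose `v` equals this lane's `v`; lowers to the
  // @llvm.nvvm.match.any.sync.i32 intrinsic and then to match.any.sync.b32.
  out[threadIdx.x] = __nvvm_match_any_sync_i32(mask, v);
}

The pair-returning match.all form is reached through its own intrinsic; the new test at the bottom of this commit shows the IR-level calling convention for it.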

include/llvm/IR/IntrinsicsNVVM.td

@@ -3842,4 +3842,31 @@ def int_nvvm_vote_ballot_sync :
            [IntrNoMem, IntrConvergent], "llvm.nvvm.vote.ballot.sync">,
  GCCBuiltin<"__nvvm_vote_ballot_sync">;

//
// MATCH.SYNC
//
// match.any.sync.b32 mask, value
def int_nvvm_match_any_sync_i32 :
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrConvergent], "llvm.nvvm.match.any.sync.i32">,
  GCCBuiltin<"__nvvm_match_any_sync_i32">;
// match.any.sync.b64 mask, value
def int_nvvm_match_any_sync_i64 :
  Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty],
            [IntrNoMem, IntrConvergent], "llvm.nvvm.match.any.sync.i64">,
  GCCBuiltin<"__nvvm_match_any_sync_i64">;
// The match.all instruction has two variants -- one returns a single value, the
// other returns a pair {value, predicate}. We currently only implement the
// latter, as that's the variant exposed by the CUDA API.
// match.all.sync.b32p mask, value
def int_nvvm_match_all_sync_i32p :
  Intrinsic<[llvm_i32_ty, llvm_i1_ty], [llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrConvergent], "llvm.nvvm.match.all.sync.i32p">;
// match.all.sync.b64p mask, value
def int_nvvm_match_all_sync_i64p :
  Intrinsic<[llvm_i64_ty, llvm_i1_ty], [llvm_i32_ty, llvm_i64_ty],
            [IntrNoMem, IntrConvergent], "llvm.nvvm.match.all.sync.i64p">;
} // let TargetPrefix = "nvvm"
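For orientation, the sketch below is a plain host-side model of what match.any and match.all compute across a 32-lane warp, following the PTX ISA description; it is not code from this commit, and it ignores inactive-lane and exited-thread subtleties.

#include <cstdint>

// Reference model (illustrative): values[i] is lane i's input value and
// membermask selects the lanes taking part in the operation.

// match.any: for `lane`, the set of participating lanes whose value equals
// values[lane].
static uint32_t match_any_ref(const uint32_t values[32], uint32_t membermask,
                              unsigned lane) {
  uint32_t result = 0;
  for (unsigned i = 0; i < 32; ++i)
    if (((membermask >> i) & 1) && values[i] == values[lane])
      result |= 1u << i;
  return result;
}

// match.all: membermask with pred=true when every participating lane holds the
// same value, otherwise 0 with pred=false -- the {value, predicate} pair that
// the *_i32p/_i64p intrinsics above return.
static uint32_t match_all_ref(const uint32_t values[32], uint32_t membermask,
                              bool *pred) {
  int first = -1;
  bool all_equal = true;
  for (unsigned i = 0; i < 32; ++i) {
    if (!((membermask >> i) & 1))
      continue;
    if (first < 0)
      first = static_cast<int>(i);
    else if (values[i] != values[first])
      all_equal = false;
  }
  *pred = all_equal;
  return all_equal ? membermask : 0;
}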

lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp

@@ -715,6 +715,10 @@ bool NVPTXDAGToDAGISel::tryIntrinsicNoChain(SDNode *N) {
  case Intrinsic::nvvm_texsurf_handle_internal:
    SelectTexSurfHandle(N);
    return true;
  case Intrinsic::nvvm_match_all_sync_i32p:
  case Intrinsic::nvvm_match_all_sync_i64p:
    SelectMatchAll(N);
    return true;
  }
}
@@ -726,6 +730,36 @@ void NVPTXDAGToDAGISel::SelectTexSurfHandle(SDNode *N) {
                                        MVT::i64, GlobalVal));
}

void NVPTXDAGToDAGISel::SelectMatchAll(SDNode *N) {
  SDLoc DL(N);
  enum { IS_I64 = 4, HAS_CONST_VALUE = 2, HAS_CONST_MASK = 1 };
  unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
  unsigned OpcodeIndex =
      (IID == Intrinsic::nvvm_match_all_sync_i64p) ? IS_I64 : 0;
  SDValue MaskOp = N->getOperand(1);
  SDValue ValueOp = N->getOperand(2);
  if (ConstantSDNode *ValueConst = dyn_cast<ConstantSDNode>(ValueOp)) {
    OpcodeIndex |= HAS_CONST_VALUE;
    ValueOp = CurDAG->getTargetConstant(ValueConst->getZExtValue(), DL,
                                        ValueConst->getValueType(0));
  }
  if (ConstantSDNode *MaskConst = dyn_cast<ConstantSDNode>(MaskOp)) {
    OpcodeIndex |= HAS_CONST_MASK;
    MaskOp = CurDAG->getTargetConstant(MaskConst->getZExtValue(), DL,
                                       MaskConst->getValueType(0));
  }
  // Maps {IS_I64, HAS_CONST_VALUE, HAS_CONST_MASK} -> opcode
  unsigned Opcodes[8] = {
      NVPTX::MATCH_ALLP_SYNC_32rr, NVPTX::MATCH_ALLP_SYNC_32ri,
      NVPTX::MATCH_ALLP_SYNC_32ir, NVPTX::MATCH_ALLP_SYNC_32ii,
      NVPTX::MATCH_ALLP_SYNC_64rr, NVPTX::MATCH_ALLP_SYNC_64ri,
      NVPTX::MATCH_ALLP_SYNC_64ir, NVPTX::MATCH_ALLP_SYNC_64ii};
  SDNode *NewNode = CurDAG->getMachineNode(Opcodes[OpcodeIndex], DL,
                                           {ValueOp->getValueType(0), MVT::i1},
                                           {MaskOp, ValueOp});
  ReplaceNode(N, NewNode);
}
void NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
SDValue Src = N->getOperand(0);
AddrSpaceCastSDNode *CastN = cast<AddrSpaceCastSDNode>(N);
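To make the table lookup in SelectMatchAll above concrete, here is a tiny standalone sketch of the same three-bit encoding; the variant strings stand in for the NVPTX::MATCH_ALLP_SYNC_* opcodes and everything else is illustrative.

#include <cassert>
#include <cstdio>

int main() {
  enum { IS_I64 = 4, HAS_CONST_VALUE = 2, HAS_CONST_MASK = 1 };
  // Same ordering as the Opcodes[] array above: bit 0 means the mask is an
  // immediate, bit 1 means the value is an immediate, bit 2 selects the b64 form.
  const char *Variants[8] = {"32rr", "32ri", "32ir", "32ii",
                             "64rr", "64ri", "64ir", "64ii"};
  // Example: the i64 intrinsic called with a constant mask and a register value.
  unsigned Index = IS_I64 | HAS_CONST_MASK;
  assert(Index == 5);
  std::printf("MATCH_ALLP_SYNC_%s\n", Variants[Index]);  // MATCH_ALLP_SYNC_64ri
  return 0;
}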

lib/Target/NVPTX/NVPTXISelDAGToDAG.h

@@ -58,6 +58,7 @@ private:
bool tryIntrinsicNoChain(SDNode *N);
bool tryIntrinsicChain(SDNode *N);
void SelectTexSurfHandle(SDNode *N);
void SelectMatchAll(SDNode *N);
bool tryLoad(SDNode *N);
bool tryLoadVector(SDNode *N);
bool tryLDGLDU(SDNode *N);

lib/Target/NVPTX/NVPTXInstrInfo.td

@@ -158,6 +158,7 @@ def hasPTX31 : Predicate<"Subtarget->getPTXVersion() >= 31">;
def hasPTX60 : Predicate<"Subtarget->getPTXVersion() >= 60">;
def hasSM30 : Predicate<"Subtarget->getSmVersion() >= 30">;
def hasSM70 : Predicate<"Subtarget->getSmVersion() >= 70">;
def useFP16Math: Predicate<"Subtarget->allowFP16Math()">;

lib/Target/NVPTX/NVPTXIntrinsics.td

@@ -247,6 +247,63 @@ defm VOTE_SYNC_ANY : VOTE_SYNC<Int1Regs, "any.pred", int_nvvm_vote_any_sync>;
defm VOTE_SYNC_UNI : VOTE_SYNC<Int1Regs, "uni.pred", int_nvvm_vote_uni_sync>;
defm VOTE_SYNC_BALLOT : VOTE_SYNC<Int32Regs, "ballot.b32", int_nvvm_vote_ballot_sync>;
multiclass MATCH_ANY_SYNC<NVPTXRegClass regclass, string ptxtype, Intrinsic IntOp,
                          Operand ImmOp> {
  def ii : NVPTXInst<(outs regclass:$dest), (ins i32imm:$mask, ImmOp:$value),
                     "match.any.sync." # ptxtype # " \t$dest, $value, $mask;",
                     [(set regclass:$dest, (IntOp imm:$mask, imm:$value))]>,
           Requires<[hasPTX60, hasSM70]>;
  def ir : NVPTXInst<(outs regclass:$dest), (ins Int32Regs:$mask, ImmOp:$value),
                     "match.any.sync." # ptxtype # " \t$dest, $value, $mask;",
                     [(set regclass:$dest, (IntOp Int32Regs:$mask, imm:$value))]>,
           Requires<[hasPTX60, hasSM70]>;
  def ri : NVPTXInst<(outs regclass:$dest), (ins i32imm:$mask, regclass:$value),
                     "match.any.sync." # ptxtype # " \t$dest, $value, $mask;",
                     [(set regclass:$dest, (IntOp imm:$mask, regclass:$value))]>,
           Requires<[hasPTX60, hasSM70]>;
  def rr : NVPTXInst<(outs regclass:$dest), (ins Int32Regs:$mask, regclass:$value),
                     "match.any.sync." # ptxtype # " \t$dest, $value, $mask;",
                     [(set regclass:$dest, (IntOp Int32Regs:$mask, regclass:$value))]>,
           Requires<[hasPTX60, hasSM70]>;
}

defm MATCH_ANY_SYNC_32 : MATCH_ANY_SYNC<Int32Regs, "b32", int_nvvm_match_any_sync_i32,
                                        i32imm>;
defm MATCH_ANY_SYNC_64 : MATCH_ANY_SYNC<Int64Regs, "b64", int_nvvm_match_any_sync_i64,
                                        i64imm>;
multiclass MATCH_ALLP_SYNC<NVPTXRegClass regclass, string ptxtype, Intrinsic IntOp,
                           Operand ImmOp> {
  def ii : NVPTXInst<(outs regclass:$dest, Int1Regs:$pred),
                     (ins i32imm:$mask, ImmOp:$value),
                     "match.all.sync." # ptxtype # " \t$dest|$pred, $value, $mask;",
                     // It would be nice if tablegen could match multiple return
                     // values, but that does not seem to be the case. Thus we have
                     // an empty pattern and lower the intrinsic to the instruction
                     // manually.
                     // [(set regclass:$dest, Int1Regs:$pred, (IntOp imm:$value, imm:$mask))]>,
                     []>,
           Requires<[hasPTX60, hasSM70]>;
  def ir : NVPTXInst<(outs regclass:$dest, Int1Regs:$pred),
                     (ins Int32Regs:$mask, ImmOp:$value),
                     "match.all.sync." # ptxtype # " \t$dest|$pred, $value, $mask;",
                     []>,
           Requires<[hasPTX60, hasSM70]>;
  def ri : NVPTXInst<(outs regclass:$dest, Int1Regs:$pred),
                     (ins i32imm:$mask, regclass:$value),
                     "match.all.sync." # ptxtype # " \t$dest|$pred, $value, $mask;",
                     []>,
           Requires<[hasPTX60, hasSM70]>;
  def rr : NVPTXInst<(outs regclass:$dest, Int1Regs:$pred),
                     (ins Int32Regs:$mask, regclass:$value),
                     "match.all.sync." # ptxtype # " \t$dest|$pred, $value, $mask;",
                     []>,
           Requires<[hasPTX60, hasSM70]>;
}

defm MATCH_ALLP_SYNC_32 : MATCH_ALLP_SYNC<Int32Regs, "b32", int_nvvm_match_all_sync_i32p,
                                          i32imm>;
defm MATCH_ALLP_SYNC_64 : MATCH_ALLP_SYNC<Int64Regs, "b64", int_nvvm_match_all_sync_i64p,
                                          i64imm>;
} // isConvergent = 1
//-----------------------------------

test/CodeGen/NVPTX/match.ll (new file, 117 lines)

@@ -0,0 +1,117 @@
; RUN: llc < %s -march=nvptx64 -mcpu=sm_70 -mattr=+ptx60 | FileCheck %s
declare i32 @llvm.nvvm.match.any.sync.i32(i32, i32)
declare i64 @llvm.nvvm.match.any.sync.i64(i32, i64)
; CHECK-LABEL: .func{{.*}}match.any.sync.i32
define i32 @match.any.sync.i32(i32 %mask, i32 %value) {
; CHECK: ld.param.u32 [[MASK:%r[0-9]+]], [match.any.sync.i32_param_0];
; CHECK: ld.param.u32 [[VALUE:%r[0-9]+]], [match.any.sync.i32_param_1];
; CHECK: match.any.sync.b32 [[V0:%r[0-9]+]], [[VALUE]], [[MASK]];
%v0 = call i32 @llvm.nvvm.match.any.sync.i32(i32 %mask, i32 %value)
; CHECK: match.any.sync.b32 [[V1:%r[0-9]+]], [[VALUE]], 1;
%v1 = call i32 @llvm.nvvm.match.any.sync.i32(i32 1, i32 %value)
; CHECK: match.any.sync.b32 [[V2:%r[0-9]+]], 2, [[MASK]];
%v2 = call i32 @llvm.nvvm.match.any.sync.i32(i32 %mask, i32 2)
; CHECK: match.any.sync.b32 [[V3:%r[0-9]+]], 4, 3;
%v3 = call i32 @llvm.nvvm.match.any.sync.i32(i32 3, i32 4)
%sum1 = add i32 %v0, %v1
%sum2 = add i32 %v2, %v3
%sum3 = add i32 %sum1, %sum2
ret i32 %sum3;
}
; CHECK-LABEL: .func{{.*}}match.any.sync.i64
define i64 @match.any.sync.i64(i32 %mask, i64 %value) {
; CHECK: ld.param.u32 [[MASK:%r[0-9]+]], [match.any.sync.i64_param_0];
; CHECK: ld.param.u64 [[VALUE:%rd[0-9]+]], [match.any.sync.i64_param_1];
; CHECK: match.any.sync.b64 [[V0:%rd[0-9]+]], [[VALUE]], [[MASK]];
%v0 = call i64 @llvm.nvvm.match.any.sync.i64(i32 %mask, i64 %value)
; CHECK: match.any.sync.b64 [[V1:%rd[0-9]+]], [[VALUE]], 1;
%v1 = call i64 @llvm.nvvm.match.any.sync.i64(i32 1, i64 %value)
; CHECK: match.any.sync.b64 [[V2:%rd[0-9]+]], 2, [[MASK]];
%v2 = call i64 @llvm.nvvm.match.any.sync.i64(i32 %mask, i64 2)
; CHECK: match.any.sync.b64 [[V3:%rd[0-9]+]], 4, 3;
%v3 = call i64 @llvm.nvvm.match.any.sync.i64(i32 3, i64 4)
%sum1 = add i64 %v0, %v1
%sum2 = add i64 %v2, %v3
%sum3 = add i64 %sum1, %sum2
ret i64 %sum3;
}
declare {i32, i1} @llvm.nvvm.match.all.sync.i32p(i32, i32)
declare {i64, i1} @llvm.nvvm.match.all.sync.i64p(i32, i64)
; CHECK-LABEL: .func{{.*}}match.all.sync.i32p(
define {i32,i1} @match.all.sync.i32p(i32 %mask, i32 %value) {
; CHECK: ld.param.u32 [[MASK:%r[0-9]+]], [match.all.sync.i32p_param_0];
; CHECK: ld.param.u32 [[VALUE:%r[0-9]+]], [match.all.sync.i32p_param_1];
; CHECK: match.all.sync.b32 {{%r[0-9]+\|%p[0-9]+}}, [[VALUE]], [[MASK]];
%r1 = call {i32, i1} @llvm.nvvm.match.all.sync.i32p(i32 %mask, i32 %value)
%v1 = extractvalue {i32, i1} %r1, 0
%p1 = extractvalue {i32, i1} %r1, 1
; CHECK: match.all.sync.b32 {{%r[0-9]+\|%p[0-9]+}}, 1, [[MASK]];
%r2 = call {i32, i1} @llvm.nvvm.match.all.sync.i32p(i32 %mask, i32 1)
%v2 = extractvalue {i32, i1} %r2, 0
%p2 = extractvalue {i32, i1} %r2, 1
; CHECK: match.all.sync.b32 {{%r[0-9]+\|%p[0-9]+}}, [[VALUE]], 2;
%r3 = call {i32, i1} @llvm.nvvm.match.all.sync.i32p(i32 2, i32 %value)
%v3 = extractvalue {i32, i1} %r3, 0
%p3 = extractvalue {i32, i1} %r3, 1
; CHECK: match.all.sync.b32 {{%r[0-9]+\|%p[0-9]+}}, 4, 3;
%r4 = call {i32, i1} @llvm.nvvm.match.all.sync.i32p(i32 3, i32 4)
%v4 = extractvalue {i32, i1} %r4, 0
%p4 = extractvalue {i32, i1} %r4, 1
%vsum1 = add i32 %v1, %v2
%vsum2 = add i32 %v3, %v4
%vsum3 = add i32 %vsum1, %vsum2
%psum1 = add i1 %p1, %p2
%psum2 = add i1 %p3, %p4
%psum3 = add i1 %psum1, %psum2
%ret0 = insertvalue {i32, i1} undef, i32 %vsum3, 0
%ret1 = insertvalue {i32, i1} %ret0, i1 %psum3, 1
ret {i32, i1} %ret1;
}
; CHECK-LABEL: .func{{.*}}match.all.sync.i64p(
define {i64,i1} @match.all.sync.i64p(i32 %mask, i64 %value) {
; CHECK: ld.param.u32 [[MASK:%r[0-9]+]], [match.all.sync.i64p_param_0];
; CHECK: ld.param.u64 [[VALUE:%rd[0-9]+]], [match.all.sync.i64p_param_1];
; CHECK: match.all.sync.b64 {{%rd[0-9]+\|%p[0-9]+}}, [[VALUE]], [[MASK]];
%r1 = call {i64, i1} @llvm.nvvm.match.all.sync.i64p(i32 %mask, i64 %value)
%v1 = extractvalue {i64, i1} %r1, 0
%p1 = extractvalue {i64, i1} %r1, 1
; CHECK: match.all.sync.b64 {{%rd[0-9]+\|%p[0-9]+}}, 1, [[MASK]];
%r2 = call {i64, i1} @llvm.nvvm.match.all.sync.i64p(i32 %mask, i64 1)
%v2 = extractvalue {i64, i1} %r2, 0
%p2 = extractvalue {i64, i1} %r2, 1
; CHECK: match.all.sync.b64 {{%rd[0-9]+\|%p[0-9]+}}, [[VALUE]], 2;
%r3 = call {i64, i1} @llvm.nvvm.match.all.sync.i64p(i32 2, i64 %value)
%v3 = extractvalue {i64, i1} %r3, 0
%p3 = extractvalue {i64, i1} %r3, 1
; CHECK: match.all.sync.b64 {{%rd[0-9]+\|%p[0-9]+}}, 4, 3;
%r4 = call {i64, i1} @llvm.nvvm.match.all.sync.i64p(i32 3, i64 4)
%v4 = extractvalue {i64, i1} %r4, 0
%p4 = extractvalue {i64, i1} %r4, 1
%vsum1 = add i64 %v1, %v2
%vsum2 = add i64 %v3, %v4
%vsum3 = add i64 %vsum1, %vsum2
%psum1 = add i1 %p1, %p2
%psum2 = add i1 %p3, %p4
%psum3 = add i1 %psum1, %psum2
%ret0 = insertvalue {i64, i1} undef, i64 %vsum3, 0
%ret1 = insertvalue {i64, i1} %ret0, i1 %psum3, 1
ret {i64, i1} %ret1;
}