
[llvm][SVE] IR intrinsic for LD1RO.

Reviewers: sdesmalen, efriedma

Subscribers: tschuett, hiraditya, rkruppe, psnobl, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D80738
Francesco Petrogalli 2020-05-27 19:58:04 +00:00
parent f6e5991a6b
commit 1a767c23f0
6 changed files with 110 additions and 11 deletions
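
LD1RO ("load one 256-bit octoword and replicate") belongs to the SVE FP64 matrix-multiply extension, which is why the new test passes +f64mm. The patch exposes the instruction as the overloaded IR intrinsic llvm.aarch64.sve.ld1ro.*, lowers it through a new AArch64ISD::LD1RO node, and reuses the existing LD1RQ DAG combine by templating it over the opcode. As a minimal sketch of the resulting IR interface, the helper below (a hypothetical name, assuming the LLVM-11-era C++ API contemporary with this commit) emits the byte variant via IRBuilder:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/Module.h"

// Emits: %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(
//                  <vscale x 16 x i1> %pg, i8* %addr)
// Pg must be a <vscale x 16 x i1> predicate and Addr an i8* base address,
// matching the declarations at the bottom of the new test file.
static llvm::Value *emitLd1roB(llvm::IRBuilder<> &B, llvm::Module &M,
                               llvm::Value *Pg, llvm::Value *Addr) {
  auto *VecTy = llvm::ScalableVectorType::get(B.getInt8Ty(), 16);
  llvm::Function *Decl = llvm::Intrinsic::getDeclaration(
      &M, llvm::Intrinsic::aarch64_sve_ld1ro, {VecTy});
  return B.CreateCall(Decl, {Pg, Addr});
}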

llvm/include/llvm/IR/IntrinsicsAArch64.td

@@ -1311,6 +1311,7 @@ def int_aarch64_sve_ldnf1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
def int_aarch64_sve_ldff1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
def int_aarch64_sve_ld1rq : AdvSIMD_1Vec_PredLoad_Intrinsic;
+def int_aarch64_sve_ld1ro : AdvSIMD_1Vec_PredLoad_Intrinsic;
//
// Stores

llvm/lib/Target/AArch64/AArch64ISelLowering.cpp

@@ -1463,6 +1463,7 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
case AArch64ISD::LDFF1: return "AArch64ISD::LDFF1";
case AArch64ISD::LDFF1S: return "AArch64ISD::LDFF1S";
case AArch64ISD::LD1RQ: return "AArch64ISD::LD1RQ";
+case AArch64ISD::LD1RO: return "AArch64ISD::LD1RO";
case AArch64ISD::GLD1: return "AArch64ISD::GLD1";
case AArch64ISD::GLD1_SCALED: return "AArch64ISD::GLD1_SCALED";
case AArch64ISD::GLD1_SXTW: return "AArch64ISD::GLD1_SXTW";
@@ -11885,7 +11886,10 @@ static SDValue performLDNT1Combine(SDNode *N, SelectionDAG &DAG) {
return L;
}
-static SDValue performLD1RQCombine(SDNode *N, SelectionDAG &DAG) {
+template <unsigned Opcode>
+static SDValue performLD1ReplicateCombine(SDNode *N, SelectionDAG &DAG) {
+static_assert(Opcode == AArch64ISD::LD1RQ || Opcode == AArch64ISD::LD1RO,
+              "Unsupported opcode.");
SDLoc DL(N);
EVT VT = N->getValueType(0);
@@ -11894,13 +11898,13 @@ static SDValue performLD1RQCombine(SDNode *N, SelectionDAG &DAG) {
LoadVT = VT.changeTypeToInteger();
SDValue Ops[] = {N->getOperand(0), N->getOperand(2), N->getOperand(3)};
-SDValue Load = DAG.getNode(AArch64ISD::LD1RQ, DL, {LoadVT, MVT::Other}, Ops);
+SDValue Load = DAG.getNode(Opcode, DL, {LoadVT, MVT::Other}, Ops);
SDValue LoadChain = SDValue(Load.getNode(), 1);
if (VT.isFloatingPoint())
Load = DAG.getNode(ISD::BITCAST, DL, VT, Load.getValue(0));
-return DAG.getMergeValues({ Load, LoadChain }, DL);
+return DAG.getMergeValues({Load, LoadChain}, DL);
}
static SDValue performST1Combine(SDNode *N, SelectionDAG &DAG) {
@@ -13493,7 +13497,9 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
case Intrinsic::aarch64_sve_ldnt1:
return performLDNT1Combine(N, DAG);
case Intrinsic::aarch64_sve_ld1rq:
-return performLD1RQCombine(N, DAG);
+return performLD1ReplicateCombine<AArch64ISD::LD1RQ>(N, DAG);
+case Intrinsic::aarch64_sve_ld1ro:
+return performLD1ReplicateCombine<AArch64ISD::LD1RO>(N, DAG);
case Intrinsic::aarch64_sve_ldnt1_gather_scalar_offset:
return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1);
case Intrinsic::aarch64_sve_ldnt1_gather:
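
Pieced together from the hunks above (plus the two unchanged lines computing LoadVT that the diff elides), the combine now reads roughly as follows; this is a sketch, not the verbatim source:

// Lower a call to the ld1rq/ld1ro intrinsic into the matching
// AArch64ISD target node.
template <unsigned Opcode>
static SDValue performLD1ReplicateCombine(SDNode *N, SelectionDAG &DAG) {
  static_assert(Opcode == AArch64ISD::LD1RQ || Opcode == AArch64ISD::LD1RO,
                "Unsupported opcode.");
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  // Selection patterns are written on integer types; load floating-point
  // vectors as the equivalent integer vector and bitcast back afterwards.
  EVT LoadVT = VT;
  if (VT.isFloatingPoint())
    LoadVT = VT.changeTypeToInteger();

  // Operands: chain, predicate, base pointer (operand 1 is the intrinsic ID).
  SDValue Ops[] = {N->getOperand(0), N->getOperand(2), N->getOperand(3)};
  SDValue Load = DAG.getNode(Opcode, DL, {LoadVT, MVT::Other}, Ops);
  SDValue LoadChain = SDValue(Load.getNode(), 1);

  if (VT.isFloatingPoint())
    Load = DAG.getNode(ISD::BITCAST, DL, VT, Load.getValue(0));

  return DAG.getMergeValues({Load, LoadChain}, DL);
}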

llvm/lib/Target/AArch64/AArch64ISelLowering.h

@@ -253,6 +253,7 @@ enum NodeType : unsigned {
LDFF1,
LDFF1S,
LD1RQ,
+LD1RO,
// Unsigned gather loads.
GLD1,

llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td

@@ -31,12 +31,13 @@ def AArch64ldff1s : SDNode<"AArch64ISD::LDFF1S", SDT_AArch64_LD1, [SDNPHasChain,
// Contiguous load and replicate - node definitions
//
-def SDT_AArch64_LD1RQ : SDTypeProfile<1, 2, [
+def SDT_AArch64_LD1Replicate : SDTypeProfile<1, 2, [
SDTCisVec<0>, SDTCisVec<1>, SDTCisPtrTy<2>,
SDTCVecEltisVT<1,i1>, SDTCisSameNumEltsAs<0,1>
]>;
-def AArch64ld1rq : SDNode<"AArch64ISD::LD1RQ", SDT_AArch64_LD1RQ, [SDNPHasChain, SDNPMayLoad]>;
+def AArch64ld1rq : SDNode<"AArch64ISD::LD1RQ", SDT_AArch64_LD1Replicate, [SDNPHasChain, SDNPMayLoad]>;
+def AArch64ld1ro : SDNode<"AArch64ISD::LD1RO", SDT_AArch64_LD1Replicate, [SDNPHasChain, SDNPMayLoad]>;
// Gather loads - node definitions
//
@@ -1434,6 +1435,7 @@ multiclass sve_prefetch<SDPatternOperator prefetch, ValueType PredTy, Instructio
def : Pat<(nxv8f16 (bitconvert (nxv16i8 ZPR:$src))), (nxv8f16 ZPR:$src)>;
def : Pat<(nxv8f16 (bitconvert (nxv8i16 ZPR:$src))), (nxv8f16 ZPR:$src)>;
+def : Pat<(nxv8bf16 (bitconvert (nxv8i16 ZPR:$src))), (nxv8bf16 ZPR:$src)>;
def : Pat<(nxv8f16 (bitconvert (nxv4i32 ZPR:$src))), (nxv8f16 ZPR:$src)>;
def : Pat<(nxv8f16 (bitconvert (nxv2i64 ZPR:$src))), (nxv8f16 ZPR:$src)>;
def : Pat<(nxv8f16 (bitconvert (nxv4f32 ZPR:$src))), (nxv8f16 ZPR:$src)>;
@@ -1954,10 +1956,10 @@ let Predicates = [HasSVE, HasMatMulFP32] in {
let Predicates = [HasSVE, HasMatMulFP64] in {
defm FMMLA_ZZZ_D : sve_fp_matrix_mla<1, "fmmla", ZPR64, int_aarch64_sve_fmmla, nxv2f64>;
-defm LD1RO_B_IMM : sve_mem_ldor_si<0b00, "ld1rob", Z_b, ZPR8>;
-defm LD1RO_H_IMM : sve_mem_ldor_si<0b01, "ld1roh", Z_h, ZPR16>;
-defm LD1RO_W_IMM : sve_mem_ldor_si<0b10, "ld1row", Z_s, ZPR32>;
-defm LD1RO_D_IMM : sve_mem_ldor_si<0b11, "ld1rod", Z_d, ZPR64>;
+defm LD1RO_B_IMM : sve_mem_ldor_si<0b00, "ld1rob", Z_b, ZPR8, nxv16i8, nxv16i1, AArch64ld1ro>;
+defm LD1RO_H_IMM : sve_mem_ldor_si<0b01, "ld1roh", Z_h, ZPR16, nxv8i16, nxv8i1, AArch64ld1ro>;
+defm LD1RO_W_IMM : sve_mem_ldor_si<0b10, "ld1row", Z_s, ZPR32, nxv4i32, nxv4i1, AArch64ld1ro>;
+defm LD1RO_D_IMM : sve_mem_ldor_si<0b11, "ld1rod", Z_d, ZPR64, nxv2i64, nxv2i1, AArch64ld1ro>;
defm LD1RO_B : sve_mem_ldor_ss<0b00, "ld1rob", Z_b, ZPR8, GPR64NoXZRshifted8>;
defm LD1RO_H : sve_mem_ldor_ss<0b01, "ld1roh", Z_h, ZPR16, GPR64NoXZRshifted16>;
defm LD1RO_W : sve_mem_ldor_ss<0b10, "ld1row", Z_s, ZPR32, GPR64NoXZRshifted32>;

llvm/lib/Target/AArch64/SVEInstrFormats.td

@@ -7663,7 +7663,7 @@ class sve_mem_ldor_si<bits<2> sz, string asm, RegisterOperand VecList>
}
multiclass sve_mem_ldor_si<bits<2> sz, string asm, RegisterOperand listty,
-ZPRRegOp zprty> {
+ZPRRegOp zprty, ValueType Ty, ValueType PredTy, SDNode Ld1ro> {
def NAME : sve_mem_ldor_si<sz, asm, listty>;
def : InstAlias<asm # "\t$Zt, $Pg/z, [$Rn]",
(!cast<Instruction>(NAME) listty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, 0), 1>;
@@ -7671,6 +7671,11 @@ multiclass sve_mem_ldor_si<bits<2> sz, string asm, RegisterOperand listty,
(!cast<Instruction>(NAME) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, 0), 0>;
def : InstAlias<asm # "\t$Zt, $Pg/z, [$Rn, $imm4]",
(!cast<Instruction>(NAME) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, simm4s32:$imm4), 0>;
+// Base addressing mode
+def : Pat<(Ty (Ld1ro (PredTy PPR3bAny:$gp), GPR64sp:$base)),
+          (!cast<Instruction>(NAME) PPR3bAny:$gp, GPR64sp:$base, (i64 0))>;
}
class sve_mem_ldor_ss<bits<2> sz, string asm, RegisterOperand VecList,

llvm/test/CodeGen/AArch64/sve-intrinsics-ld1ro.ll (new file)

@@ -0,0 +1,84 @@
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve,+f64mm -asm-verbose=0 < %s | FileCheck %s
;
; LD1ROB
;
define <vscale x 16 x i8> @ld1rob_i8(<vscale x 16 x i1> %pred, i8* %addr) nounwind {
; CHECK-LABEL: ld1rob_i8:
; CHECK-NEXT: ld1rob { z0.b }, p0/z, [x0]
; CHECK-NEXT: ret
%res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(<vscale x 16 x i1> %pred, i8* %addr)
ret <vscale x 16 x i8> %res
}
;
; LD1ROH
;
define <vscale x 8 x i16> @ld1roh_i16(<vscale x 8 x i1> %pred, i16* %addr) nounwind {
; CHECK-LABEL: ld1roh_i16:
; CHECK-NEXT: ld1roh { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
%res = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1ro.nxv8i16(<vscale x 8 x i1> %pred, i16* %addr)
ret <vscale x 8 x i16> %res
}
define <vscale x 8 x half> @ld1roh_half(<vscale x 8 x i1> %pred, half* %addr) nounwind {
; CHECK-LABEL: ld1roh_half:
; CHECK-NEXT: ld1roh { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
%res = call <vscale x 8 x half> @llvm.aarch64.sve.ld1ro.nxv8f16(<vscale x 8 x i1> %pred, half* %addr)
ret <vscale x 8 x half> %res
}
;
; LD1ROW
;
define <vscale x 4 x i32> @ld1row_i32(<vscale x 4 x i1> %pred, i32* %addr) nounwind {
; CHECK-LABEL: ld1row_i32:
; CHECK-NEXT: ld1row { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
%res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1ro.nxv4i32(<vscale x 4 x i1> %pred, i32* %addr)
ret <vscale x 4 x i32> %res
}
define <vscale x 4 x float> @ld1row_float(<vscale x 4 x i1> %pred, float* %addr) nounwind {
; CHECK-LABEL: ld1row_float:
; CHECK-NEXT: ld1row { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
%res = call <vscale x 4 x float> @llvm.aarch64.sve.ld1ro.nxv4f32(<vscale x 4 x i1> %pred, float* %addr)
ret <vscale x 4 x float> %res
}
;
; LD1ROD
;
define <vscale x 2 x i64> @ld1rod_i64(<vscale x 2 x i1> %pred, i64* %addr) nounwind {
; CHECK-LABEL: ld1rod_i64:
; CHECK-NEXT: ld1rod { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
%res = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1ro.nxv2i64(<vscale x 2 x i1> %pred, i64* %addr)
ret <vscale x 2 x i64> %res
}
define <vscale x 2 x double> @ld1rod_double(<vscale x 2 x i1> %pred, double* %addr) nounwind {
; CHECK-LABEL: ld1rod_double:
; CHECK-NEXT: ld1rod { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
%res = call <vscale x 2 x double> @llvm.aarch64.sve.ld1ro.nxv2f64(<vscale x 2 x i1> %pred, double* %addr)
ret <vscale x 2 x double> %res
}
declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(<vscale x 16 x i1>, i8*)
declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1ro.nxv8i16(<vscale x 8 x i1>, i16*)
declare <vscale x 8 x half> @llvm.aarch64.sve.ld1ro.nxv8f16(<vscale x 8 x i1>, half*)
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1ro.nxv4i32(<vscale x 4 x i1>, i32*)
declare <vscale x 4 x float> @llvm.aarch64.sve.ld1ro.nxv4f32(<vscale x 4 x i1>, float*)
declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1ro.nxv2i64(<vscale x 2 x i1>, i64*)
declare <vscale x 2 x double> @llvm.aarch64.sve.ld1ro.nxv2f64(<vscale x 2 x i1>, double*)