[AArch64][SVE] Extend int_aarch64_sve_ld1_gather_imm
The ACLE distinguishes between the following addressing modes for gather loads:
  * "scalar base, vector offset", and
  * "vector base, scalar offset".
For the "vector base, scalar offset" case, the `int_aarch64_sve_ld1_gather_imm` intrinsic was added in 79f2422d.

Currently, that intrinsic assumes that the scalar offset is passed as an immediate. As a result, it does not cater for cases where the scalar offset is stored in a register. In this patch `int_aarch64_sve_ld1_gather_imm` is extended so that all cases are covered:
* `int_aarch64_sve_ld1_gather_imm` is renamed as `int_aarch64_sve_ld1_gather_scalar_offset`
* new DAG combine rules are added for GLD1_IMM for scenarios where the offset is a non-immediate scalar or an out-of-range immediate
* sve-intrinsics-gather-loads-vector-base.ll is renamed as sve-intrinsics-gather-loads-vector-base-imm-offset.ll
* sve-intrinsics-gather-loads-vector-base-scalar-offset.ll is added to test non-immediate offsets

Similar changes are made for scatter store intrinsics.

Reviewed By: sdesmalen, efriedma

Differential Revision: https://reviews.llvm.org/D71773
This commit is contained in:
parent 79c6b0fa43
commit c6e3a07bc6
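
To illustrate the new contract, here is a minimal sketch in LLVM IR (the function names @imm_offset and @reg_offset are hypothetical; the intrinsic signature and the expected selections are taken from the tests added below). The same declaration now covers both an immediate offset and a scalar offset held in a register:

  declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)

  ; In-range immediate offset: selectable as ld1d { z0.d }, p0/z, [z0.d, #16]
  define <vscale x 2 x i64> @imm_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
    %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 16)
    ret <vscale x 2 x i64> %load
  }

  ; Register offset: the new DAG combine swaps base and offset so this can be
  ; selected as ld1d { z0.d }, p0/z, [x0, z0.d]
  define <vscale x 2 x i64> @reg_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
    %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset)
    ret <vscale x 2 x i64> %load
  }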
@@ -1125,7 +1125,7 @@ class AdvSIMD_GatherLoad_32bitOffset_Intrinsic
                ],
                [IntrReadMem, IntrArgMemOnly]>;

-class AdvSIMD_GatherLoad_VecTorBase_Intrinsic
+class AdvSIMD_GatherLoad_VectorBase_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [
                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
@@ -1161,7 +1161,7 @@ class AdvSIMD_ScatterStore_VectorBase_Intrinsic
                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                  llvm_anyvector_ty, llvm_i64_ty
                ],
-               [IntrWriteMem, IntrArgMemOnly, ImmArg<3>]>;
+               [IntrWriteMem, IntrArgMemOnly]>;

//
// Loads
@@ -1574,57 +1574,59 @@ def int_aarch64_sve_ptest_first : AdvSIMD_SVE_PTEST_Intrinsic;
def int_aarch64_sve_ptest_last : AdvSIMD_SVE_PTEST_Intrinsic;

//
-// Gather loads:
+// Gather loads: scalar base + vector offsets
//

-// scalar + vector, 64 bit unscaled offsets
+// 64 bit unscaled offsets
def int_aarch64_sve_ld1_gather : AdvSIMD_GatherLoad_64bitOffset_Intrinsic;

-// scalar + vector, 64 bit scaled offsets
+// 64 bit scaled offsets
def int_aarch64_sve_ld1_gather_index : AdvSIMD_GatherLoad_64bitOffset_Intrinsic;

-// scalar + vector, 32 bit unscaled offsets, sign (sxtw) or zero (zxtw)
-// extended to 64 bits
+// 32 bit unscaled offsets, sign (sxtw) or zero (zxtw) extended to 64 bits
def int_aarch64_sve_ld1_gather_sxtw : AdvSIMD_GatherLoad_32bitOffset_Intrinsic;
def int_aarch64_sve_ld1_gather_uxtw : AdvSIMD_GatherLoad_32bitOffset_Intrinsic;

-// scalar + vector, 32 bit scaled offsets, sign (sxtw) or zero (zxtw) extended
-// to 64 bits
+// 32 bit scaled offsets, sign (sxtw) or zero (zxtw) extended to 64 bits
def int_aarch64_sve_ld1_gather_sxtw_index : AdvSIMD_GatherLoad_32bitOffset_Intrinsic;
def int_aarch64_sve_ld1_gather_uxtw_index : AdvSIMD_GatherLoad_32bitOffset_Intrinsic;

-// vector base + immediate index
-def int_aarch64_sve_ld1_gather_imm : AdvSIMD_GatherLoad_VecTorBase_Intrinsic;
-
//
-// Scatter stores:
+// Gather loads: vector base + scalar offset
//

-// scalar + vector, 64 bit unscaled offsets
+def int_aarch64_sve_ld1_gather_scalar_offset : AdvSIMD_GatherLoad_VectorBase_Intrinsic;
+
+//
+// Scatter stores: scalar base + vector offsets
+//
+
+// 64 bit unscaled offsets
def int_aarch64_sve_st1_scatter : AdvSIMD_ScatterStore_64bitOffset_Intrinsic;

-// scalar + vector, 64 bit scaled offsets
+// 64 bit scaled offsets
def int_aarch64_sve_st1_scatter_index
    : AdvSIMD_ScatterStore_64bitOffset_Intrinsic;

-// scalar + vector, 32 bit unscaled offsets, sign (sxtw) or zero (zxtw)
-// extended to 64 bits
+// 32 bit unscaled offsets, sign (sxtw) or zero (zxtw) extended to 64 bits
def int_aarch64_sve_st1_scatter_sxtw
    : AdvSIMD_ScatterStore_32bitOffset_Intrinsic;

def int_aarch64_sve_st1_scatter_uxtw
    : AdvSIMD_ScatterStore_32bitOffset_Intrinsic;

-// scalar + vector, 32 bit scaled offsets, sign (sxtw) or zero (zxtw) extended
-// to 64 bits
+// 32 bit scaled offsets, sign (sxtw) or zero (zxtw) extended to 64 bits
def int_aarch64_sve_st1_scatter_sxtw_index
    : AdvSIMD_ScatterStore_32bitOffset_Intrinsic;

def int_aarch64_sve_st1_scatter_uxtw_index
    : AdvSIMD_ScatterStore_32bitOffset_Intrinsic;

-// vector base + immediate index
-def int_aarch64_sve_st1_scatter_imm : AdvSIMD_ScatterStore_VectorBase_Intrinsic;
+//
+// Scatter stores: vector base + scalar offset
+//
+
+def int_aarch64_sve_st1_scatter_scalar_offset : AdvSIMD_ScatterStore_VectorBase_Intrinsic;

//
// SVE2 - Non-widening pairwise arithmetic
@@ -12303,11 +12303,34 @@ static SDValue performST1ScatterCombine(SDNode *N, SelectionDAG &DAG,

  // Depending on the addressing mode, this is either a pointer or a vector of
  // pointers (that fits into one register)
-  const SDValue Base = N->getOperand(4);
+  SDValue Base = N->getOperand(4);
  // Depending on the addressing mode, this is either a single offset or a
  // vector of offsets (that fits into one register)
  SDValue Offset = N->getOperand(5);

+  // SST1_IMM requires that the offset is an immediate:
+  // * multiple of #SizeInBytes
+  // * in the range [0, 31 x #SizeInBytes]
+  // where #SizeInBytes is the size in bytes of the stored
+  // items. For immediates outside that range and non-immediate scalar offsets use
+  // SST1 or SST1_UXTW instead.
+  if (Opcode == AArch64ISD::SST1_IMM) {
+    uint64_t MaxIndex = 31;
+    uint64_t SrcElSize = SrcElVT.getStoreSize().getKnownMinSize();
+
+    ConstantSDNode *OffsetConst = dyn_cast<ConstantSDNode>(Offset.getNode());
+    if (nullptr == OffsetConst ||
+        OffsetConst->getZExtValue() > MaxIndex * SrcElSize ||
+        OffsetConst->getZExtValue() % SrcElSize) {
+      if (MVT::nxv4i32 == Base.getValueType().getSimpleVT().SimpleTy)
+        Opcode = AArch64ISD::SST1_UXTW;
+      else
+        Opcode = AArch64ISD::SST1;
+
+      std::swap(Base, Offset);
+    }
+  }
+
  auto &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isTypeLegal(Base.getValueType()))
    return SDValue();
@@ -12363,11 +12386,37 @@ static SDValue performLD1GatherCombine(SDNode *N, SelectionDAG &DAG,

  // Depending on the addressing mode, this is either a pointer or a vector of
  // pointers (that fits into one register)
-  const SDValue Base = N->getOperand(3);
+  SDValue Base = N->getOperand(3);
  // Depending on the addressing mode, this is either a single offset or a
  // vector of offsets (that fits into one register)
  SDValue Offset = N->getOperand(4);

+  // GLD1_IMM requires that the offset is an immediate:
+  // * multiple of #SizeInBytes
+  // * in the range [0, 31 x #SizeInBytes]
+  // where #SizeInBytes is the size in bytes of the loaded items. For immediates
+  // outside that range and non-immediate scalar offsets use GLD1 or GLD1_UXTW
+  // instead.
+  if (Opcode == AArch64ISD::GLD1_IMM) {
+    uint64_t MaxIndex = 31;
+    uint64_t RetElSize = RetVT.getVectorElementType()
+                             .getSimpleVT()
+                             .getStoreSize()
+                             .getKnownMinSize();
+
+    ConstantSDNode *OffsetConst = dyn_cast<ConstantSDNode>(Offset.getNode());
+    if (nullptr == OffsetConst ||
+        OffsetConst->getZExtValue() > MaxIndex * RetElSize ||
+        OffsetConst->getZExtValue() % RetElSize) {
+      if (MVT::nxv4i32 == Base.getValueType().getSimpleVT().SimpleTy)
+        Opcode = AArch64ISD::GLD1_UXTW;
+      else
+        Opcode = AArch64ISD::GLD1;
+
+      std::swap(Base, Offset);
+    }
+  }
+
  auto &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isTypeLegal(Base.getValueType()))
    return SDValue();
@@ -12573,7 +12622,7 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
    case Intrinsic::aarch64_sve_ld1_gather_uxtw_index:
      return performLD1GatherCombine(N, DAG, AArch64ISD::GLD1_UXTW_SCALED,
                                     /*OnlyPackedOffsets=*/false);
-    case Intrinsic::aarch64_sve_ld1_gather_imm:
+    case Intrinsic::aarch64_sve_ld1_gather_scalar_offset:
      return performLD1GatherCombine(N, DAG, AArch64ISD::GLD1_IMM);
    case Intrinsic::aarch64_sve_st1_scatter:
      return performST1ScatterCombine(N, DAG, AArch64ISD::SST1);
@@ -12591,7 +12640,7 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
    case Intrinsic::aarch64_sve_st1_scatter_uxtw_index:
      return performST1ScatterCombine(N, DAG, AArch64ISD::SST1_UXTW_SCALED,
                                      /*OnlyPackedOffsets=*/false);
-    case Intrinsic::aarch64_sve_st1_scatter_imm:
+    case Intrinsic::aarch64_sve_st1_scatter_scalar_offset:
      return performST1ScatterCombine(N, DAG, AArch64ISD::SST1_IMM);
    default:
      break;
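
In effect, the combine keeps the _IMM form only when the offset is a constant multiple of the element size lying within [0, 31 x element size]; everything else is rewritten to the register-offset form, with the unpacked (nxv4i32) vector-base case using the uxtw variant. A worked example, mirroring the gld1h_s_imm_offset_out_of_range test added below: for 16-bit elements the valid immediates are the multiples of 2 in [0, 62], so an offset of 63 fails both checks and is materialized in a register instead:

  ; offset 63 is neither a multiple of 2 nor in [0, 62], so GLD1_IMM is not used
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 63)
  ; expected selection:
  ;   mov w8, #63
  ;   ld1h { z0.s }, p0/z, [x8, z0.s, uxtw]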
@@ -640,16 +640,16 @@ let Predicates = [HasSVE] in {

  // Scatters using 32/64-bit pointers with offset, e.g.
  //    st1h z0.s, p0, [z0.s, #16]
-  defm SST1B_S : sve_mem_32b_sst_vi_ptrs<0b001, "st1b", timm0_31, AArch64st1_scatter_imm, nxv4i8>;
-  defm SST1H_S : sve_mem_32b_sst_vi_ptrs<0b011, "st1h", tuimm5s2, AArch64st1_scatter_imm, nxv4i16>;
-  defm SST1W   : sve_mem_32b_sst_vi_ptrs<0b101, "st1w", tuimm5s4, AArch64st1_scatter_imm, nxv4i32>;
+  defm SST1B_S : sve_mem_32b_sst_vi_ptrs<0b001, "st1b", imm0_31, AArch64st1_scatter_imm, nxv4i8>;
+  defm SST1H_S : sve_mem_32b_sst_vi_ptrs<0b011, "st1h", uimm5s2, AArch64st1_scatter_imm, nxv4i16>;
+  defm SST1W   : sve_mem_32b_sst_vi_ptrs<0b101, "st1w", uimm5s4, AArch64st1_scatter_imm, nxv4i32>;

  // Scatters using 32/64-bit pointers with offset, e.g.
  //    st1h z0.d, p0, [z0.d, #16]
-  defm SST1B_D : sve_mem_64b_sst_vi_ptrs<0b000, "st1b", timm0_31, AArch64st1_scatter_imm, nxv2i8>;
-  defm SST1H_D : sve_mem_64b_sst_vi_ptrs<0b010, "st1h", tuimm5s2, AArch64st1_scatter_imm, nxv2i16>;
-  defm SST1W_D : sve_mem_64b_sst_vi_ptrs<0b100, "st1w", tuimm5s4, AArch64st1_scatter_imm, nxv2i32>;
-  defm SST1D   : sve_mem_64b_sst_vi_ptrs<0b110, "st1d", tuimm5s8, AArch64st1_scatter_imm, nxv2i64>;
+  defm SST1B_D : sve_mem_64b_sst_vi_ptrs<0b000, "st1b", imm0_31, AArch64st1_scatter_imm, nxv2i8>;
+  defm SST1H_D : sve_mem_64b_sst_vi_ptrs<0b010, "st1h", uimm5s2, AArch64st1_scatter_imm, nxv2i16>;
+  defm SST1W_D : sve_mem_64b_sst_vi_ptrs<0b100, "st1w", uimm5s4, AArch64st1_scatter_imm, nxv2i32>;
+  defm SST1D   : sve_mem_64b_sst_vi_ptrs<0b110, "st1d", uimm5s8, AArch64st1_scatter_imm, nxv2i64>;

  // Scatters using unscaled 64-bit offsets, e.g.
  //    st1h z0.d, p0, [x0, z0.d]
@@ -12,9 +12,9 @@ define <vscale x 2 x i64> @no_dag_combine_zext_sext(<vscale x 2 x i1> %pg,
; CHECK-NEXT: st1b { z0.d }, p1, [x0]
; CHECK-NEXT: and z0.d, z0.d, #0xff
; CHECK-NEXT: ret
-  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.imm.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
-          <vscale x 2 x i64> %base,
-          i64 16)
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
+          <vscale x 2 x i64> %base,
+          i64 16)
  %res1 = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  %res2 = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %load,
@@ -35,9 +35,9 @@ define <vscale x 2 x i64> @no_dag_combine_sext(<vscale x 2 x i1> %pg,
; CHECK-NEXT: sxtb z0.d, p0/m, z1.d
; CHECK-NEXT: st1b { z1.d }, p1, [x0]
; CHECK-NEXT: ret
-  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.imm.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
-          <vscale x 2 x i64> %base,
-          i64 16)
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
+          <vscale x 2 x i64> %base,
+          i64 16)
  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %load,
                                      <vscale x 2 x i8> *%res_out,
@@ -56,9 +56,9 @@ define <vscale x 2 x i64> @no_dag_combine_zext(<vscale x 2 x i1> %pg,
; CHECK-NEXT: st1b { z0.d }, p1, [x0]
; CHECK-NEXT: and z0.d, z0.d, #0xff
; CHECK-NEXT: ret
-  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.imm.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
-          <vscale x 2 x i64> %base,
-          i64 16)
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
+          <vscale x 2 x i64> %base,
+          i64 16)
  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %load,
                                      <vscale x 2 x i8> *%res_out,
@@ -68,5 +68,5 @@ define <vscale x 2 x i64> @no_dag_combine_zext(<vscale x 2 x i1> %pg,
  ret <vscale x 2 x i64> %res
}

-declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.imm.nxv2i8.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)
+declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)
declare void @llvm.masked.store.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>*, i32, <vscale x 2 x i1>)
@@ -0,0 +1,368 @@
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; LD1B, LD1W, LD1H, LD1D: vector base + immediate offset (index)
;   e.g. ld1h { z0.s }, p0/z, [z0.s, #16]
;

; LD1B
define <vscale x 4 x i32> @gld1b_s_imm_offset(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1b_s_imm_offset:
; CHECK: ld1b { z0.s }, p0/z, [z0.s, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 16)
  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1b_d_imm_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1b_d_imm_offset:
; CHECK: ld1b { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 16)
  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1H
define <vscale x 4 x i32> @gld1h_s_imm_offset(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1h_s_imm_offset:
; CHECK: ld1h { z0.s }, p0/z, [z0.s, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 16)
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1h_d_imm_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1h_d_imm_offset:
; CHECK: ld1h { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 16)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1W
define <vscale x 4 x i32> @gld1w_s_imm_offset(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1w_s_imm_offset:
; CHECK: ld1w { z0.s }, p0/z, [z0.s, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 16)
  ret <vscale x 4 x i32> %load
}

define <vscale x 2 x i64> @gld1w_d_imm_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1w_d_imm_offset:
; CHECK: ld1w { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 16)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 4 x float> @gld1w_s_imm_offset_float(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1w_s_imm_offset_float:
; CHECK: ld1w { z0.s }, p0/z, [z0.s, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 16)
  ret <vscale x 4 x float> %load
}

; LD1D
define <vscale x 2 x i64> @gld1d_d_imm_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1d_d_imm_offset:
; CHECK: ld1d { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 16)
  ret <vscale x 2 x i64> %load
}

define <vscale x 2 x double> @gld1d_d_imm_offset_double(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1d_d_imm_offset_double:
; CHECK: ld1d { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 16)
  ret <vscale x 2 x double> %load
}

;
; LD1SB, LD1SW, LD1SH: vector base + immediate offset (index)
;   e.g. ld1sh { z0.s }, p0/z, [z0.s, #16]
;

; LD1SB
define <vscale x 4 x i32> @gld1sb_s_imm_offset(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1sb_s_imm_offset:
; CHECK: ld1sb { z0.s }, p0/z, [z0.s, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 16)
  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1sb_d_imm_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1sb_d_imm_offset:
; CHECK: ld1sb { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 16)
  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1SH
define <vscale x 4 x i32> @gld1sh_s_imm_offset(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1sh_s_imm_offset:
; CHECK: ld1sh { z0.s }, p0/z, [z0.s, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 16)
  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1sh_d_imm_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1sh_d_imm_offset:
; CHECK: ld1sh { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 16)
  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1SW
define <vscale x 2 x i64> @gld1sw_d_imm_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1sw_d_imm_offset:
; CHECK: ld1sw { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 16)
  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

;
; LD1B, LD1W, LD1H, LD1D: vector base + out of range immediate offset
;   e.g. ld1b { z0.d }, p0/z, [x0, z0.d]
;

; LD1B
define <vscale x 4 x i32> @gld1b_s_imm_offset_out_of_range(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1b_s_imm_offset_out_of_range:
; CHECK: mov w8, #32
; CHECK-NEXT: ld1b { z0.s }, p0/z, [x8, z0.s, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 32)
  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1b_d_imm_offset_out_of_range(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1b_d_imm_offset_out_of_range:
; CHECK: mov w8, #32
; CHECK-NEXT: ld1b { z0.d }, p0/z, [x8, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 32)
  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1H
define <vscale x 4 x i32> @gld1h_s_imm_offset_out_of_range(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1h_s_imm_offset_out_of_range:
; CHECK: mov w8, #63
; CHECK-NEXT: ld1h { z0.s }, p0/z, [x8, z0.s, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 63)
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1h_d_imm_offset_out_of_range(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1h_d_imm_offset_out_of_range:
; CHECK: mov w8, #63
; CHECK-NEXT: ld1h { z0.d }, p0/z, [x8, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 63)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1W
define <vscale x 4 x i32> @gld1w_s_imm_offset_out_of_range(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1w_s_imm_offset_out_of_range:
; CHECK: mov w8, #125
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x8, z0.s, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 125)
  ret <vscale x 4 x i32> %load
}

define <vscale x 2 x i64> @gld1w_d_imm_offset_out_of_range(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1w_d_imm_offset_out_of_range:
; CHECK: mov w8, #125
; CHECK-NEXT: ld1w { z0.d }, p0/z, [x8, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 125)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 4 x float> @gld1w_s_imm_offset_out_of_range_float(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1w_s_imm_offset_out_of_range_float:
; CHECK: mov w8, #125
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x8, z0.s, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 125)
  ret <vscale x 4 x float> %load
}

; LD1D
define <vscale x 2 x i64> @gld1d_d_imm_offset_out_of_range(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1d_d_imm_offset_out_of_range:
; CHECK: mov w8, #249
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x8, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 249)
  ret <vscale x 2 x i64> %load
}

define <vscale x 2 x double> @gld1d_d_imm_offset_out_of_range_double(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1d_d_imm_offset_out_of_range_double:
; CHECK: mov w8, #249
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x8, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 249)
  ret <vscale x 2 x double> %load
}

;
; LD1SB, LD1SW, LD1SH: vector base + out of range immediate offset
;   e.g. ld1sb { z0.s }, p0/z, [x8, z0.s, uxtw]
;

; LD1SB
define <vscale x 4 x i32> @gld1sb_s_imm_offset_out_of_range(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1sb_s_imm_offset_out_of_range:
; CHECK: mov w8, #32
; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x8, z0.s, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 32)
  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1sb_d_imm_offset_out_of_range(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1sb_d_imm_offset_out_of_range:
; CHECK: mov w8, #32
; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x8, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 32)
  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1SH
define <vscale x 4 x i32> @gld1sh_s_imm_offset_out_of_range(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1sh_s_imm_offset_out_of_range:
; CHECK: mov w8, #63
; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x8, z0.s, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 63)
  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1sh_d_imm_offset_out_of_range(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1sh_d_imm_offset_out_of_range:
; CHECK: mov w8, #63
; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x8, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 63)
  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1SW
define <vscale x 2 x i64> @gld1sw_d_imm_offset_out_of_range(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1sw_d_imm_offset_out_of_range:
; CHECK: mov w8, #125
; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x8, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 125)
  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1B/LD1SB
declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)

; LD1H/LD1SH
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)

; LD1W/LD1SW
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)

declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)

; LD1D
declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)

declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)
@@ -0,0 +1,186 @@
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; LD1B, LD1W, LD1H, LD1D: vector base + scalar offset (index)
;   e.g. ld1b { z0.d }, p0/z, [x0, z0.d]
;

; LD1B
define <vscale x 4 x i32> @gld1b_s_scalar_offset(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 %offset) {
; CHECK-LABEL: gld1b_s_scalar_offset:
; CHECK: ld1b { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 %offset)
  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1b_d_scalar_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: gld1b_d_scalar_offset:
; CHECK: ld1b { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 %offset)
  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1H
define <vscale x 4 x i32> @gld1h_s_scalar_offset(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 %offset) {
; CHECK-LABEL: gld1h_s_scalar_offset:
; CHECK: ld1h { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 %offset)
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1h_d_scalar_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: gld1h_d_scalar_offset:
; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 %offset)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1W
define <vscale x 4 x i32> @gld1w_s_scalar_offset(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 %offset) {
; CHECK-LABEL: gld1w_s_scalar_offset:
; CHECK: ld1w { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 %offset)
  ret <vscale x 4 x i32> %load
}

define <vscale x 2 x i64> @gld1w_d_scalar_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: gld1w_d_scalar_offset:
; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 %offset)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 4 x float> @gld1w_s_scalar_offset_float(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 %offset) {
; CHECK-LABEL: gld1w_s_scalar_offset_float:
; CHECK: ld1w { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 %offset)
  ret <vscale x 4 x float> %load
}

; LD1D
define <vscale x 2 x i64> @gld1d_d_scalar_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: gld1d_d_scalar_offset:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 %offset)
  ret <vscale x 2 x i64> %load
}

define <vscale x 2 x double> @gld1d_d_scalar_offset_double(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: gld1d_d_scalar_offset_double:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 %offset)
  ret <vscale x 2 x double> %load
}

; LD1SB, LD1SW, LD1SH: vector base + scalar offset (index)
;   e.g. ld1b { z0.d }, p0/z, [x0, z0.d]
;

; LD1SB
define <vscale x 4 x i32> @gld1sb_s_scalar_offset(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 %offset) {
; CHECK-LABEL: gld1sb_s_scalar_offset:
; CHECK: ld1sb { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 %offset)
  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1sb_d_scalar_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: gld1sb_d_scalar_offset:
; CHECK: ld1sb { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 %offset)
  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1SH
define <vscale x 4 x i32> @gld1sh_s_scalar_offset(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 %offset) {
; CHECK-LABEL: gld1sh_s_scalar_offset:
; CHECK: ld1sh { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 %offset)
  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1sh_d_scalar_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: gld1sh_d_scalar_offset:
; CHECK: ld1sh { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 %offset)
  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1SW
define <vscale x 2 x i64> @gld1sw_d_scalar_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: gld1sw_d_scalar_offset:
; CHECK: ld1sw { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 %offset)
  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1B/LD1SB
declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)

; LD1H/LD1SH
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)

; LD1W/LD1SW
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)

declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)

; LD1D
declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)

declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)
@@ -1,186 +0,0 @@
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; LD1B, LD1W, LD1H, LD1D: vector + immediate (index)
;   e.g. ld1h { z0.s }, p0/z, [z0.s, #16]
;

; LD1B
define <vscale x 4 x i32> @gld1b_s_imm(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1b_s_imm:
; CHECK: ld1b { z0.s }, p0/z, [z0.s, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.imm.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 16)
  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1b_d_imm(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1b_d_imm:
; CHECK: ld1b { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.imm.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 16)
  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1H
define <vscale x 4 x i32> @gld1h_s_imm(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1h_s_imm:
; CHECK: ld1h { z0.s }, p0/z, [z0.s, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.imm.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 16)
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1h_d_imm(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1h_d_imm:
; CHECK: ld1h { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.imm.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 16)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1W
define <vscale x 4 x i32> @gld1w_s_imm(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1w_s_imm:
; CHECK: ld1w { z0.s }, p0/z, [z0.s, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.imm.nxv4i32.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 16)
  ret <vscale x 4 x i32> %load
}

define <vscale x 2 x i64> @gld1w_d_imm(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1w_d_imm:
; CHECK: ld1w { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.imm.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 16)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 4 x float> @gld1w_s_imm_float(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1w_s_imm_float:
; CHECK: ld1w { z0.s }, p0/z, [z0.s, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.imm.nxv4f32.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 16)
  ret <vscale x 4 x float> %load
}

; LD1D
define <vscale x 2 x i64> @gld1d_d_imm(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1d_d_imm:
; CHECK: ld1d { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.imm.nxv2i64.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 16)
  ret <vscale x 2 x i64> %load
}

define <vscale x 2 x double> @gld1d_d_imm_double(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1d_d_imm_double:
; CHECK: ld1d { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.imm.nxv2f64.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 16)
  ret <vscale x 2 x double> %load
}

; LD1SB, LD1SW, LD1SH: vector + immediate (index)
;   e.g. ld1sh { z0.s }, p0/z, [z0.s, #16]
;

; LD1SB
define <vscale x 4 x i32> @gld1sb_s_imm(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1sb_s_imm:
; CHECK: ld1sb { z0.s }, p0/z, [z0.s, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.imm.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 16)
  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1sb_d_imm(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1sb_d_imm:
; CHECK: ld1sb { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.imm.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 16)
  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1SH
define <vscale x 4 x i32> @gld1sh_s_imm(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1sh_s_imm:
; CHECK: ld1sh { z0.s }, p0/z, [z0.s, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.imm.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
          <vscale x 4 x i32> %base,
          i64 16)
  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1sh_d_imm(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1sh_d_imm:
; CHECK: ld1sh { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.imm.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 16)
  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1SW
define <vscale x 2 x i64> @gld1sw_d_imm(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1sw_d_imm:
; CHECK: ld1sw { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.imm.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
          <vscale x 2 x i64> %base,
          i64 16)
  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1B/LD1SB
declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.imm.nxv4i8.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.imm.nxv2i8.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)

; LD1H/LD1SH
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.imm.nxv4i16.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.imm.nxv2i16.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)

; LD1W/LD1SW
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.imm.nxv4i32.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.imm.nxv2i32.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)

declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.imm.nxv4f32.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)

; LD1D
declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.imm.nxv2i64.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)

declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.imm.nxv2f64.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)
@ -0,0 +1,255 @@
|
||||
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
|
||||
|
||||
;
|
||||
; ST1B, ST1W, ST1H, ST1D: vector base + immediate offset
|
||||
; e.g. st1h { z0.s }, p0, [z1.s, #16]
|
||||
;
|
||||
|
||||
; ST1B
|
||||
define void @sst1b_s_imm_offset(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
|
||||
; CHECK-LABEL: sst1b_s_imm_offset:
|
||||
; CHECK: st1b { z0.s }, p0, [z1.s, #16]
|
||||
; CHECK-NEXT: ret
|
||||
%data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
|
||||
call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i8> %data_trunc,
|
||||
<vscale x 4 x i1> %pg,
|
||||
<vscale x 4 x i32> %base,
|
||||
i64 16)
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @sst1b_d_imm_offset(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
|
||||
; CHECK-LABEL: sst1b_d_imm_offset:
|
||||
; CHECK: st1b { z0.d }, p0, [z1.d, #16]
|
||||
; CHECK-NEXT: ret
|
||||
%data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
|
||||
call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i8> %data_trunc,
|
||||
<vscale x 2 x i1> %pg,
|
||||
<vscale x 2 x i64> %base,
|
||||
i64 16)
|
||||
ret void
|
||||
}
|
||||
|
||||
; ST1H
|
||||
define void @sst1h_s_imm_offset(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
|
||||
; CHECK-LABEL: sst1h_s_imm_offset:
|
||||
; CHECK: st1h { z0.s }, p0, [z1.s, #16]
|
||||
; CHECK-NEXT: ret
|
||||
%data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
|
||||
call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i16> %data_trunc,
|
||||
<vscale x 4 x i1> %pg,
|
||||
<vscale x 4 x i32> %base,
|
||||
i64 16)
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @sst1h_d_imm_offset(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
|
||||
; CHECK-LABEL: sst1h_d_imm_offset:
|
||||
; CHECK: st1h { z0.d }, p0, [z1.d, #16]
|
||||
; CHECK-NEXT: ret
|
||||
%data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
|
||||
call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i16> %data_trunc,
|
||||
<vscale x 2 x i1> %pg,
|
||||
<vscale x 2 x i64> %base,
|
||||
i64 16)
|
||||
ret void
|
||||
}
|
||||
|
||||
; ST1W
|
||||
define void @sst1w_s_imm_offset(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
|
||||
; CHECK-LABEL: sst1w_s_imm_offset:
|
||||
; CHECK: st1w { z0.s }, p0, [z1.s, #16]
|
||||
; CHECK-NEXT: ret
|
||||
call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i32> %data,
|
||||
<vscale x 4 x i1> %pg,
|
||||
<vscale x 4 x i32> %base,
|
||||
i64 16)
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @sst1w_d_imm_offset(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
|
||||
; CHECK-LABEL: sst1w_d_imm_offset:
|
||||
; CHECK: st1w { z0.d }, p0, [z1.d, #16]
|
||||
; CHECK-NEXT: ret
|
||||
%data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
|
||||
call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i32> %data_trunc,
|
||||
<vscale x 2 x i1> %pg,
|
||||
<vscale x 2 x i64> %base,
|
||||
i64 16)
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @sst1w_s_imm_offset_float(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
|
||||
; CHECK-LABEL: sst1w_s_imm_offset_float:
|
||||
; CHECK: st1w { z0.s }, p0, [z1.s, #16]
|
||||
; CHECK-NEXT: ret
|
||||
call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x float> %data,
|
||||
<vscale x 4 x i1> %pg,
|
||||
<vscale x 4 x i32> %base,
|
||||
i64 16)
|
||||
ret void
|
||||
}
|
||||
|
||||
; ST1D
|
||||
define void @sst1d_d_imm_offset(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
|
||||
; CHECK-LABEL: sst1d_d_imm_offset:
|
||||
; CHECK: st1d { z0.d }, p0, [z1.d, #16]
|
||||
; CHECK-NEXT: ret
|
||||
call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64> %data,
|
||||
<vscale x 2 x i1> %pg,
|
||||
<vscale x 2 x i64> %base,
|
||||
i64 16)
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @sst1d_d_imm_offset_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
|
||||
; CHECK-LABEL: sst1d_d_imm_offset_double:
|
||||
; CHECK: st1d { z0.d }, p0, [z1.d, #16]
|
||||
; CHECK-NEXT: ret
|
||||
call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x double> %data,
|
||||
<vscale x 2 x i1> %pg,
|
||||
<vscale x 2 x i64> %base,
|
||||
i64 16)
|
||||
ret void
|
||||
}
|
||||
|
||||
;
|
||||
; ST1B, ST1W, ST1H, ST1D: vector base + out of range immediate offset
|
||||
; e.g. st1h { z0.s }, p0, [z1.s, #16]
|
||||
;
|
||||
|
||||
; ST1B
|
||||
define void @sst1b_s_imm_offset_out_of_range(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
|
||||
; CHECK-LABEL: sst1b_s_imm_offset_out_of_range:
|
||||
; CHECK: mov w8, #32
|
||||
; CHECK-NEXT: st1b { z0.s }, p0, [x8, z1.s, uxtw]
|
||||
; CHECK-NEXT: ret
|
||||
%data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
|
||||
call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i8> %data_trunc,
|
||||
<vscale x 4 x i1> %pg,
|
||||
<vscale x 4 x i32> %base,
|
||||
i64 32)
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @sst1b_d_imm_offset_out_of_range(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
|
||||
; CHECK-LABEL: sst1b_d_imm_offset_out_of_range:
|
||||
; CHECK: mov w8, #32
|
||||
; CHECK-NEXT: st1b { z0.d }, p0, [x8, z1.d]
|
||||
; CHECK-NEXT: ret
|
||||
%data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
|
||||
call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i8> %data_trunc,
|
||||
<vscale x 2 x i1> %pg,
|
||||
<vscale x 2 x i64> %base,
|
||||
i64 32)
|
||||
ret void
|
||||
}
|
||||
|
||||
; ST1H
|
||||
define void @sst1h_s_imm_offset_out_of_range(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
|
||||
; CHECK-LABEL: sst1h_s_imm_offset_out_of_range:
|
||||
; CHECK: mov w8, #63
|
||||
; CHECK-NEXT: st1h { z0.s }, p0, [x8, z1.s, uxtw]
|
||||
; CHECK-NEXT: ret
|
||||
%data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
|
||||
call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i16> %data_trunc,
|
||||
<vscale x 4 x i1> %pg,
|
||||
<vscale x 4 x i32> %base,
|
||||
i64 63)
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @sst1h_d_imm_offset_out_of_range(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
|
||||
; CHECK-LABEL: sst1h_d_imm_offset_out_of_range:
|
||||
; CHECK: mov w8, #63
|
||||
; CHECK-NEXT: st1h { z0.d }, p0, [x8, z1.d]
|
||||
; CHECK-NEXT: ret
|
||||
%data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
|
||||
call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i16> %data_trunc,
|
||||
<vscale x 2 x i1> %pg,
|
||||
<vscale x 2 x i64> %base,
|
||||
i64 63)
|
||||
ret void
|
||||
}
|
||||
|
||||
; ST1W
define void @sst1w_s_imm_offset_out_of_range(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: sst1w_s_imm_offset_out_of_range:
; CHECK: mov w8, #125
; CHECK-NEXT: st1w { z0.s }, p0, [x8, z1.s, uxtw]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i32> %data,
                                                                        <vscale x 4 x i1> %pg,
                                                                        <vscale x 4 x i32> %base,
                                                                        i64 125)
  ret void
}

define void @sst1w_d_imm_offset_out_of_range(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1w_d_imm_offset_out_of_range:
; CHECK: mov w8, #125
; CHECK-NEXT: st1w { z0.d }, p0, [x8, z1.d]
; CHECK-NEXT: ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i32> %data_trunc,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 125)
  ret void
}

define void @sst1w_s_imm_offset_float_out_of_range(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: sst1w_s_imm_offset_float_out_of_range:
; CHECK: mov w8, #125
; CHECK-NEXT: st1w { z0.s }, p0, [x8, z1.s, uxtw]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x float> %data,
                                                                        <vscale x 4 x i1> %pg,
                                                                        <vscale x 4 x i32> %base,
                                                                        i64 125)
  ret void
}

; ST1D
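; For ST1D the immediate must be a multiple of 8 in [0, 248]; 249 fails both
; constraints, so it is moved into a register.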
define void @sst1d_d_imm_offset_out_of_range(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1d_d_imm_offset_out_of_range:
; CHECK: mov w8, #249
; CHECK-NEXT: st1d { z0.d }, p0, [x8, z1.d]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64> %data,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 249)
  ret void
}

define void @sst1d_d_imm_offset_double_out_of_range(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1d_d_imm_offset_double_out_of_range:
; CHECK: mov w8, #249
; CHECK-NEXT: st1d { z0.d }, p0, [x8, z1.d]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x double> %data,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 249)
  ret void
}

; ST1B
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i8>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i8>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)

; ST1H
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i16>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i16>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)

; ST1W
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i32>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)

declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)

; ST1D
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)

declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)

@ -0,0 +1,133 @@
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; ST1B, ST1W, ST1H, ST1D: vector base + scalar offset
;   e.g. st1h { z0.s }, p0, [x0, z1.d]
;
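;
; A minimal C-level sketch of how a run-time scalar offset can reach these
; intrinsics (a sketch only: it assumes the ACLE names from arm_sve.h, which
; are not part of this patch):
;
;   #include <arm_sve.h>
;   // Scatter 32-bit elements to 32-bit vector bases plus a byte offset that
;   // is only known at run time.
;   void store(svbool_t pg, svuint32_t bases, int64_t off, svint32_t data) {
;     svst1_scatter_u32base_offset_s32(pg, bases, off, data);
;   }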

; ST1B
define void @sst1b_s_scalar_offset(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 %offset) {
; CHECK-LABEL: sst1b_s_scalar_offset:
; CHECK: st1b { z0.s }, p0, [x0, z1.s, uxtw]
; CHECK-NEXT: ret
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i8> %data_trunc,
                                                                       <vscale x 4 x i1> %pg,
                                                                       <vscale x 4 x i32> %base,
                                                                       i64 %offset)
  ret void
}

define void @sst1b_d_scalar_offset(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: sst1b_d_scalar_offset:
; CHECK: st1b { z0.d }, p0, [x0, z1.d]
; CHECK-NEXT: ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i8> %data_trunc,
                                                                       <vscale x 2 x i1> %pg,
                                                                       <vscale x 2 x i64> %base,
                                                                       i64 %offset)
  ret void
}

; ST1H
define void @sst1h_s_scalar_offset(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 %offset) {
; CHECK-LABEL: sst1h_s_scalar_offset:
; CHECK: st1h { z0.s }, p0, [x0, z1.s, uxtw]
; CHECK-NEXT: ret
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i16> %data_trunc,
                                                                        <vscale x 4 x i1> %pg,
                                                                        <vscale x 4 x i32> %base,
                                                                        i64 %offset)
  ret void
}

define void @sst1h_d_scalar_offset(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: sst1h_d_scalar_offset:
; CHECK: st1h { z0.d }, p0, [x0, z1.d]
; CHECK-NEXT: ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i16> %data_trunc,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 %offset)
  ret void
}

; ST1W
define void @sst1w_s_scalar_offset(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 %offset) {
; CHECK-LABEL: sst1w_s_scalar_offset:
; CHECK: st1w { z0.s }, p0, [x0, z1.s, uxtw]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i32> %data,
                                                                        <vscale x 4 x i1> %pg,
                                                                        <vscale x 4 x i32> %base,
                                                                        i64 %offset)
  ret void
}

define void @sst1w_d_scalar_offset(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: sst1w_d_scalar_offset:
; CHECK: st1w { z0.d }, p0, [x0, z1.d]
; CHECK-NEXT: ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i32> %data_trunc,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 %offset)
  ret void
}

define void @sst1w_s_scalar_offset_float(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 %offset) {
; CHECK-LABEL: sst1w_s_scalar_offset_float:
; CHECK: st1w { z0.s }, p0, [x0, z1.s, uxtw]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x float> %data,
                                                                        <vscale x 4 x i1> %pg,
                                                                        <vscale x 4 x i32> %base,
                                                                        i64 %offset)
  ret void
}

; ST1D
define void @sst1d_d_scalar_offset(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: sst1d_d_scalar_offset:
; CHECK: st1d { z0.d }, p0, [x0, z1.d]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64> %data,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 %offset)
  ret void
}

define void @sst1d_d_scalar_offset_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: sst1d_d_scalar_offset_double:
; CHECK: st1d { z0.d }, p0, [x0, z1.d]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x double> %data,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 %offset)
  ret void
}

; ST1B
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i8>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i8>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)

; ST1H
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i16>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i16>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)

; ST1W
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i32>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)

declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)

; ST1D
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)

declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)

@ -1,133 +0,0 @@
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; ST1B, ST1W, ST1H, ST1D: vector + immediate (index)
;   e.g. st1h { z0.s }, p0, [z1.s, #16]
;

; ST1B
define void @sst1b_s_imm(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: sst1b_s_imm:
; CHECK: st1b { z0.s }, p0, [z1.s, #16]
; CHECK-NEXT: ret
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
  call void @llvm.aarch64.sve.st1.scatter.imm.nxv4i8.nxv4i32(<vscale x 4 x i8> %data_trunc,
                                                             <vscale x 4 x i1> %pg,
                                                             <vscale x 4 x i32> %base,
                                                             i64 16)
  ret void
}

define void @sst1b_d_imm(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1b_d_imm:
; CHECK: st1b { z0.d }, p0, [z1.d, #16]
; CHECK-NEXT: ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
  call void @llvm.aarch64.sve.st1.scatter.imm.nxv2i8.nxv2i64(<vscale x 2 x i8> %data_trunc,
                                                             <vscale x 2 x i1> %pg,
                                                             <vscale x 2 x i64> %base,
                                                             i64 16)
  ret void
}

; ST1H
define void @sst1h_s_imm(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: sst1h_s_imm:
; CHECK: st1h { z0.s }, p0, [z1.s, #16]
; CHECK-NEXT: ret
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
  call void @llvm.aarch64.sve.st1.scatter.imm.nxv4i16.nxv4i32(<vscale x 4 x i16> %data_trunc,
                                                              <vscale x 4 x i1> %pg,
                                                              <vscale x 4 x i32> %base,
                                                              i64 16)
  ret void
}

define void @sst1h_d_imm(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1h_d_imm:
; CHECK: st1h { z0.d }, p0, [z1.d, #16]
; CHECK-NEXT: ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
  call void @llvm.aarch64.sve.st1.scatter.imm.nxv2i16.nxv2i64(<vscale x 2 x i16> %data_trunc,
                                                              <vscale x 2 x i1> %pg,
                                                              <vscale x 2 x i64> %base,
                                                              i64 16)
  ret void
}

; ST1W
define void @sst1w_s_imm(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: sst1w_s_imm:
; CHECK: st1w { z0.s }, p0, [z1.s, #16]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.scatter.imm.nxv4i32.nxv4i32(<vscale x 4 x i32> %data,
                                                              <vscale x 4 x i1> %pg,
                                                              <vscale x 4 x i32> %base,
                                                              i64 16)
  ret void
}

define void @sst1w_d_imm(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1w_d_imm:
; CHECK: st1w { z0.d }, p0, [z1.d, #16]
; CHECK-NEXT: ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
  call void @llvm.aarch64.sve.st1.scatter.imm.nxv2i32.nxv2i64(<vscale x 2 x i32> %data_trunc,
                                                              <vscale x 2 x i1> %pg,
                                                              <vscale x 2 x i64> %base,
                                                              i64 16)
  ret void
}

define void @sst1w_s_imm_float(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: sst1w_s_imm_float:
; CHECK: st1w { z0.s }, p0, [z1.s, #16]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.scatter.imm.nxv4f32.nxv4i32(<vscale x 4 x float> %data,
                                                              <vscale x 4 x i1> %pg,
                                                              <vscale x 4 x i32> %base,
                                                              i64 16)
  ret void
}

; ST1D
define void @sst1d_d_imm(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1d_d_imm:
; CHECK: st1d { z0.d }, p0, [z1.d, #16]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.scatter.imm.nxv2i64.nxv2i64(<vscale x 2 x i64> %data,
                                                              <vscale x 2 x i1> %pg,
                                                              <vscale x 2 x i64> %base,
                                                              i64 16)
  ret void
}

define void @sst1d_d_imm_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1d_d_imm_double:
; CHECK: st1d { z0.d }, p0, [z1.d, #16]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.scatter.imm.nxv2f64.nxv2i64(<vscale x 2 x double> %data,
                                                              <vscale x 2 x i1> %pg,
                                                              <vscale x 2 x i64> %base,
                                                              i64 16)
  ret void
}

; ST1B
declare void @llvm.aarch64.sve.st1.scatter.imm.nxv4i8.nxv4i32(<vscale x 4 x i8>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare void @llvm.aarch64.sve.st1.scatter.imm.nxv2i8.nxv2i64(<vscale x 2 x i8>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)

; ST1H
declare void @llvm.aarch64.sve.st1.scatter.imm.nxv4i16.nxv4i32(<vscale x 4 x i16>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare void @llvm.aarch64.sve.st1.scatter.imm.nxv2i16.nxv2i64(<vscale x 2 x i16>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)

; ST1W
declare void @llvm.aarch64.sve.st1.scatter.imm.nxv4i32.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare void @llvm.aarch64.sve.st1.scatter.imm.nxv2i32.nxv2i64(<vscale x 2 x i32>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)

declare void @llvm.aarch64.sve.st1.scatter.imm.nxv4f32.nxv4i32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)

; ST1D
declare void @llvm.aarch64.sve.st1.scatter.imm.nxv2i64.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)

declare void @llvm.aarch64.sve.st1.scatter.imm.nxv2f64.nxv2i64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)