Summary:

This patch adds the following SVE intrinsics for scatter stores:

* 64-bit offsets:
  * @llvm.aarch64.sve.st1.scatter (unscaled)
  * @llvm.aarch64.sve.st1.scatter.index (scaled)
* 32-bit unscaled offsets:
  * @llvm.aarch64.sve.st1.scatter.uxtw (zero-extended offset)
  * @llvm.aarch64.sve.st1.scatter.sxtw (sign-extended offset)
* 32-bit scaled offsets:
  * @llvm.aarch64.sve.st1.scatter.uxtw.index (zero-extended offset)
  * @llvm.aarch64.sve.st1.scatter.sxtw.index (sign-extended offset)
* vector base + immediate:
  * @llvm.aarch64.sve.st1.scatter.imm

This test file exercises the 32-bit unscaled uxtw/sxtw forms; a hedged sketch of a scaled-index form follows the last test function below.

Reviewers: rengolin, efriedma, sdesmalen

Reviewed By: efriedma, sdesmalen

Subscribers: kmclaughlin, eli.friedman, tschuett, kristof.beyls, hiraditya, rkruppe, psnobl, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D71074
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
;
; ST1B, ST1W, ST1H, ST1D: base + 32-bit unscaled offset, sign (sxtw) or zero
; (uxtw) extended to 64 bits.
;   e.g. st1h { z0.d }, p0, [x0, z1.d, uxtw]
;
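; Per-lane semantics, shown as a hedged scalarized sketch for the nxv4i8 uxtw
; form (the lane index %i and element %elt below are illustrative names, not
; part of the tested IR): each active lane stores its element to
; base + extend(offset), where extend is zext for uxtw and sext for sxtw.
;
;   %off   = extractelement <vscale x 4 x i32> %offsets, i32 %i
;   %off64 = zext i32 %off to i64            ; sext for the sxtw variants
;   %addr  = getelementptr i8, i8* %base, i64 %off64
;   %elt   = extractelement <vscale x 4 x i8> %data_trunc, i32 %i
;   store i8 %elt, i8* %addr
;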
; ST1B
define void @sst1b_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %offsets) {
; CHECK-LABEL: sst1b_s_uxtw:
; CHECK: st1b { z0.s }, p0, [x0, z1.s, uxtw]
; CHECK-NEXT: ret
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
  call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i8(<vscale x 4 x i8> %data_trunc,
                                                      <vscale x 4 x i1> %pg,
                                                      i8* %base,
                                                      <vscale x 4 x i32> %offsets)
  ret void
}

define void @sst1b_s_sxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %offsets) {
; CHECK-LABEL: sst1b_s_sxtw:
; CHECK: st1b { z0.s }, p0, [x0, z1.s, sxtw]
; CHECK-NEXT: ret
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
  call void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4i8(<vscale x 4 x i8> %data_trunc,
                                                      <vscale x 4 x i1> %pg,
                                                      i8* %base,
                                                      <vscale x 4 x i32> %offsets)
  ret void
}

define void @sst1b_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i32> %offsets) {
; CHECK-LABEL: sst1b_d_uxtw:
; CHECK: st1b { z0.d }, p0, [x0, z1.d, uxtw]
; CHECK-NEXT: ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
  call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i8(<vscale x 2 x i8> %data_trunc,
                                                      <vscale x 2 x i1> %pg,
                                                      i8* %base,
                                                      <vscale x 2 x i32> %offsets)
  ret void
}

define void @sst1b_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i32> %offsets) {
; CHECK-LABEL: sst1b_d_sxtw:
; CHECK: st1b { z0.d }, p0, [x0, z1.d, sxtw]
; CHECK-NEXT: ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
  call void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2i8(<vscale x 2 x i8> %data_trunc,
                                                      <vscale x 2 x i1> %pg,
                                                      i8* %base,
                                                      <vscale x 2 x i32> %offsets)
  ret void
}

; ST1H
define void @sst1h_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %offsets) {
; CHECK-LABEL: sst1h_s_uxtw:
; CHECK: st1h { z0.s }, p0, [x0, z1.s, uxtw]
; CHECK-NEXT: ret
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
  call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i16(<vscale x 4 x i16> %data_trunc,
                                                       <vscale x 4 x i1> %pg,
                                                       i16* %base,
                                                       <vscale x 4 x i32> %offsets)
  ret void
}

define void @sst1h_s_sxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %offsets) {
; CHECK-LABEL: sst1h_s_sxtw:
; CHECK: st1h { z0.s }, p0, [x0, z1.s, sxtw]
; CHECK-NEXT: ret
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
  call void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4i16(<vscale x 4 x i16> %data_trunc,
                                                       <vscale x 4 x i1> %pg,
                                                       i16* %base,
                                                       <vscale x 4 x i32> %offsets)
  ret void
}

define void @sst1h_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %offsets) {
; CHECK-LABEL: sst1h_d_uxtw:
; CHECK: st1h { z0.d }, p0, [x0, z1.d, uxtw]
; CHECK-NEXT: ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
  call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i16(<vscale x 2 x i16> %data_trunc,
                                                       <vscale x 2 x i1> %pg,
                                                       i16* %base,
                                                       <vscale x 2 x i32> %offsets)
  ret void
}

define void @sst1h_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %offsets) {
; CHECK-LABEL: sst1h_d_sxtw:
; CHECK: st1h { z0.d }, p0, [x0, z1.d, sxtw]
; CHECK-NEXT: ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
  call void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2i16(<vscale x 2 x i16> %data_trunc,
                                                       <vscale x 2 x i1> %pg,
                                                       i16* %base,
                                                       <vscale x 2 x i32> %offsets)
  ret void
}

; ST1W
define void @sst1w_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %offsets) {
; CHECK-LABEL: sst1w_s_uxtw:
; CHECK: st1w { z0.s }, p0, [x0, z1.s, uxtw]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i32(<vscale x 4 x i32> %data,
                                                       <vscale x 4 x i1> %pg,
                                                       i32* %base,
                                                       <vscale x 4 x i32> %offsets)
  ret void
}

define void @sst1w_s_sxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %offsets) {
; CHECK-LABEL: sst1w_s_sxtw:
; CHECK: st1w { z0.s }, p0, [x0, z1.s, sxtw]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4i32(<vscale x 4 x i32> %data,
                                                       <vscale x 4 x i1> %pg,
                                                       i32* %base,
                                                       <vscale x 4 x i32> %offsets)
  ret void
}

define void @sst1w_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %offsets) {
; CHECK-LABEL: sst1w_d_uxtw:
; CHECK: st1w { z0.d }, p0, [x0, z1.d, uxtw]
; CHECK-NEXT: ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
  call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i32(<vscale x 2 x i32> %data_trunc,
                                                       <vscale x 2 x i1> %pg,
                                                       i32* %base,
                                                       <vscale x 2 x i32> %offsets)
  ret void
}

define void @sst1w_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %offsets) {
; CHECK-LABEL: sst1w_d_sxtw:
; CHECK: st1w { z0.d }, p0, [x0, z1.d, sxtw]
; CHECK-NEXT: ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
  call void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2i32(<vscale x 2 x i32> %data_trunc,
                                                       <vscale x 2 x i1> %pg,
                                                       i32* %base,
                                                       <vscale x 2 x i32> %offsets)
  ret void
}

define void @sst1w_s_uxtw_float(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %offsets) {
; CHECK-LABEL: sst1w_s_uxtw_float:
; CHECK: st1w { z0.s }, p0, [x0, z1.s, uxtw]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4f32(<vscale x 4 x float> %data,
                                                       <vscale x 4 x i1> %pg,
                                                       float* %base,
                                                       <vscale x 4 x i32> %offsets)
  ret void
}

define void @sst1w_s_sxtw_float(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %offsets) {
; CHECK-LABEL: sst1w_s_sxtw_float:
; CHECK: st1w { z0.s }, p0, [x0, z1.s, sxtw]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4f32(<vscale x 4 x float> %data,
                                                       <vscale x 4 x i1> %pg,
                                                       float* %base,
                                                       <vscale x 4 x i32> %offsets)
  ret void
}

; ST1D
define void @sst1d_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i32> %offsets) {
; CHECK-LABEL: sst1d_d_uxtw:
; CHECK: st1d { z0.d }, p0, [x0, z1.d, uxtw]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i64(<vscale x 2 x i64> %data,
                                                       <vscale x 2 x i1> %pg,
                                                       i64* %base,
                                                       <vscale x 2 x i32> %offsets)
  ret void
}

define void @sst1d_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i32> %offsets) {
; CHECK-LABEL: sst1d_d_sxtw:
; CHECK: st1d { z0.d }, p0, [x0, z1.d, sxtw]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2i64(<vscale x 2 x i64> %data,
                                                       <vscale x 2 x i1> %pg,
                                                       i64* %base,
                                                       <vscale x 2 x i32> %offsets)
  ret void
}

define void @sst1d_d_uxtw_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i32> %offsets) {
; CHECK-LABEL: sst1d_d_uxtw_double:
; CHECK: st1d { z0.d }, p0, [x0, z1.d, uxtw]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2f64(<vscale x 2 x double> %data,
                                                       <vscale x 2 x i1> %pg,
                                                       double* %base,
                                                       <vscale x 2 x i32> %offsets)
  ret void
}

define void @sst1d_d_sxtw_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i32> %offsets) {
; CHECK-LABEL: sst1d_d_sxtw_double:
; CHECK: st1d { z0.d }, p0, [x0, z1.d, sxtw]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2f64(<vscale x 2 x double> %data,
                                                       <vscale x 2 x i1> %pg,
                                                       double* %base,
                                                       <vscale x 2 x i32> %offsets)
  ret void
}

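; A minimal hedged sketch of one scaled-index variant named in the commit
; summary (@llvm.aarch64.sve.st1.scatter.sxtw.index), which this file does not
; otherwise exercise. Assumption: the scaled form shares the (data, predicate,
; base, indices) signature of the unscaled forms and scales each 32-bit index
; by the element size, reflected in the "sxtw #1" shift in the emitted store.
define void @sst1h_s_sxtw_index(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %indices) {
; CHECK-LABEL: sst1h_s_sxtw_index:
; CHECK: st1h { z0.s }, p0, [x0, z1.s, sxtw #1]
; CHECK-NEXT: ret
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
  call void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i16(<vscale x 4 x i16> %data_trunc,
                                                             <vscale x 4 x i1> %pg,
                                                             i16* %base,
                                                             <vscale x 4 x i32> %indices)
  ret void
}

declare void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
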
; ST1B
declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i8*, <vscale x 2 x i32>)
declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i8*, <vscale x 2 x i32>)

; ST1H
declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i16*, <vscale x 2 x i32>)
declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i16*, <vscale x 2 x i32>)

; ST1W
declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32*, <vscale x 2 x i32>)
declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32*, <vscale x 2 x i32>)

declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*, <vscale x 4 x i32>)
declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*, <vscale x 4 x i32>)

; ST1D
declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*, <vscale x 2 x i32>)
declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*, <vscale x 2 x i32>)

declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*, <vscale x 2 x i32>)
declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*, <vscale x 2 x i32>)