mirror of https://github.com/RPCS3/llvm-mirror.git, synced 2025-02-01 05:01:59 +01:00
[SVE][NFC] Regenerate a few CodeGen tests
Regenerated using llvm/utils/update_llc_test_checks.py as part of D94504, committing separately to reduce the diff for D94504.
This commit is contained in:
parent ce76c6de45
commit b30ad48824
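
For reference, regenerating tests of this kind is a single invocation of the in-tree script named in the commit message. A minimal sketch only — the test path and the --llc-binary location below are placeholders, not taken from this commit:

  # Rerun llc over a test file and rewrite its autogenerated CHECK lines in place;
  # --llc-binary points the script at a locally built llc (adjust paths to your checkout).
  llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
      llvm/test/CodeGen/AArch64/your-sve-test.ll

This accounts for the mechanical shape of the diffs below: the script emits a "; CHECK: // %bb.0:" line and concrete registers (x8, z0) in place of hand-written patterns such as x[[OFFSET:[0-9]+]].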
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s
 ; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t

@@ -10,7 +11,8 @@

 define <vscale x 16 x i8> @ldnf1b(<vscale x 16 x i1> %pg, i8* %a) {
 ; CHECK-LABEL: ldnf1b:
-; CHECK: ldnf1b { z0.b }, p0/z, [x0]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1b { z0.b }, p0/z, [x0]
 ; CHECK-NEXT: ret
   %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, i8* %a)
   ret <vscale x 16 x i8> %load
@@ -18,9 +20,10 @@ define <vscale x 16 x i8> @ldnf1b(<vscale x 16 x i1> %pg, i8* %a) {

 define <vscale x 16 x i8> @ldnf1b_out_of_lower_bound(<vscale x 16 x i1> %pg, i8* %a) {
 ; CHECK-LABEL: ldnf1b_out_of_lower_bound:
-; CHECK: rdvl x[[OFFSET:[0-9]+]], #-9
-; CHECK-NEXT: add x[[BASE:[0-9]+]], x0, x[[OFFSET]]
-; CHECK-NEXT: ldnf1b { z0.b }, p0/z, [x[[BASE]]]
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #-9
+; CHECK-NEXT: add x8, x0, x8
+; CHECK-NEXT: ldnf1b { z0.b }, p0/z, [x8]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
   %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 -9
@@ -31,7 +34,8 @@ define <vscale x 16 x i8> @ldnf1b_out_of_lower_bound(<vscale x 16 x i1> %pg, i8*

 define <vscale x 16 x i8> @ldnf1b_lower_bound(<vscale x 16 x i1> %pg, i8* %a) {
 ; CHECK-LABEL: ldnf1b_lower_bound:
-; CHECK: ldnf1b { z0.b }, p0/z, [x0, #-8, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1b { z0.b }, p0/z, [x0, #-8, mul vl]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
   %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 -8
@@ -42,7 +46,8 @@ define <vscale x 16 x i8> @ldnf1b_lower_bound(<vscale x 16 x i1> %pg, i8* %a) {

 define <vscale x 16 x i8> @ldnf1b_inbound(<vscale x 16 x i1> %pg, i8* %a) {
 ; CHECK-LABEL: ldnf1b_inbound:
-; CHECK: ldnf1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1b { z0.b }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
   %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 1
@@ -53,7 +58,8 @@ define <vscale x 16 x i8> @ldnf1b_inbound(<vscale x 16 x i1> %pg, i8* %a) {

 define <vscale x 16 x i8> @ldnf1b_upper_bound(<vscale x 16 x i1> %pg, i8* %a) {
 ; CHECK-LABEL: ldnf1b_upper_bound:
-; CHECK: ldnf1b { z0.b }, p0/z, [x0, #7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1b { z0.b }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
   %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 7
@@ -64,9 +70,10 @@ define <vscale x 16 x i8> @ldnf1b_upper_bound(<vscale x 16 x i1> %pg, i8* %a) {

 define <vscale x 16 x i8> @ldnf1b_out_of_upper_bound(<vscale x 16 x i1> %pg, i8* %a) {
 ; CHECK-LABEL: ldnf1b_out_of_upper_bound:
-; CHECK: rdvl x[[OFFSET:[0-9]+]], #8
-; CHECK-NEXT: add x[[BASE:[0-9]+]], x0, x[[OFFSET]]
-; CHECK-NEXT: ldnf1b { z0.b }, p0/z, [x[[BASE]]]
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #8
+; CHECK-NEXT: add x8, x0, x8
+; CHECK-NEXT: ldnf1b { z0.b }, p0/z, [x8]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
   %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 8
@@ -77,7 +84,8 @@ define <vscale x 16 x i8> @ldnf1b_out_of_upper_bound(<vscale x 16 x i1> %pg, i8*

 define <vscale x 8 x i16> @ldnf1b_h(<vscale x 8 x i1> %pg, i8* %a) {
 ; CHECK-LABEL: ldnf1b_h:
-; CHECK: ldnf1b { z0.h }, p0/z, [x0]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1b { z0.h }, p0/z, [x0]
 ; CHECK-NEXT: ret
   %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %pg, i8* %a)
   %res = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
@@ -86,7 +94,8 @@ define <vscale x 8 x i16> @ldnf1b_h(<vscale x 8 x i1> %pg, i8* %a) {

 define <vscale x 8 x i16> @ldnf1b_h_inbound(<vscale x 8 x i1> %pg, i8* %a) {
 ; CHECK-LABEL: ldnf1b_h_inbound:
-; CHECK: ldnf1b { z0.h }, p0/z, [x0, #7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1b { z0.h }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast i8* %a to <vscale x 8 x i8>*
   %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base_scalable, i64 7
@@ -98,7 +107,8 @@ define <vscale x 8 x i16> @ldnf1b_h_inbound(<vscale x 8 x i1> %pg, i8* %a) {

 define <vscale x 8 x i16> @ldnf1sb_h(<vscale x 8 x i1> %pg, i8* %a) {
 ; CHECK-LABEL: ldnf1sb_h:
-; CHECK: ldnf1sb { z0.h }, p0/z, [x0]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1sb { z0.h }, p0/z, [x0]
 ; CHECK-NEXT: ret
   %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %pg, i8* %a)
   %res = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
@@ -107,7 +117,8 @@ define <vscale x 8 x i16> @ldnf1sb_h(<vscale x 8 x i1> %pg, i8* %a) {

 define <vscale x 8 x i16> @ldnf1sb_h_inbound(<vscale x 8 x i1> %pg, i8* %a) {
 ; CHECK-LABEL: ldnf1sb_h_inbound:
-; CHECK: ldnf1sb { z0.h }, p0/z, [x0, #7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1sb { z0.h }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast i8* %a to <vscale x 8 x i8>*
   %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base_scalable, i64 7
@@ -119,7 +130,8 @@ define <vscale x 8 x i16> @ldnf1sb_h_inbound(<vscale x 8 x i1> %pg, i8* %a) {

 define <vscale x 8 x i16> @ldnf1h(<vscale x 8 x i1> %pg, i16* %a) {
 ; CHECK-LABEL: ldnf1h:
-; CHECK: ldnf1h { z0.h }, p0/z, [x0]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT: ret
   %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ldnf1.nxv8i16(<vscale x 8 x i1> %pg, i16* %a)
   ret <vscale x 8 x i16> %load
@@ -127,7 +139,8 @@ define <vscale x 8 x i16> @ldnf1h(<vscale x 8 x i1> %pg, i16* %a) {

 define <vscale x 8 x i16> @ldnf1h_inbound(<vscale x 8 x i1> %pg, i16* %a) {
 ; CHECK-LABEL: ldnf1h_inbound:
-; CHECK: ldnf1h { z0.h }, p0/z, [x0, #1, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1h { z0.h }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast i16* %a to <vscale x 8 x i16>*
   %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %base_scalable, i64 1
@@ -138,7 +151,8 @@ define <vscale x 8 x i16> @ldnf1h_inbound(<vscale x 8 x i1> %pg, i16* %a) {

 define <vscale x 8 x half> @ldnf1h_f16(<vscale x 8 x i1> %pg, half* %a) {
 ; CHECK-LABEL: ldnf1h_f16:
-; CHECK: ldnf1h { z0.h }, p0/z, [x0]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT: ret
   %load = call <vscale x 8 x half> @llvm.aarch64.sve.ldnf1.nxv8f16(<vscale x 8 x i1> %pg, half* %a)
   ret <vscale x 8 x half> %load
@@ -146,7 +160,8 @@ define <vscale x 8 x half> @ldnf1h_f16(<vscale x 8 x i1> %pg, half* %a) {

 define <vscale x 8 x bfloat> @ldnf1h_bf16(<vscale x 8 x i1> %pg, bfloat* %a) #0 {
 ; CHECK-LABEL: ldnf1h_bf16:
-; CHECK: ldnf1h { z0.h }, p0/z, [x0]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT: ret
   %load = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnf1.nxv8bf16(<vscale x 8 x i1> %pg, bfloat* %a)
   ret <vscale x 8 x bfloat> %load
@@ -154,7 +169,8 @@ define <vscale x 8 x bfloat> @ldnf1h_bf16(<vscale x 8 x i1> %pg, bfloat* %a) #0

 define <vscale x 8 x half> @ldnf1h_f16_inbound(<vscale x 8 x i1> %pg, half* %a) {
 ; CHECK-LABEL: ldnf1h_f16_inbound:
-; CHECK: ldnf1h { z0.h }, p0/z, [x0, #1, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1h { z0.h }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast half* %a to <vscale x 8 x half>*
   %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %base_scalable, i64 1
@@ -165,7 +181,8 @@ define <vscale x 8 x half> @ldnf1h_f16_inbound(<vscale x 8 x i1> %pg, half* %a)

 define <vscale x 8 x bfloat> @ldnf1h_bf16_inbound(<vscale x 8 x i1> %pg, bfloat* %a) #0 {
 ; CHECK-LABEL: ldnf1h_bf16_inbound:
-; CHECK: ldnf1h { z0.h }, p0/z, [x0, #1, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1h { z0.h }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast bfloat* %a to <vscale x 8 x bfloat>*
   %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %base_scalable, i64 1
@@ -176,7 +193,8 @@ define <vscale x 8 x bfloat> @ldnf1h_bf16_inbound(<vscale x 8 x i1> %pg, bfloat*

 define <vscale x 4 x i32> @ldnf1b_s(<vscale x 4 x i1> %pg, i8* %a) {
 ; CHECK-LABEL: ldnf1b_s:
-; CHECK: ldnf1b { z0.s }, p0/z, [x0]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1b { z0.s }, p0/z, [x0]
 ; CHECK-NEXT: ret
   %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1> %pg, i8* %a)
   %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
@@ -185,7 +203,8 @@ define <vscale x 4 x i32> @ldnf1b_s(<vscale x 4 x i1> %pg, i8* %a) {

 define <vscale x 4 x i32> @ldnf1b_s_inbound(<vscale x 4 x i1> %pg, i8* %a) {
 ; CHECK-LABEL: ldnf1b_s_inbound:
-; CHECK: ldnf1b { z0.s }, p0/z, [x0, #7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1b { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast i8* %a to <vscale x 4 x i8>*
   %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base_scalable, i64 7
@@ -197,7 +216,8 @@ define <vscale x 4 x i32> @ldnf1b_s_inbound(<vscale x 4 x i1> %pg, i8* %a) {

 define <vscale x 4 x i32> @ldnf1sb_s(<vscale x 4 x i1> %pg, i8* %a) {
 ; CHECK-LABEL: ldnf1sb_s:
-; CHECK: ldnf1sb { z0.s }, p0/z, [x0]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1sb { z0.s }, p0/z, [x0]
 ; CHECK-NEXT: ret
   %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1> %pg, i8* %a)
   %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
@@ -206,7 +226,8 @@ define <vscale x 4 x i32> @ldnf1sb_s(<vscale x 4 x i1> %pg, i8* %a) {

 define <vscale x 4 x i32> @ldnf1sb_s_inbound(<vscale x 4 x i1> %pg, i8* %a) {
 ; CHECK-LABEL: ldnf1sb_s_inbound:
-; CHECK: ldnf1sb { z0.s }, p0/z, [x0, #7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1sb { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast i8* %a to <vscale x 4 x i8>*
   %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base_scalable, i64 7
@@ -218,7 +239,8 @@ define <vscale x 4 x i32> @ldnf1sb_s_inbound(<vscale x 4 x i1> %pg, i8* %a) {

 define <vscale x 4 x i32> @ldnf1h_s(<vscale x 4 x i1> %pg, i16* %a) {
 ; CHECK-LABEL: ldnf1h_s:
-; CHECK: ldnf1h { z0.s }, p0/z, [x0]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1h { z0.s }, p0/z, [x0]
 ; CHECK-NEXT: ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %pg, i16* %a)
   %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
@@ -227,7 +249,8 @@ define <vscale x 4 x i32> @ldnf1h_s(<vscale x 4 x i1> %pg, i16* %a) {

 define <vscale x 4 x i32> @ldnf1h_s_inbound(<vscale x 4 x i1> %pg, i16* %a) {
 ; CHECK-LABEL: ldnf1h_s_inbound:
-; CHECK: ldnf1h { z0.s }, p0/z, [x0, #7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1h { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast i16* %a to <vscale x 4 x i16>*
   %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base_scalable, i64 7
@@ -239,7 +262,8 @@ define <vscale x 4 x i32> @ldnf1h_s_inbound(<vscale x 4 x i1> %pg, i16* %a) {

 define <vscale x 4 x i32> @ldnf1sh_s(<vscale x 4 x i1> %pg, i16* %a) {
 ; CHECK-LABEL: ldnf1sh_s:
-; CHECK: ldnf1sh { z0.s }, p0/z, [x0]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1sh { z0.s }, p0/z, [x0]
 ; CHECK-NEXT: ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %pg, i16* %a)
   %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
@@ -248,7 +272,8 @@ define <vscale x 4 x i32> @ldnf1sh_s(<vscale x 4 x i1> %pg, i16* %a) {

 define <vscale x 4 x i32> @ldnf1sh_s_inbound(<vscale x 4 x i1> %pg, i16* %a) {
 ; CHECK-LABEL: ldnf1sh_s_inbound:
-; CHECK: ldnf1sh { z0.s }, p0/z, [x0, #7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1sh { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast i16* %a to <vscale x 4 x i16>*
   %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base_scalable, i64 7
@@ -260,7 +285,8 @@ define <vscale x 4 x i32> @ldnf1sh_s_inbound(<vscale x 4 x i1> %pg, i16* %a) {

 define <vscale x 4 x i32> @ldnf1w(<vscale x 4 x i1> %pg, i32* %a) {
 ; CHECK-LABEL: ldnf1w:
-; CHECK: ldnf1w { z0.s }, p0/z, [x0]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT: ret
   %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldnf1.nxv4i32(<vscale x 4 x i1> %pg, i32* %a)
   ret <vscale x 4 x i32> %load
@@ -268,7 +294,8 @@ define <vscale x 4 x i32> @ldnf1w(<vscale x 4 x i1> %pg, i32* %a) {

 define <vscale x 4 x i32> @ldnf1w_inbound(<vscale x 4 x i1> %pg, i32* %a) {
 ; CHECK-LABEL: ldnf1w_inbound:
-; CHECK: ldnf1w { z0.s }, p0/z, [x0, #7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1w { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast i32* %a to <vscale x 4 x i32>*
   %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %base_scalable, i64 7
@@ -279,7 +306,8 @@ define <vscale x 4 x i32> @ldnf1w_inbound(<vscale x 4 x i1> %pg, i32* %a) {

 define <vscale x 4 x float> @ldnf1w_f32(<vscale x 4 x i1> %pg, float* %a) {
 ; CHECK-LABEL: ldnf1w_f32:
-; CHECK: ldnf1w { z0.s }, p0/z, [x0]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT: ret
   %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldnf1.nxv4f32(<vscale x 4 x i1> %pg, float* %a)
   ret <vscale x 4 x float> %load
@@ -287,7 +315,8 @@ define <vscale x 4 x float> @ldnf1w_f32(<vscale x 4 x i1> %pg, float* %a) {

 define <vscale x 4 x float> @ldnf1w_f32_inbound(<vscale x 4 x i1> %pg, float* %a) {
 ; CHECK-LABEL: ldnf1w_f32_inbound:
-; CHECK: ldnf1w { z0.s }, p0/z, [x0, #7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1w { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast float* %a to <vscale x 4 x float>*
   %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %base_scalable, i64 7
@@ -298,7 +327,8 @@ define <vscale x 4 x float> @ldnf1w_f32_inbound(<vscale x 4 x i1> %pg, float* %a

 define <vscale x 2 x i64> @ldnf1b_d(<vscale x 2 x i1> %pg, i8* %a) {
 ; CHECK-LABEL: ldnf1b_d:
-; CHECK: ldnf1b { z0.d }, p0/z, [x0]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1b { z0.d }, p0/z, [x0]
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1> %pg, i8* %a)
   %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
@@ -307,7 +337,8 @@ define <vscale x 2 x i64> @ldnf1b_d(<vscale x 2 x i1> %pg, i8* %a) {

 define <vscale x 2 x i64> @ldnf1b_d_inbound(<vscale x 2 x i1> %pg, i8* %a) {
 ; CHECK-LABEL: ldnf1b_d_inbound:
-; CHECK: ldnf1b { z0.d }, p0/z, [x0, #7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1b { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast i8* %a to <vscale x 2 x i8>*
   %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base_scalable, i64 7
@@ -319,7 +350,8 @@ define <vscale x 2 x i64> @ldnf1b_d_inbound(<vscale x 2 x i1> %pg, i8* %a) {

 define <vscale x 2 x i64> @ldnf1sb_d(<vscale x 2 x i1> %pg, i8* %a) {
 ; CHECK-LABEL: ldnf1sb_d:
-; CHECK: ldnf1sb { z0.d }, p0/z, [x0]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1sb { z0.d }, p0/z, [x0]
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1> %pg, i8* %a)
   %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
@@ -328,7 +360,8 @@ define <vscale x 2 x i64> @ldnf1sb_d(<vscale x 2 x i1> %pg, i8* %a) {

 define <vscale x 2 x i64> @ldnf1sb_d_inbound(<vscale x 2 x i1> %pg, i8* %a) {
 ; CHECK-LABEL: ldnf1sb_d_inbound:
-; CHECK: ldnf1sb { z0.d }, p0/z, [x0, #7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1sb { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast i8* %a to <vscale x 2 x i8>*
   %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base_scalable, i64 7
@@ -340,7 +373,8 @@ define <vscale x 2 x i64> @ldnf1sb_d_inbound(<vscale x 2 x i1> %pg, i8* %a) {

 define <vscale x 2 x i64> @ldnf1h_d(<vscale x 2 x i1> %pg, i16* %a) {
 ; CHECK-LABEL: ldnf1h_d:
-; CHECK: ldnf1h { z0.d }, p0/z, [x0]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1h { z0.d }, p0/z, [x0]
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %pg, i16* %a)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
@@ -349,7 +383,8 @@ define <vscale x 2 x i64> @ldnf1h_d(<vscale x 2 x i1> %pg, i16* %a) {

 define <vscale x 2 x i64> @ldnf1h_d_inbound(<vscale x 2 x i1> %pg, i16* %a) {
 ; CHECK-LABEL: ldnf1h_d_inbound:
-; CHECK: ldnf1h { z0.d }, p0/z, [x0, #7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1h { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast i16* %a to <vscale x 2 x i16>*
   %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base_scalable, i64 7
@@ -361,7 +396,8 @@ define <vscale x 2 x i64> @ldnf1h_d_inbound(<vscale x 2 x i1> %pg, i16* %a) {

 define <vscale x 2 x i64> @ldnf1sh_d(<vscale x 2 x i1> %pg, i16* %a) {
 ; CHECK-LABEL: ldnf1sh_d:
-; CHECK: ldnf1sh { z0.d }, p0/z, [x0]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1sh { z0.d }, p0/z, [x0]
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %pg, i16* %a)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
@@ -370,7 +406,8 @@ define <vscale x 2 x i64> @ldnf1sh_d(<vscale x 2 x i1> %pg, i16* %a) {

 define <vscale x 2 x i64> @ldnf1sh_d_inbound(<vscale x 2 x i1> %pg, i16* %a) {
 ; CHECK-LABEL: ldnf1sh_d_inbound:
-; CHECK: ldnf1sh { z0.d }, p0/z, [x0, #7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1sh { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast i16* %a to <vscale x 2 x i16>*
   %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base_scalable, i64 7
@@ -382,7 +419,8 @@ define <vscale x 2 x i64> @ldnf1sh_d_inbound(<vscale x 2 x i1> %pg, i16* %a) {

 define <vscale x 2 x i64> @ldnf1w_d(<vscale x 2 x i1> %pg, i32* %a) {
 ; CHECK-LABEL: ldnf1w_d:
-; CHECK: ldnf1w { z0.d }, p0/z, [x0]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1w { z0.d }, p0/z, [x0]
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %pg, i32* %a)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
@@ -391,7 +429,8 @@ define <vscale x 2 x i64> @ldnf1w_d(<vscale x 2 x i1> %pg, i32* %a) {

 define <vscale x 2 x i64> @ldnf1w_d_inbound(<vscale x 2 x i1> %pg, i32* %a) {
 ; CHECK-LABEL: ldnf1w_d_inbound:
-; CHECK: ldnf1w { z0.d }, p0/z, [x0, #7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1w { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast i32* %a to <vscale x 2 x i32>*
   %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base_scalable, i64 7
@@ -403,7 +442,8 @@ define <vscale x 2 x i64> @ldnf1w_d_inbound(<vscale x 2 x i1> %pg, i32* %a) {

 define <vscale x 2 x i64> @ldnf1sw_d(<vscale x 2 x i1> %pg, i32* %a) {
 ; CHECK-LABEL: ldnf1sw_d:
-; CHECK: ldnf1sw { z0.d }, p0/z, [x0]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1sw { z0.d }, p0/z, [x0]
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %pg, i32* %a)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
@@ -412,7 +452,8 @@ define <vscale x 2 x i64> @ldnf1sw_d(<vscale x 2 x i1> %pg, i32* %a) {

 define <vscale x 2 x i64> @ldnf1sw_d_inbound(<vscale x 2 x i1> %pg, i32* %a) {
 ; CHECK-LABEL: ldnf1sw_d_inbound:
-; CHECK: ldnf1sw { z0.d }, p0/z, [x0, #7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1sw { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast i32* %a to <vscale x 2 x i32>*
   %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base_scalable, i64 7
@@ -424,7 +465,8 @@ define <vscale x 2 x i64> @ldnf1sw_d_inbound(<vscale x 2 x i1> %pg, i32* %a) {

 define <vscale x 2 x i64> @ldnf1d(<vscale x 2 x i1> %pg, i64* %a) {
 ; CHECK-LABEL: ldnf1d:
-; CHECK: ldnf1d { z0.d }, p0/z, [x0]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldnf1.nxv2i64(<vscale x 2 x i1> %pg, i64* %a)
   ret <vscale x 2 x i64> %load
@@ -432,7 +474,8 @@ define <vscale x 2 x i64> @ldnf1d(<vscale x 2 x i1> %pg, i64* %a) {

 define <vscale x 2 x i64> @ldnf1d_inbound(<vscale x 2 x i1> %pg, i64* %a) {
 ; CHECK-LABEL: ldnf1d_inbound:
-; CHECK: ldnf1d { z0.d }, p0/z, [x0, #1, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1d { z0.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast i64* %a to <vscale x 2 x i64>*
   %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base_scalable, i64 1
@@ -443,7 +486,8 @@ define <vscale x 2 x i64> @ldnf1d_inbound(<vscale x 2 x i1> %pg, i64* %a) {

 define <vscale x 2 x double> @ldnf1d_f64(<vscale x 2 x i1> %pg, double* %a) {
 ; CHECK-LABEL: ldnf1d_f64:
-; CHECK: ldnf1d { z0.d }, p0/z, [x0]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldnf1.nxv2f64(<vscale x 2 x i1> %pg, double* %a)
   ret <vscale x 2 x double> %load
@@ -451,7 +495,8 @@ define <vscale x 2 x double> @ldnf1d_f64(<vscale x 2 x i1> %pg, double* %a) {

 define <vscale x 2 x double> @ldnf1d_f64_inbound(<vscale x 2 x i1> %pg, double* %a) {
 ; CHECK-LABEL: ldnf1d_f64_inbound:
-; CHECK: ldnf1d { z0.d }, p0/z, [x0, #1, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnf1d { z0.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT: ret
   %base_scalable = bitcast double* %a to <vscale x 2 x double>*
   %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %base_scalable, i64 1

@@ -1,4 +1,5 @@
-; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s 2>%t | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s
 ; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t

 ; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
@@ -11,12 +12,13 @@

 define void @imm_out_of_range(<vscale x 2 x i64> * %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: imm_out_of_range:
+; CHECK: // %bb.0:
 ; CHECK-NEXT: rdvl x8, #8
 ; CHECK-NEXT: add x8, x0, x8
-; CHECK-NEXT: ld1d { z[[DATA:[0-9]+]].d }, p0/z, [x{{[0-9]+}}]
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x8]
 ; CHECK-NEXT: rdvl x8, #-9
 ; CHECK-NEXT: add x8, x0, x8
-; CHECK-NEXT: st1d { z[[DATA]].d }, p0, [x{{[0-9]+}}]
+; CHECK-NEXT: st1d { z0.d }, p0, [x8]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 8
   %data = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>* %base_load,
@@ -35,8 +37,9 @@ define void @imm_out_of_range(<vscale x 2 x i64> * %base, <vscale x 2 x i1> %mas

 define void @test_masked_ldst_sv2i8(<vscale x 2 x i8> * %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2i8:
-; CHECK-NEXT: ld1sb { z[[DATA:[0-9]+]].d }, p0/z, [x0, #-8, mul vl]
-; CHECK-NEXT: st1b { z[[DATA]].d }, p0, [x0, #-7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0, #-8, mul vl]
+; CHECK-NEXT: st1b { z0.d }, p0, [x0, #-7, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base, i64 -8
   %data = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %base_load,
@@ -53,8 +56,9 @@ define void @test_masked_ldst_sv2i8(<vscale x 2 x i8> * %base, <vscale x 2 x i1>

 define void @test_masked_ldst_sv2i16(<vscale x 2 x i16> * %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2i16:
-; CHECK-NEXT: ld1sh { z[[DATA:[0-9]+]].d }, p0/z, [x0, #-8, mul vl]
-; CHECK-NEXT: st1h { z[[DATA]].d }, p0, [x0, #-7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0, #-8, mul vl]
+; CHECK-NEXT: st1h { z0.d }, p0, [x0, #-7, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base, i64 -8
   %data = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>* %base_load,
@@ -72,8 +76,9 @@ define void @test_masked_ldst_sv2i16(<vscale x 2 x i16> * %base, <vscale x 2 x i

 define void @test_masked_ldst_sv2i32(<vscale x 2 x i32> * %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2i32:
-; CHECK-NEXT: ld1sw { z[[DATA:[0-9]+]].d }, p0/z, [x0, #-8, mul vl]
-; CHECK-NEXT: st1w { z[[DATA]].d }, p0, [x0, #-7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0, #-8, mul vl]
+; CHECK-NEXT: st1w { z0.d }, p0, [x0, #-7, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base, i64 -8
   %data = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>* %base_load,
@@ -90,8 +95,9 @@ define void @test_masked_ldst_sv2i32(<vscale x 2 x i32> * %base, <vscale x 2 x i

 define void @test_masked_ldst_sv2i64(<vscale x 2 x i64> * %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2i64:
-; CHECK-NEXT: ld1d { z[[DATA:[0-9]+]].d }, p0/z, [x0, #-8, mul vl]
-; CHECK-NEXT: st1d { z[[DATA]].d }, p0, [x0, #-7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, #-8, mul vl]
+; CHECK-NEXT: st1d { z0.d }, p0, [x0, #-7, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 -8
   %data = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>* %base_load,
@@ -108,8 +114,9 @@ define void @test_masked_ldst_sv2i64(<vscale x 2 x i64> * %base, <vscale x 2 x i

 define void @test_masked_ldst_sv2f16(<vscale x 2 x half> * %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2f16:
-; CHECK-NEXT: ld1h { z[[DATA:[0-9]+]].d }, p0/z, [x0, #-8, mul vl]
-; CHECK-NEXT: st1h { z[[DATA]].d }, p0, [x0, #-7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, #-8, mul vl]
+; CHECK-NEXT: st1h { z0.d }, p0, [x0, #-7, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 2 x half>, <vscale x 2 x half>* %base, i64 -8
   %data = call <vscale x 2 x half> @llvm.masked.load.nxv2f16(<vscale x 2 x half>* %base_load,
@@ -127,8 +134,9 @@ define void @test_masked_ldst_sv2f16(<vscale x 2 x half> * %base, <vscale x 2 x

 define void @test_masked_ldst_sv2f32(<vscale x 2 x float> * %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2f32:
-; CHECK-NEXT: ld1w { z[[DATA:[0-9]+]].d }, p0/z, [x0, #-8, mul vl]
-; CHECK-NEXT: st1w { z[[DATA]].d }, p0, [x0, #-7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, #-8, mul vl]
+; CHECK-NEXT: st1w { z0.d }, p0, [x0, #-7, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 2 x float>, <vscale x 2 x float>* %base, i64 -8
   %data = call <vscale x 2 x float> @llvm.masked.load.nxv2f32(<vscale x 2 x float>* %base_load,
@@ -145,8 +153,9 @@ define void @test_masked_ldst_sv2f32(<vscale x 2 x float> * %base, <vscale x 2 x

 define void @test_masked_ldst_sv2f64(<vscale x 2 x double> * %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2f64:
-; CHECK-NEXT: ld1d { z[[DATA:[0-9]+]].d }, p0/z, [x0, #-6, mul vl]
-; CHECK-NEXT: st1d { z[[DATA]].d }, p0, [x0, #-5, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, #-6, mul vl]
+; CHECK-NEXT: st1d { z0.d }, p0, [x0, #-5, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %base, i64 -6
   %data = call <vscale x 2 x double> @llvm.masked.load.nxv2f64(<vscale x 2 x double>* %base_load,
@@ -165,6 +174,7 @@ define void @test_masked_ldst_sv2f64(<vscale x 2 x double> * %base, <vscale x 2

 define <vscale x 2 x i64> @masked_zload_sv2i8_to_sv2i64(<vscale x 2 x i8>* %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_zload_sv2i8_to_sv2i64:
+; CHECK: // %bb.0:
 ; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, #-4, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base, i64 -4
@@ -178,6 +188,7 @@ define <vscale x 2 x i64> @masked_zload_sv2i8_to_sv2i64(<vscale x 2 x i8>* %base

 define <vscale x 2 x i64> @masked_sload_sv2i8_to_sv2i64(<vscale x 2 x i8>* %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_sload_sv2i8_to_sv2i64:
+; CHECK: // %bb.0:
 ; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0, #-3, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base, i64 -3
@@ -191,6 +202,7 @@ define <vscale x 2 x i64> @masked_sload_sv2i8_to_sv2i64(<vscale x 2 x i8>* %base

 define <vscale x 2 x i64> @masked_zload_sv2i16_to_sv2i64(<vscale x 2 x i16>* %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_zload_sv2i16_to_sv2i64:
+; CHECK: // %bb.0:
 ; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base, i64 1
@@ -204,6 +216,7 @@ define <vscale x 2 x i64> @masked_zload_sv2i16_to_sv2i64(<vscale x 2 x i16>* %ba

 define <vscale x 2 x i64> @masked_sload_sv2i16_to_sv2i64(<vscale x 2 x i16>* %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_sload_sv2i16_to_sv2i64:
+; CHECK: // %bb.0:
 ; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0, #2, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base, i64 2
@@ -217,6 +230,7 @@ define <vscale x 2 x i64> @masked_sload_sv2i16_to_sv2i64(<vscale x 2 x i16>* %ba

 define <vscale x 2 x i64> @masked_zload_sv2i32_to_sv2i64(<vscale x 2 x i32>* %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_zload_sv2i32_to_sv2i64:
+; CHECK: // %bb.0:
 ; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, #-2, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base, i64 -2
@@ -230,6 +244,7 @@ define <vscale x 2 x i64> @masked_zload_sv2i32_to_sv2i64(<vscale x 2 x i32>* %ba

 define <vscale x 2 x i64> @masked_sload_sv2i32_to_sv2i64(<vscale x 2 x i32>* %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_sload_sv2i32_to_sv2i64:
+; CHECK: // %bb.0:
 ; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0, #-1, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base, i64 -1
@@ -245,6 +260,7 @@ define <vscale x 2 x i64> @masked_sload_sv2i32_to_sv2i64(<vscale x 2 x i32>* %ba

 define void @masked_trunc_store_sv2i64_to_sv2i8(<vscale x 2 x i64> %val, <vscale x 2 x i8> *%base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_trunc_store_sv2i64_to_sv2i8:
+; CHECK: // %bb.0:
 ; CHECK-NEXT: st1b { z0.d }, p0, [x0, #3, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base, i64 3
@@ -259,6 +275,7 @@ define void @masked_trunc_store_sv2i64_to_sv2i8(<vscale x 2 x i64> %val, <vscale

 define void @masked_trunc_store_sv2i64_to_sv2i16(<vscale x 2 x i64> %val, <vscale x 2 x i16> *%base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_trunc_store_sv2i64_to_sv2i16:
+; CHECK: // %bb.0:
 ; CHECK-NEXT: st1h { z0.d }, p0, [x0, #4, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base, i64 4
@@ -272,6 +289,7 @@ define void @masked_trunc_store_sv2i64_to_sv2i16(<vscale x 2 x i64> %val, <vscal

 define void @masked_trunc_store_sv2i64_to_sv2i32(<vscale x 2 x i64> %val, <vscale x 2 x i32> *%base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_trunc_store_sv2i64_to_sv2i32:
+; CHECK: // %bb.0:
 ; CHECK-NEXT: st1w { z0.d }, p0, [x0, #5, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base, i64 5
@@ -287,8 +305,9 @@ define void @masked_trunc_store_sv2i64_to_sv2i32(<vscale x 2 x i64> %val, <vscal

 define void @test_masked_ldst_sv4i8(<vscale x 4 x i8> * %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv4i8:
-; CHECK-NEXT: ld1sb { z[[DATA:[0-9]+]].s }, p0/z, [x0, #-1, mul vl]
-; CHECK-NEXT: st1b { z[[DATA]].s }, p0, [x0, #2, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0, #-1, mul vl]
+; CHECK-NEXT: st1b { z0.s }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base, i64 -1
   %data = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>* %base_load,
@@ -305,8 +324,9 @@ define void @test_masked_ldst_sv4i8(<vscale x 4 x i8> * %base, <vscale x 4 x i1>

 define void @test_masked_ldst_sv4i16(<vscale x 4 x i16> * %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv4i16:
-; CHECK-NEXT: ld1sh { z[[DATA:[0-9]+]].s }, p0/z, [x0, #-1, mul vl]
-; CHECK-NEXT: st1h { z[[DATA]].s }, p0, [x0, #2, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0, #-1, mul vl]
+; CHECK-NEXT: st1h { z0.s }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base, i64 -1
   %data = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>* %base_load,
@@ -323,8 +343,9 @@ define void @test_masked_ldst_sv4i16(<vscale x 4 x i16> * %base, <vscale x 4 x i

 define void @test_masked_ldst_sv4i32(<vscale x 4 x i32> * %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv4i32:
-; CHECK-NEXT: ld1w { z[[DATA:[0-9]+]].s }, p0/z, [x0, #6, mul vl]
-; CHECK-NEXT: st1w { z[[DATA]].s }, p0, [x0, #7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT: st1w { z0.s }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %base, i64 6
   %data = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32>* %base_load,
@@ -341,8 +362,9 @@ define void @test_masked_ldst_sv4i32(<vscale x 4 x i32> * %base, <vscale x 4 x i

 define void @test_masked_ldst_sv4f16(<vscale x 4 x half> * %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv4f16:
-; CHECK-NEXT: ld1h { z[[DATA:[0-9]+]].s }, p0/z, [x0, #-1, mul vl]
-; CHECK-NEXT: st1h { z[[DATA]].s }, p0, [x0, #2, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0, #-1, mul vl]
+; CHECK-NEXT: st1h { z0.s }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 4 x half>, <vscale x 4 x half>* %base, i64 -1
   %data = call <vscale x 4 x half> @llvm.masked.load.nxv4f16(<vscale x 4 x half>* %base_load,
@@ -359,8 +381,9 @@ define void @test_masked_ldst_sv4f16(<vscale x 4 x half> * %base, <vscale x 4 x

 define void @test_masked_ldst_sv4f32(<vscale x 4 x float> * %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv4f32:
-; CHECK-NEXT: ld1w { z[[DATA:[0-9]+]].s }, p0/z, [x0, #-1, mul vl]
-; CHECK-NEXT: st1w { z[[DATA]].s }, p0, [x0, #2, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, #-1, mul vl]
+; CHECK-NEXT: st1w { z0.s }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %base, i64 -1
   %data = call <vscale x 4 x float> @llvm.masked.load.nxv4f32(<vscale x 4 x float>* %base_load,
@@ -379,6 +402,7 @@ define void @test_masked_ldst_sv4f32(<vscale x 4 x float> * %base, <vscale x 4 x

 define <vscale x 4 x i32> @masked_zload_sv4i8_to_sv4i32(<vscale x 4 x i8>* %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_zload_sv4i8_to_sv4i32:
+; CHECK: // %bb.0:
 ; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0, #-4, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base, i64 -4
@@ -392,6 +416,7 @@ define <vscale x 4 x i32> @masked_zload_sv4i8_to_sv4i32(<vscale x 4 x i8>* %base

 define <vscale x 4 x i32> @masked_sload_sv4i8_to_sv4i32(<vscale x 4 x i8>* %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_sload_sv4i8_to_sv4i32:
+; CHECK: // %bb.0:
 ; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0, #-3, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base, i64 -3
@@ -405,6 +430,7 @@ define <vscale x 4 x i32> @masked_sload_sv4i8_to_sv4i32(<vscale x 4 x i8>* %base

 define <vscale x 4 x i32> @masked_zload_sv4i16_to_sv4i32(<vscale x 4 x i16>* %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_zload_sv4i16_to_sv4i32:
+; CHECK: // %bb.0:
 ; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base, i64 1
@@ -418,6 +444,7 @@ define <vscale x 4 x i32> @masked_zload_sv4i16_to_sv4i32(<vscale x 4 x i16>* %ba

 define <vscale x 4 x i32> @masked_sload_sv4i16_to_sv4i32(<vscale x 4 x i16>* %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_sload_sv4i16_to_sv4i32:
+; CHECK: // %bb.0:
 ; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0, #2, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base, i64 2
@@ -433,6 +460,7 @@ define <vscale x 4 x i32> @masked_sload_sv4i16_to_sv4i32(<vscale x 4 x i16>* %ba

 define void @masked_trunc_store_sv4i32_to_sv4i8(<vscale x 4 x i32> %val, <vscale x 4 x i8> *%base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_trunc_store_sv4i32_to_sv4i8:
+; CHECK: // %bb.0:
 ; CHECK-NEXT: st1b { z0.s }, p0, [x0, #3, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base, i64 3
@@ -447,6 +475,7 @@ define void @masked_trunc_store_sv4i32_to_sv4i8(<vscale x 4 x i32> %val, <vscale

 define void @masked_trunc_store_sv4i32_to_sv4i16(<vscale x 4 x i32> %val, <vscale x 4 x i16> *%base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_trunc_store_sv4i32_to_sv4i16:
+; CHECK: // %bb.0:
 ; CHECK-NEXT: st1h { z0.s }, p0, [x0, #4, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base, i64 4
@@ -462,8 +491,9 @@ define void @masked_trunc_store_sv4i32_to_sv4i16(<vscale x 4 x i32> %val, <vscal

 define void @test_masked_ldst_sv8i8(<vscale x 8 x i8> * %base, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv8i8:
-; CHECK-NEXT: ld1sb { z[[DATA:[0-9]+]].h }, p0/z, [x0, #6, mul vl]
-; CHECK-NEXT: st1b { z[[DATA]].h }, p0, [x0, #7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT: st1b { z0.h }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base, i64 6
   %data = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %base_load,
@@ -480,8 +510,9 @@ define void @test_masked_ldst_sv8i8(<vscale x 8 x i8> * %base, <vscale x 8 x i1>

 define void @test_masked_ldst_sv8i16(<vscale x 8 x i16> * %base, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv8i16:
-; CHECK-NEXT: ld1h { z[[DATA:[0-9]+]].h }, p0/z, [x0, #6, mul vl]
-; CHECK-NEXT: st1h { z[[DATA]].h }, p0, [x0, #7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT: st1h { z0.h }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %base, i64 6
   %data = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>* %base_load,
@@ -498,8 +529,9 @@ define void @test_masked_ldst_sv8i16(<vscale x 8 x i16> * %base, <vscale x 8 x i

 define void @test_masked_ldst_sv8f16(<vscale x 8 x half> * %base, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv8f16:
-; CHECK-NEXT: ld1h { z[[DATA:[0-9]+]].h }, p0/z, [x0, #-1, mul vl]
-; CHECK-NEXT: st1h { z[[DATA]].h }, p0, [x0, #2, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0, #-1, mul vl]
+; CHECK-NEXT: st1h { z0.h }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %base, i64 -1
   %data = call <vscale x 8 x half> @llvm.masked.load.nxv8f16(<vscale x 8 x half>* %base_load,
@@ -516,8 +548,9 @@ define void @test_masked_ldst_sv8f16(<vscale x 8 x half> * %base, <vscale x 8 x

 define void @test_masked_ldst_sv8bf16(<vscale x 8 x bfloat> * %base, <vscale x 8 x i1> %mask) nounwind #0 {
 ; CHECK-LABEL: test_masked_ldst_sv8bf16:
-; CHECK-NEXT: ld1h { z[[DATA:[0-9]+]].h }, p0/z, [x0, #-1, mul vl]
-; CHECK-NEXT: st1h { z[[DATA]].h }, p0, [x0, #2, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0, #-1, mul vl]
+; CHECK-NEXT: st1h { z0.h }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %base, i64 -1
   %data = call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(<vscale x 8 x bfloat>* %base_load,
@@ -536,6 +569,7 @@ define void @test_masked_ldst_sv8bf16(<vscale x 8 x bfloat> * %base, <vscale x 8

 define <vscale x 8 x i16> @masked_zload_sv8i8_to_sv8i16(<vscale x 8 x i8>* %base, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_zload_sv8i8_to_sv8i16:
+; CHECK: // %bb.0:
 ; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0, #-4, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base, i64 -4
@@ -549,6 +583,7 @@ define <vscale x 8 x i16> @masked_zload_sv8i8_to_sv8i16(<vscale x 8 x i8>* %base

 define <vscale x 8 x i16> @masked_sload_sv8i8_to_sv8i16(<vscale x 8 x i8>* %base, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_sload_sv8i8_to_sv8i16:
+; CHECK: // %bb.0:
 ; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0, #-3, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base, i64 -3
@@ -564,6 +599,7 @@ define <vscale x 8 x i16> @masked_sload_sv8i8_to_sv8i16(<vscale x 8 x i8>* %base

 define void @masked_trunc_store_sv8i16_to_sv8i8(<vscale x 8 x i16> %val, <vscale x 8 x i8> *%base, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_trunc_store_sv8i16_to_sv8i8:
+; CHECK: // %bb.0:
 ; CHECK-NEXT: st1b { z0.h }, p0, [x0, #3, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base, i64 3
@@ -579,8 +615,9 @@ define void @masked_trunc_store_sv8i16_to_sv8i8(<vscale x 8 x i16> %val, <vscale

 define void @test_masked_ldst_sv16i8(<vscale x 16 x i8> * %base, <vscale x 16 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv16i8:
-; CHECK-NEXT: ld1b { z[[DATA:[0-9]+]].b }, p0/z, [x0, #6, mul vl]
-; CHECK-NEXT: st1b { z[[DATA]].b }, p0, [x0, #7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT: st1b { z0.b }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base, i64 6
   %data = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8>* %base_load,

@@ -1,4 +1,5 @@
-; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s 2>%t | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s
 ; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t

 ; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
@@ -11,12 +12,13 @@

 define void @imm_out_of_range(<vscale x 2 x i64> * %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: imm_out_of_range:
+; CHECK: // %bb.0:
 ; CHECK-NEXT: rdvl x8, #8
 ; CHECK-NEXT: add x8, x0, x8
-; CHECK-NEXT: ldnt1d { z[[DATA:[0-9]+]].d }, p0/z, [x{{[0-9]+}}]
+; CHECK-NEXT: ldnt1d { z0.d }, p0/z, [x8]
 ; CHECK-NEXT: rdvl x8, #-9
 ; CHECK-NEXT: add x8, x0, x8
-; CHECK-NEXT: stnt1d { z[[DATA]].d }, p0, [x{{[0-9]+}}]
+; CHECK-NEXT: stnt1d { z0.d }, p0, [x8]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 8
   %base_load_bc = bitcast <vscale x 2 x i64>* %base_load to i64*
@@ -35,8 +37,9 @@ define void @imm_out_of_range(<vscale x 2 x i64> * %base, <vscale x 2 x i1> %mas

 define void @test_masked_ldst_sv2i64(<vscale x 2 x i64> * %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2i64:
-; CHECK-NEXT: ldnt1d { z[[DATA:[0-9]+]].d }, p0/z, [x0, #-8, mul vl]
-; CHECK-NEXT: stnt1d { z[[DATA]].d }, p0, [x0, #-7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnt1d { z0.d }, p0/z, [x0, #-8, mul vl]
+; CHECK-NEXT: stnt1d { z0.d }, p0, [x0, #-7, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 -8
   %base_load_bc = bitcast <vscale x 2 x i64>* %base_load to i64*
@@ -52,8 +55,9 @@ define void @test_masked_ldst_sv2i64(<vscale x 2 x i64> * %base, <vscale x 2 x i

 define void @test_masked_ldst_sv2f64(<vscale x 2 x double> * %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2f64:
-; CHECK-NEXT: ldnt1d { z[[DATA:[0-9]+]].d }, p0/z, [x0, #-6, mul vl]
-; CHECK-NEXT: stnt1d { z[[DATA]].d }, p0, [x0, #-5, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnt1d { z0.d }, p0/z, [x0, #-6, mul vl]
+; CHECK-NEXT: stnt1d { z0.d }, p0, [x0, #-5, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %base, i64 -6
   %base_load_bc = bitcast <vscale x 2 x double>* %base_load to double*
@@ -71,8 +75,9 @@ define void @test_masked_ldst_sv2f64(<vscale x 2 x double> * %base, <vscale x 2

 define void @test_masked_ldst_sv4i32(<vscale x 4 x i32> * %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv4i32:
-; CHECK-NEXT: ldnt1w { z[[DATA:[0-9]+]].s }, p0/z, [x0, #6, mul vl]
-; CHECK-NEXT: stnt1w { z[[DATA]].s }, p0, [x0, #7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnt1w { z0.s }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT: stnt1w { z0.s }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %base, i64 6
   %base_load_bc = bitcast <vscale x 4 x i32>* %base_load to i32*
@@ -88,8 +93,9 @@ define void @test_masked_ldst_sv4i32(<vscale x 4 x i32> * %base, <vscale x 4 x i

 define void @test_masked_ldst_sv4f32(<vscale x 4 x float> * %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv4f32:
-; CHECK-NEXT: ldnt1w { z[[DATA:[0-9]+]].s }, p0/z, [x0, #-1, mul vl]
-; CHECK-NEXT: stnt1w { z[[DATA]].s }, p0, [x0, #2, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnt1w { z0.s }, p0/z, [x0, #-1, mul vl]
+; CHECK-NEXT: stnt1w { z0.s }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %base, i64 -1
   %base_load_bc = bitcast <vscale x 4 x float>* %base_load to float*
@@ -108,8 +114,9 @@ define void @test_masked_ldst_sv4f32(<vscale x 4 x float> * %base, <vscale x 4 x

 define void @test_masked_ldst_sv8i16(<vscale x 8 x i16> * %base, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv8i16:
-; CHECK-NEXT: ldnt1h { z[[DATA:[0-9]+]].h }, p0/z, [x0, #6, mul vl]
-; CHECK-NEXT: stnt1h { z[[DATA]].h }, p0, [x0, #7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnt1h { z0.h }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT: stnt1h { z0.h }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %base, i64 6
   %base_load_bc = bitcast <vscale x 8 x i16>* %base_load to i16*
@@ -125,8 +132,9 @@ define void @test_masked_ldst_sv8i16(<vscale x 8 x i16> * %base, <vscale x 8 x i

 define void @test_masked_ldst_sv8f16(<vscale x 8 x half> * %base, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv8f16:
-; CHECK-NEXT: ldnt1h { z[[DATA:[0-9]+]].h }, p0/z, [x0, #-1, mul vl]
-; CHECK-NEXT: stnt1h { z[[DATA]].h }, p0, [x0, #2, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnt1h { z0.h }, p0/z, [x0, #-1, mul vl]
+; CHECK-NEXT: stnt1h { z0.h }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %base, i64 -1
   %base_load_bc = bitcast <vscale x 8 x half>* %base_load to half*
@@ -142,8 +150,9 @@ define void @test_masked_ldst_sv8f16(<vscale x 8 x half> * %base, <vscale x 8 x

 define void @test_masked_ldst_sv8bf16(<vscale x 8 x bfloat> * %base, <vscale x 8 x i1> %mask) nounwind #0 {
 ; CHECK-LABEL: test_masked_ldst_sv8bf16:
-; CHECK-NEXT: ldnt1h { z[[DATA:[0-9]+]].h }, p0/z, [x0, #-1, mul vl]
-; CHECK-NEXT: stnt1h { z[[DATA]].h }, p0, [x0, #2, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnt1h { z0.h }, p0/z, [x0, #-1, mul vl]
+; CHECK-NEXT: stnt1h { z0.h }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %base, i64 -1
   %base_load_bc = bitcast <vscale x 8 x bfloat>* %base_load to bfloat*
@@ -161,8 +170,9 @@ define void @test_masked_ldst_sv8bf16(<vscale x 8 x bfloat> * %base, <vscale x 8

 define void @test_masked_ldst_sv16i8(<vscale x 16 x i8> * %base, <vscale x 16 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv16i8:
-; CHECK-NEXT: ldnt1b { z[[DATA:[0-9]+]].b }, p0/z, [x0, #6, mul vl]
-; CHECK-NEXT: stnt1b { z[[DATA]].b }, p0, [x0, #7, mul vl]
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldnt1b { z0.b }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT: stnt1b { z0.b }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT: ret
   %base_load = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base, i64 6
   %base_load_bc = bitcast <vscale x 16 x i8>* %base_load to i8*