
[AArch64][SVE] Correct intrinsics and patterns for logical predicate instructions

In general, SVE intrinsics are considered predicated and merging,
with everything else having suitable decoration.  For predicated
zeroing operations (like the predicate logical instructions) we
use the "_z" suffix.  After this change all intrinsics use their
expected names (i.e. orr instead of or and eor instead of xor).
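
For instance, the predicated vector OR is now reached via the orr
name.  A minimal sketch (function name illustrative; the nxv16i8
instance is just one of the legal element-type overloads):

  define <vscale x 16 x i8> @orr_example(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
    ; Merging semantics: inactive lanes keep the corresponding lane of %a.
    %out = call <vscale x 16 x i8> @llvm.aarch64.sve.orr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
    ret <vscale x 16 x i8> %out
  }
  declare <vscale x 16 x i8> @llvm.aarch64.sve.orr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)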

I've removed intrinsics and patterns for condition code setting
instructions as that data is not returned as part of the intrinsic.
The expectation is to ask for a cc flag explicitly.

For example:
  a = and_z(pg, p1, p2)
  cc = ptest_<flag>(pg, a)

The code generator is then expected to use the flag-setting ("s")
variants of these instructions when available.
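
A minimal IR sketch of that flow, assuming a ptest-style companion
intrinsic (here the "any" flag) that takes the governing predicate and
the combined result; isel is then free to fold the pair into ANDS plus
a flag read:

  define i1 @and_then_test(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %p1, <vscale x 16 x i1> %p2) {
    ; a = and_z(pg, p1, p2): zeroing AND of two predicates
    %a = call <vscale x 16 x i1> @llvm.aarch64.sve.and.z.nxv16i1(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %p1, <vscale x 16 x i1> %p2)
    ; cc = ptest_any(pg, a): ask for the condition code explicitly
    %cc = call i1 @llvm.aarch64.sve.ptest.any.nxv16i1(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a)
    ret i1 %cc
  }
  declare <vscale x 16 x i1> @llvm.aarch64.sve.and.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
  declare i1 @llvm.aarch64.sve.ptest.any.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>)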

Differential Revision: https://reviews.llvm.org/D71715
Paul Walker 2019-12-19 13:34:37 +00:00
parent 6bf5a34797
commit 887a76b062
5 changed files with 109 additions and 422 deletions

include/llvm/IR/IntrinsicsAArch64.td

@@ -1397,28 +1397,14 @@ def int_aarch64_sve_zip2 : AdvSIMD_2VectorArg_Intrinsic;
// Logical operations
//
def int_aarch64_sve_and : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_bic : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_cnot : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_eor : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_not : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_orr : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_and : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_or : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_xor : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_bic_base : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_bic : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_eor : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_ands : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_bics : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_eors : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_orr : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_orn : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_nor : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_nand : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_orrs : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_orns : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_nors : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_nands : AdvSIMD_Pred2VectorArg_Intrinsic;
// TODO: Deprecated and will be replaced by isel patterns.
def int_aarch64_sve_orr_imm : AdvSIMD_1VectorArg_Imm64_Intrinsic;
def int_aarch64_sve_eor_imm : AdvSIMD_1VectorArg_Imm64_Intrinsic;
def int_aarch64_sve_and_imm : AdvSIMD_1VectorArg_Imm64_Intrinsic;
@@ -1578,6 +1564,13 @@ def int_aarch64_sve_ptrue : AdvSIMD_SVE_PTRUE_Intrinsic;
// Predicate operations
//
def int_aarch64_sve_and_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_bic_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_eor_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_nand_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_nor_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_orn_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_orr_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_pfirst : AdvSIMD_Pred1VectorArg_Intrinsic;
def int_aarch64_sve_pnext : AdvSIMD_Pred1VectorArg_Intrinsic;
def int_aarch64_sve_punpkhi : AdvSIMD_SVE_PUNPKHI_Intrinsic;

lib/Target/AArch64/AArch64SVEInstrInfo.td

@@ -91,14 +91,14 @@ let Predicates = [HasSVE] in {
defm AND_ZZZ : sve_int_bin_cons_log<0b00, "and", and>;
defm ORR_ZZZ : sve_int_bin_cons_log<0b01, "orr", or>;
defm EOR_ZZZ : sve_int_bin_cons_log<0b10, "eor", xor>;
defm BIC_ZZZ : sve_int_bin_cons_log<0b11, "bic", int_aarch64_sve_bic_base>;
defm BIC_ZZZ : sve_int_bin_cons_log<0b11, "bic", null_frag>;
defm ADD_ZPmZ : sve_int_bin_pred_arit_0<0b000, "add", int_aarch64_sve_add>;
defm SUB_ZPmZ : sve_int_bin_pred_arit_0<0b001, "sub", int_aarch64_sve_sub>;
defm SUBR_ZPmZ : sve_int_bin_pred_arit_0<0b011, "subr", int_aarch64_sve_subr>;
defm ORR_ZPmZ : sve_int_bin_pred_log<0b000, "orr", int_aarch64_sve_or>;
defm EOR_ZPmZ : sve_int_bin_pred_log<0b001, "eor", int_aarch64_sve_xor>;
defm ORR_ZPmZ : sve_int_bin_pred_log<0b000, "orr", int_aarch64_sve_orr>;
defm EOR_ZPmZ : sve_int_bin_pred_log<0b001, "eor", int_aarch64_sve_eor>;
defm AND_ZPmZ : sve_int_bin_pred_log<0b010, "and", int_aarch64_sve_and>;
defm BIC_ZPmZ : sve_int_bin_pred_log<0b011, "bic", int_aarch64_sve_bic>;
@@ -307,21 +307,21 @@ let Predicates = [HasSVE] in {
defm PFIRST : sve_int_pfirst<0b00000, "pfirst", int_aarch64_sve_pfirst>;
defm PNEXT : sve_int_pnext<0b00110, "pnext", int_aarch64_sve_pnext>;
defm AND_PPzPP : sve_int_pred_log<0b0000, "and", int_aarch64_sve_and>;
defm BIC_PPzPP : sve_int_pred_log<0b0001, "bic", int_aarch64_sve_bic>;
defm EOR_PPzPP : sve_int_pred_log<0b0010, "eor", int_aarch64_sve_eor>;
defm AND_PPzPP : sve_int_pred_log<0b0000, "and", int_aarch64_sve_and_z>;
defm BIC_PPzPP : sve_int_pred_log<0b0001, "bic", int_aarch64_sve_bic_z>;
defm EOR_PPzPP : sve_int_pred_log<0b0010, "eor", int_aarch64_sve_eor_z>;
defm SEL_PPPP : sve_int_pred_log<0b0011, "sel", vselect>;
defm ANDS_PPzPP : sve_int_pred_log<0b0100, "ands", int_aarch64_sve_ands>;
defm BICS_PPzPP : sve_int_pred_log<0b0101, "bics", int_aarch64_sve_bics>;
defm EORS_PPzPP : sve_int_pred_log<0b0110, "eors", int_aarch64_sve_eors>;
defm ORR_PPzPP : sve_int_pred_log<0b1000, "orr", int_aarch64_sve_orr>;
defm ORN_PPzPP : sve_int_pred_log<0b1001, "orn", int_aarch64_sve_orn>;
defm NOR_PPzPP : sve_int_pred_log<0b1010, "nor", int_aarch64_sve_nor>;
defm NAND_PPzPP : sve_int_pred_log<0b1011, "nand", int_aarch64_sve_nand>;
defm ORRS_PPzPP : sve_int_pred_log<0b1100, "orrs", int_aarch64_sve_orrs>;
defm ORNS_PPzPP : sve_int_pred_log<0b1101, "orns", int_aarch64_sve_orns>;
defm NORS_PPzPP : sve_int_pred_log<0b1110, "nors", int_aarch64_sve_nors>;
defm NANDS_PPzPP : sve_int_pred_log<0b1111, "nands", int_aarch64_sve_nands>;
defm ANDS_PPzPP : sve_int_pred_log<0b0100, "ands", null_frag>;
defm BICS_PPzPP : sve_int_pred_log<0b0101, "bics", null_frag>;
defm EORS_PPzPP : sve_int_pred_log<0b0110, "eors", null_frag>;
defm ORR_PPzPP : sve_int_pred_log<0b1000, "orr", int_aarch64_sve_orr_z>;
defm ORN_PPzPP : sve_int_pred_log<0b1001, "orn", int_aarch64_sve_orn_z>;
defm NOR_PPzPP : sve_int_pred_log<0b1010, "nor", int_aarch64_sve_nor_z>;
defm NAND_PPzPP : sve_int_pred_log<0b1011, "nand", int_aarch64_sve_nand_z>;
defm ORRS_PPzPP : sve_int_pred_log<0b1100, "orrs", null_frag>;
defm ORNS_PPzPP : sve_int_pred_log<0b1101, "orns", null_frag>;
defm NORS_PPzPP : sve_int_pred_log<0b1110, "nors", null_frag>;
defm NANDS_PPzPP : sve_int_pred_log<0b1111, "nands", null_frag>;
defm CLASTA_RPZ : sve_int_perm_clast_rz<0, "clasta", AArch64clasta_n>;
defm CLASTB_RPZ : sve_int_perm_clast_rz<1, "clastb", AArch64clastb_n>;

test/CodeGen/AArch64/sve-int-log-pred.ll

@@ -5,8 +5,8 @@ define <vscale x 16 x i8> @and_pred_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8
; CHECK: and z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
%out = call <vscale x 16 x i8> @llvm.aarch64.sve.and.nxv2i8(<vscale x 16 x i1> %pg,
<vscale x 16 x i8> %a,
<vscale x 16 x i8> %b)
<vscale x 16 x i8> %a,
<vscale x 16 x i8> %b)
ret <vscale x 16 x i8> %out
}
@@ -20,7 +20,6 @@ define <vscale x 8 x i16> @and_pred_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16
ret <vscale x 8 x i16> %out
}
define <vscale x 4 x i32> @and_pred_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: and_pred_i32:
; CHECK: and z0.s, p0/m, z0.s, z1.s
@@ -41,14 +40,13 @@ define <vscale x 2 x i64> @and_pred_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64
ret <vscale x 2 x i64> %out
}
define <vscale x 16 x i8> @or_pred_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: or_pred_i8:
; CHECK: orr z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
%out = call <vscale x 16 x i8> @llvm.aarch64.sve.or.nxv2i8(<vscale x 16 x i1> %pg,
<vscale x 16 x i8> %a,
<vscale x 16 x i8> %b)
%out = call <vscale x 16 x i8> @llvm.aarch64.sve.orr.nxv2i8(<vscale x 16 x i1> %pg,
<vscale x 16 x i8> %a,
<vscale x 16 x i8> %b)
ret <vscale x 16 x i8> %out
}
@@ -56,18 +54,17 @@ define <vscale x 8 x i16> @or_pred_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
; CHECK-LABEL: or_pred_i16:
; CHECK: orr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.or.nxv2i16(<vscale x 8 x i1> %pg,
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.orr.nxv2i16(<vscale x 8 x i1> %pg,
<vscale x 8 x i16> %a,
<vscale x 8 x i16> %b)
ret <vscale x 8 x i16> %out
}
define <vscale x 4 x i32> @or_pred_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: or_pred_i32:
; CHECK: orr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.or.nxv2i32(<vscale x 4 x i1> %pg,
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.orr.nxv2i32(<vscale x 4 x i1> %pg,
<vscale x 4 x i32> %a,
<vscale x 4 x i32> %b)
ret <vscale x 4 x i32> %out
@@ -77,20 +74,19 @@ define <vscale x 2 x i64> @or_pred_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64>
; CHECK-LABEL: or_pred_i64:
; CHECK: orr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.or.nxv2i64(<vscale x 2 x i1> %pg,
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.orr.nxv2i64(<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %a,
<vscale x 2 x i64> %b)
ret <vscale x 2 x i64> %out
}
define <vscale x 16 x i8> @xor_pred_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: xor_pred_i8:
; CHECK: eor z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
%out = call <vscale x 16 x i8> @llvm.aarch64.sve.xor.nxv2i8(<vscale x 16 x i1> %pg,
<vscale x 16 x i8> %a,
<vscale x 16 x i8> %b)
%out = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv2i8(<vscale x 16 x i1> %pg,
<vscale x 16 x i8> %a,
<vscale x 16 x i8> %b)
ret <vscale x 16 x i8> %out
}
@@ -98,18 +94,17 @@ define <vscale x 8 x i16> @xor_pred_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16
; CHECK-LABEL: xor_pred_i16:
; CHECK: eor z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.xor.nxv2i16(<vscale x 8 x i1> %pg,
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv2i16(<vscale x 8 x i1> %pg,
<vscale x 8 x i16> %a,
<vscale x 8 x i16> %b)
ret <vscale x 8 x i16> %out
}
define <vscale x 4 x i32> @xor_pred_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: xor_pred_i32:
; CHECK: eor z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.xor.nxv2i32(<vscale x 4 x i1> %pg,
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv2i32(<vscale x 4 x i1> %pg,
<vscale x 4 x i32> %a,
<vscale x 4 x i32> %b)
ret <vscale x 4 x i32> %out
@@ -119,7 +114,7 @@ define <vscale x 2 x i64> @xor_pred_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64
; CHECK-LABEL: xor_pred_i64:
; CHECK: eor z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.xor.nxv2i64(<vscale x 2 x i1> %pg,
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %a,
<vscale x 2 x i64> %b)
ret <vscale x 2 x i64> %out
@@ -130,8 +125,8 @@ define <vscale x 16 x i8> @bic_pred_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8
; CHECK: bic z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
%out = call <vscale x 16 x i8> @llvm.aarch64.sve.bic.nxv2i8(<vscale x 16 x i1> %pg,
<vscale x 16 x i8> %a,
<vscale x 16 x i8> %b)
<vscale x 16 x i8> %a,
<vscale x 16 x i8> %b)
ret <vscale x 16 x i8> %out
}
@@ -170,14 +165,14 @@ declare <vscale x 16 x i8> @llvm.aarch64.sve.and.nxv2i8(<vscale x 16 x i1>,<vsca
declare <vscale x 8 x i16> @llvm.aarch64.sve.and.nxv2i16(<vscale x 8 x i1>,<vscale x 8 x i16>,<vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.and.nxv2i32(<vscale x 4 x i1>,<vscale x 4 x i32>,<vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.and.nxv2i64(<vscale x 2 x i1>,<vscale x 2 x i64>,<vscale x 2 x i64>)
declare <vscale x 16 x i8> @llvm.aarch64.sve.or.nxv2i8(<vscale x 16 x i1>,<vscale x 16 x i8>,<vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.or.nxv2i16(<vscale x 8 x i1>,<vscale x 8 x i16>,<vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.or.nxv2i32(<vscale x 4 x i1>,<vscale x 4 x i32>,<vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.or.nxv2i64(<vscale x 2 x i1>,<vscale x 2 x i64>,<vscale x 2 x i64>)
declare <vscale x 16 x i8> @llvm.aarch64.sve.xor.nxv2i8(<vscale x 16 x i1>,<vscale x 16 x i8>,<vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.xor.nxv2i16(<vscale x 8 x i1>,<vscale x 8 x i16>,<vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.xor.nxv2i32(<vscale x 4 x i1>,<vscale x 4 x i32>,<vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.xor.nxv2i64(<vscale x 2 x i1>,<vscale x 2 x i64>,<vscale x 2 x i64>)
declare <vscale x 16 x i8> @llvm.aarch64.sve.orr.nxv2i8(<vscale x 16 x i1>,<vscale x 16 x i8>,<vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.orr.nxv2i16(<vscale x 8 x i1>,<vscale x 8 x i16>,<vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.orr.nxv2i32(<vscale x 4 x i1>,<vscale x 4 x i32>,<vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.orr.nxv2i64(<vscale x 2 x i1>,<vscale x 2 x i64>,<vscale x 2 x i64>)
declare <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv2i8(<vscale x 16 x i1>,<vscale x 16 x i8>,<vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv2i16(<vscale x 8 x i1>,<vscale x 8 x i16>,<vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv2i32(<vscale x 4 x i1>,<vscale x 4 x i32>,<vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1>,<vscale x 2 x i64>,<vscale x 2 x i64>)
declare <vscale x 16 x i8> @llvm.aarch64.sve.bic.nxv2i8(<vscale x 16 x i1>,<vscale x 16 x i8>,<vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.bic.nxv2i16(<vscale x 8 x i1>,<vscale x 8 x i16>,<vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.bic.nxv2i32(<vscale x 4 x i1>,<vscale x 4 x i32>,<vscale x 4 x i32>)

test/CodeGen/AArch64/sve-int-log.ll

@@ -94,45 +94,3 @@ define <vscale x 16 x i8> @xor_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
%res = xor <vscale x 16 x i8> %a, %b
ret <vscale x 16 x i8> %res
}
define <vscale x 2 x i64> @bic_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: bic_d
; CHECK: bic z0.d, z0.d, z1.d
; CHECK-NEXT: ret
%res = call <vscale x 2 x i64> @llvm.aarch64.sve.bic.base.nxv2i64(<vscale x 2 x i64> %a,
<vscale x 2 x i64> %b)
ret <vscale x 2 x i64> %res
}
define <vscale x 4 x i32> @bic_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: bic_s
; CHECK: bic z0.d, z0.d, z1.d
; CHECK-NEXT: ret
%res = call <vscale x 4 x i32> @llvm.aarch64.sve.bic.base.nxv4i32(<vscale x 4 x i32> %a,
<vscale x 4 x i32> %b)
ret <vscale x 4 x i32> %res
}
define <vscale x 8 x i16> @bic_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: bic_h
; CHECK: bic z0.d, z0.d, z1.d
; CHECK-NEXT: ret
%res = call <vscale x 8 x i16> @llvm.aarch64.sve.bic.base.nxv8i16(<vscale x 8 x i16> %a,
<vscale x 8 x i16> %b)
ret <vscale x 8 x i16> %res
}
define <vscale x 16 x i8> @bic_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: bic_b
; CHECK: bic z0.d, z0.d, z1.d
; CHECK-NEXT: ret
%res = call <vscale x 16 x i8> @llvm.aarch64.sve.bic.base.nxv16i8(<vscale x 16 x i8> %a,
<vscale x 16 x i8> %b)
ret <vscale x 16 x i8> %res
}
declare <vscale x 2 x i64> @llvm.aarch64.sve.bic.base.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.bic.base.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.bic.base.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i8> @llvm.aarch64.sve.bic.base.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)

test/CodeGen/AArch64/sve-pred-log.ll

@@ -36,7 +36,7 @@ define <vscale x 16 x i1> @and_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn
; CHECK-LABEL: and_16:
; CHECK: and p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 16 x i1> @llvm.aarch64.sve.and.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
%res = call <vscale x 16 x i1> @llvm.aarch64.sve.and.z.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
ret <vscale x 16 x i1> %res;
}
@@ -44,7 +44,7 @@ define <vscale x 8 x i1> @and_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <v
; CHECK-LABEL: and_8:
; CHECK: and p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.aarch64.sve.and.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
%res = call <vscale x 8 x i1> @llvm.aarch64.sve.and.z.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
ret <vscale x 8 x i1> %res;
}
@@ -52,7 +52,7 @@ define <vscale x 4 x i1> @and_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <v
; CHECK-LABEL: and_4:
; CHECK: and p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 4 x i1> @llvm.aarch64.sve.and.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
%res = call <vscale x 4 x i1> @llvm.aarch64.sve.and.z.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
ret <vscale x 4 x i1> %res;
}
@@ -60,16 +60,15 @@ define <vscale x 2 x i1> @and_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <v
; CHECK-LABEL: and_2:
; CHECK: and p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 2 x i1> @llvm.aarch64.sve.and.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
%res = call <vscale x 2 x i1> @llvm.aarch64.sve.and.z.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
ret <vscale x 2 x i1> %res;
}
define <vscale x 16 x i1> @bic_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd) {
; CHECK-LABEL: bic_16:
; CHECK: bic p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 16 x i1> @llvm.aarch64.sve.bic.pred.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
%res = call <vscale x 16 x i1> @llvm.aarch64.sve.bic.z.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
ret <vscale x 16 x i1> %res;
}
@@ -77,7 +76,7 @@ define <vscale x 8 x i1> @bic_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <v
; CHECK-LABEL: bic_8:
; CHECK: bic p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.aarch64.sve.bic.pred.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
%res = call <vscale x 8 x i1> @llvm.aarch64.sve.bic.z.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
ret <vscale x 8 x i1> %res;
}
@@ -85,7 +84,7 @@ define <vscale x 4 x i1> @bic_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <v
; CHECK-LABEL: bic_4:
; CHECK: bic p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 4 x i1> @llvm.aarch64.sve.bic.pred.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
%res = call <vscale x 4 x i1> @llvm.aarch64.sve.bic.z.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
ret <vscale x 4 x i1> %res;
}
@@ -93,7 +92,7 @@ define <vscale x 2 x i1> @bic_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <v
; CHECK-LABEL: bic_2:
; CHECK: bic p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 2 x i1> @llvm.aarch64.sve.bic.pred.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
%res = call <vscale x 2 x i1> @llvm.aarch64.sve.bic.z.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
ret <vscale x 2 x i1> %res;
}
@@ -101,7 +100,7 @@ define <vscale x 16 x i1> @eor_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn
; CHECK-LABEL: eor_16:
; CHECK: eor p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 16 x i1> @llvm.aarch64.sve.eor.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
%res = call <vscale x 16 x i1> @llvm.aarch64.sve.eor.z.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
ret <vscale x 16 x i1> %res;
}
@@ -109,7 +108,7 @@ define <vscale x 8 x i1> @eor_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <v
; CHECK-LABEL: eor_8:
; CHECK: eor p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.aarch64.sve.eor.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
%res = call <vscale x 8 x i1> @llvm.aarch64.sve.eor.z.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
ret <vscale x 8 x i1> %res;
}
@@ -117,7 +116,7 @@ define <vscale x 4 x i1> @eor_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <v
; CHECK-LABEL: eor_4:
; CHECK: eor p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 4 x i1> @llvm.aarch64.sve.eor.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
%res = call <vscale x 4 x i1> @llvm.aarch64.sve.eor.z.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
ret <vscale x 4 x i1> %res;
}
@@ -125,116 +124,15 @@ define <vscale x 2 x i1> @eor_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <v
; CHECK-LABEL: eor_2:
; CHECK: eor p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 2 x i1> @llvm.aarch64.sve.eor.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
%res = call <vscale x 2 x i1> @llvm.aarch64.sve.eor.z.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
ret <vscale x 2 x i1> %res;
}
define <vscale x 16 x i1> @ands_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd) {
; CHECK-LABEL: ands_16:
; CHECK: ands p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 16 x i1> @llvm.aarch64.sve.ands.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
ret <vscale x 16 x i1> %res;
}
define <vscale x 8 x i1> @ands_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd) {
; CHECK-LABEL: ands_8:
; CHECK: ands p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.aarch64.sve.ands.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
ret <vscale x 8 x i1> %res;
}
define <vscale x 4 x i1> @ands_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd) {
; CHECK-LABEL: ands_4:
; CHECK: ands p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 4 x i1> @llvm.aarch64.sve.ands.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
ret <vscale x 4 x i1> %res;
}
define <vscale x 2 x i1> @ands_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
; CHECK-LABEL: ands_2:
; CHECK: ands p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 2 x i1> @llvm.aarch64.sve.ands.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
ret <vscale x 2 x i1> %res;
}
define <vscale x 16 x i1> @bics_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd) {
; CHECK-LABEL: bics_16:
; CHECK: bics p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 16 x i1> @llvm.aarch64.sve.bics.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
ret <vscale x 16 x i1> %res;
}
define <vscale x 8 x i1> @bics_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd) {
; CHECK-LABEL: bics_8:
; CHECK: bics p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.aarch64.sve.bics.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
ret <vscale x 8 x i1> %res;
}
define <vscale x 4 x i1> @bics_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd) {
; CHECK-LABEL: bics_4:
; CHECK: bics p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 4 x i1> @llvm.aarch64.sve.bics.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
ret <vscale x 4 x i1> %res;
}
define <vscale x 2 x i1> @bics_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
; CHECK-LABEL: bics_2:
; CHECK: bics p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 2 x i1> @llvm.aarch64.sve.bics.nxv2i1(<vscale x 2 x i1> %Pg,
<vscale x 2 x i1> %Pn,
<vscale x 2 x i1> %Pd)
ret <vscale x 2 x i1> %res;
}
define <vscale x 16 x i1> @eors_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd) {
; CHECK-LABEL: eors_16:
; CHECK: eors p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 16 x i1> @llvm.aarch64.sve.eors.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
ret <vscale x 16 x i1> %res;
}
define <vscale x 8 x i1> @eors_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd) {
; CHECK-LABEL: eors_8:
; CHECK: eors p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.aarch64.sve.eors.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
ret <vscale x 8 x i1> %res;
}
define <vscale x 4 x i1> @eors_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd) {
; CHECK-LABEL: eors_4:
; CHECK: eors p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 4 x i1> @llvm.aarch64.sve.eors.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
ret <vscale x 4 x i1> %res;
}
define <vscale x 2 x i1> @eors_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
; CHECK-LABEL: eors_2:
; CHECK: eors p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 2 x i1> @llvm.aarch64.sve.eors.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
ret <vscale x 2 x i1> %res;
}
define <vscale x 16 x i1> @orr_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd) {
; CHECK-LABEL: orr_16:
; CHECK: orr p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 16 x i1> @llvm.aarch64.sve.orr.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
%res = call <vscale x 16 x i1> @llvm.aarch64.sve.orr.z.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
ret <vscale x 16 x i1> %res;
}
@@ -242,7 +140,7 @@ define <vscale x 8 x i1> @orr_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <v
; CHECK-LABEL: orr_8:
; CHECK: orr p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.aarch64.sve.orr.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
%res = call <vscale x 8 x i1> @llvm.aarch64.sve.orr.z.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
ret <vscale x 8 x i1> %res;
}
@@ -250,7 +148,7 @@ define <vscale x 4 x i1> @orr_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <v
; CHECK-LABEL: orr_4:
; CHECK: orr p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 4 x i1> @llvm.aarch64.sve.orr.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
%res = call <vscale x 4 x i1> @llvm.aarch64.sve.orr.z.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
ret <vscale x 4 x i1> %res;
}
@@ -258,16 +156,15 @@ define <vscale x 2 x i1> @orr_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <v
; CHECK-LABEL: orr_2:
; CHECK: orr p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 2 x i1> @llvm.aarch64.sve.orr.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
%res = call <vscale x 2 x i1> @llvm.aarch64.sve.orr.z.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
ret <vscale x 2 x i1> %res;
}
define <vscale x 16 x i1> @orn_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd) {
; CHECK-LABEL: orn_16:
; CHECK: orn p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 16 x i1> @llvm.aarch64.sve.orn.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
%res = call <vscale x 16 x i1> @llvm.aarch64.sve.orn.z.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
ret <vscale x 16 x i1> %res;
}
@@ -275,7 +172,7 @@ define <vscale x 8 x i1> @orn_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <v
; CHECK-LABEL: orn_8:
; CHECK: orn p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.aarch64.sve.orn.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
%res = call <vscale x 8 x i1> @llvm.aarch64.sve.orn.z.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
ret <vscale x 8 x i1> %res;
}
@@ -283,7 +180,7 @@ define <vscale x 4 x i1> @orn_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <v
; CHECK-LABEL: orn_4:
; CHECK: orn p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 4 x i1> @llvm.aarch64.sve.orn.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
%res = call <vscale x 4 x i1> @llvm.aarch64.sve.orn.z.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
ret <vscale x 4 x i1> %res;
}
@@ -291,7 +188,7 @@ define <vscale x 2 x i1> @orn_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <v
; CHECK-LABEL: orn_2:
; CHECK: orn p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 2 x i1> @llvm.aarch64.sve.orn.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
%res = call <vscale x 2 x i1> @llvm.aarch64.sve.orn.z.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
ret <vscale x 2 x i1> %res;
}
@@ -299,7 +196,7 @@ define <vscale x 16 x i1> @nor_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn
; CHECK-LABEL: nor_16:
; CHECK: nor p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 16 x i1> @llvm.aarch64.sve.nor.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
%res = call <vscale x 16 x i1> @llvm.aarch64.sve.nor.z.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
ret <vscale x 16 x i1> %res;
}
@@ -307,7 +204,7 @@ define <vscale x 8 x i1> @nor_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <v
; CHECK-LABEL: nor_8:
; CHECK: nor p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.aarch64.sve.nor.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
%res = call <vscale x 8 x i1> @llvm.aarch64.sve.nor.z.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
ret <vscale x 8 x i1> %res;
}
@@ -315,7 +212,7 @@ define <vscale x 4 x i1> @nor_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <v
; CHECK-LABEL: nor_4:
; CHECK: nor p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 4 x i1> @llvm.aarch64.sve.nor.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
%res = call <vscale x 4 x i1> @llvm.aarch64.sve.nor.z.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
ret <vscale x 4 x i1> %res;
}
@@ -323,7 +220,7 @@ define <vscale x 2 x i1> @nor_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <v
; CHECK-LABEL: nor_2:
; CHECK: nor p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 2 x i1> @llvm.aarch64.sve.nor.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
%res = call <vscale x 2 x i1> @llvm.aarch64.sve.nor.z.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
ret <vscale x 2 x i1> %res;
}
@@ -331,7 +228,7 @@ define <vscale x 16 x i1> @nand_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %P
; CHECK-LABEL: nand_16:
; CHECK: nand p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 16 x i1> @llvm.aarch64.sve.nand.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
%res = call <vscale x 16 x i1> @llvm.aarch64.sve.nand.z.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
ret <vscale x 16 x i1> %res;
}
@@ -339,7 +236,7 @@ define <vscale x 8 x i1> @nand_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <
; CHECK-LABEL: nand_8:
; CHECK: nand p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.aarch64.sve.nand.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
%res = call <vscale x 8 x i1> @llvm.aarch64.sve.nand.z.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
ret <vscale x 8 x i1> %res;
}
@@ -347,7 +244,7 @@ define <vscale x 4 x i1> @nand_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <
; CHECK-LABEL: nand_4:
; CHECK: nand p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 4 x i1> @llvm.aarch64.sve.nand.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
%res = call <vscale x 4 x i1> @llvm.aarch64.sve.nand.z.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
ret <vscale x 4 x i1> %res;
}
@@ -355,191 +252,35 @@ define <vscale x 2 x i1> @nand_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <
; CHECK-LABEL: nand_2:
; CHECK: nand p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 2 x i1> @llvm.aarch64.sve.nand.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
%res = call <vscale x 2 x i1> @llvm.aarch64.sve.nand.z.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
ret <vscale x 2 x i1> %res;
}
define <vscale x 16 x i1> @orrs_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd) {
; CHECK-LABEL: orrs_16:
; CHECK: orrs p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 16 x i1> @llvm.aarch64.sve.orrs.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
ret <vscale x 16 x i1> %res;
}
define <vscale x 8 x i1> @orrs_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd) {
; CHECK-LABEL: orrs_8:
; CHECK: orrs p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.aarch64.sve.orrs.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
ret <vscale x 8 x i1> %res;
}
define <vscale x 4 x i1> @orrs_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd) {
; CHECK-LABEL: orrs_4:
; CHECK: orrs p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 4 x i1> @llvm.aarch64.sve.orrs.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
ret <vscale x 4 x i1> %res;
}
define <vscale x 2 x i1> @orrs_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
; CHECK-LABEL: orrs_2:
; CHECK: orrs p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 2 x i1> @llvm.aarch64.sve.orrs.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
ret <vscale x 2 x i1> %res;
}
define <vscale x 16 x i1> @orns_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd) {
; CHECK-LABEL: orns_16:
; CHECK: orns p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 16 x i1> @llvm.aarch64.sve.orns.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
ret <vscale x 16 x i1> %res;
}
define <vscale x 8 x i1> @orns_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd) {
; CHECK-LABEL: orns_8:
; CHECK: orns p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.aarch64.sve.orns.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
ret <vscale x 8 x i1> %res;
}
define <vscale x 4 x i1> @orns_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd) {
; CHECK-LABEL: orns_4:
; CHECK: orns p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 4 x i1> @llvm.aarch64.sve.orns.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
ret <vscale x 4 x i1> %res;
}
define <vscale x 2 x i1> @orns_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
; CHECK-LABEL: orns_2:
; CHECK: orns p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 2 x i1> @llvm.aarch64.sve.orns.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
ret <vscale x 2 x i1> %res;
}
define <vscale x 16 x i1> @nors_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd) {
; CHECK-LABEL: nors_16:
; CHECK: nors p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 16 x i1> @llvm.aarch64.sve.nors.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
ret <vscale x 16 x i1> %res;
}
define <vscale x 8 x i1> @nors_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd) {
; CHECK-LABEL: nors_8:
; CHECK: nors p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.aarch64.sve.nors.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
ret <vscale x 8 x i1> %res;
}
define <vscale x 4 x i1> @nors_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd) {
; CHECK-LABEL: nors_4:
; CHECK: nors p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 4 x i1> @llvm.aarch64.sve.nors.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
ret <vscale x 4 x i1> %res;
}
define <vscale x 2 x i1> @nors_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
; CHECK-LABEL: nors_2:
; CHECK: nors p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 2 x i1> @llvm.aarch64.sve.nors.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
ret <vscale x 2 x i1> %res;
}
define <vscale x 16 x i1> @nands_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd) {
; CHECK-LABEL: nands_16:
; CHECK: nands p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 16 x i1> @llvm.aarch64.sve.nands.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
ret <vscale x 16 x i1> %res;
}
define <vscale x 8 x i1> @nands_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd) {
; CHECK-LABEL: nands_8:
; CHECK: nands p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.aarch64.sve.nands.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
ret <vscale x 8 x i1> %res;
}
define <vscale x 4 x i1> @nands_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd) {
; CHECK-LABEL: nands_4:
; CHECK: nands p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 4 x i1> @llvm.aarch64.sve.nands.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
ret <vscale x 4 x i1> %res;
}
define <vscale x 2 x i1> @nands_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
; CHECK-LABEL: nands_2:
; CHECK: nands p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
%res = call <vscale x 2 x i1> @llvm.aarch64.sve.nands.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
ret <vscale x 2 x i1> %res;
}
declare <vscale x 16 x i1> @llvm.aarch64.sve.and.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.and.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.and.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.and.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.bic.pred.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.bic.pred.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.bic.pred.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.bic.pred.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.eor.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.eor.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.eor.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.eor.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.ands.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.ands.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.ands.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.ands.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.bics.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.bics.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.bics.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.bics.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.eors.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.eors.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.eors.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.eors.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.orr.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.orr.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.orr.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.orr.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.orn.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.orn.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.orn.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.orn.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.nor.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.nor.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.nor.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.nor.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.nand.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.nand.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.nand.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.nand.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.orrs.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.orrs.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.orrs.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.orrs.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.orns.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.orns.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.orns.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.orns.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.nors.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.nors.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.nors.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.nors.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.nands.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.nands.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.nands.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.nands.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.and.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.and.z.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.and.z.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.and.z.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.bic.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.bic.z.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.bic.z.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.bic.z.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.eor.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.eor.z.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.eor.z.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.eor.z.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.orr.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.orr.z.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.orr.z.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.orr.z.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.orn.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.orn.z.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.orn.z.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.orn.z.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.nor.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.nor.z.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.nor.z.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.nor.z.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.nand.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.nand.z.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.nand.z.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.nand.z.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)