; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
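; Check that each predicated SVE intrinsic below is lowered to the matching
; predicated instruction for the i8, i16, i32 and i64 element types.

;
; ADD
;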
define <vscale x 16 x i8> @add_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: add_i8:
; CHECK: add z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @add_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: add_i16:
; CHECK: add z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @add_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: add_i32:
; CHECK: add z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @add_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: add_i64:
; CHECK: add z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
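;
; SUB
;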
define <vscale x 16 x i8> @sub_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: sub_i8:
; CHECK: sub z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sub.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sub_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sub_i16:
; CHECK: sub z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sub_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sub_i32:
; CHECK: sub z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sub_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sub_i64:
; CHECK: sub z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
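;
; SUBR
;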
define <vscale x 16 x i8> @subr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: subr_i8:
; CHECK: subr z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @subr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: subr_i16:
; CHECK: subr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @subr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: subr_i32:
; CHECK: subr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @subr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: subr_i64:
; CHECK: subr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
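;
; SMAX
;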
define <vscale x 16 x i8> @smax_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: smax_i8:
; CHECK: smax z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smax.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @smax_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: smax_i16:
; CHECK: smax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smax.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @smax_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: smax_i32:
; CHECK: smax z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smax.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @smax_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: smax_i64:
; CHECK: smax z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smax.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
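;
; UMAX
;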
define <vscale x 16 x i8> @umax_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: umax_i8:
; CHECK: umax z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umax.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @umax_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: umax_i16:
; CHECK: umax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umax.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @umax_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: umax_i32:
; CHECK: umax z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umax.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @umax_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: umax_i64:
; CHECK: umax z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umax.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
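;
; SMIN
;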
define <vscale x 16 x i8> @smin_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: smin_i8:
; CHECK: smin z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smin.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @smin_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: smin_i16:
; CHECK: smin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smin.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @smin_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: smin_i32:
; CHECK: smin z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smin.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @smin_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: smin_i64:
; CHECK: smin z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smin.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
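;
; UMIN
;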
define <vscale x 16 x i8> @umin_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: umin_i8:
; CHECK: umin z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umin.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @umin_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: umin_i16:
; CHECK: umin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umin.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @umin_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: umin_i32:
; CHECK: umin z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umin.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @umin_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: umin_i64:
; CHECK: umin z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
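;
; SABD
;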
define <vscale x 16 x i8> @sabd_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: sabd_i8:
; CHECK: sabd z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sabd.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sabd_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sabd_i16:
; CHECK: sabd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sabd.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sabd_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sabd_i32:
; CHECK: sabd z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sabd.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sabd_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sabd_i64:
; CHECK: sabd z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sabd.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
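;
; UABD
;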
define <vscale x 16 x i8> @uabd_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: uabd_i8:
; CHECK: uabd z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uabd.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @uabd_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: uabd_i16:
; CHECK: uabd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uabd.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uabd_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: uabd_i32:
; CHECK: uabd z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uabd.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @uabd_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: uabd_i64:
; CHECK: uabd z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uabd.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
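;
; Intrinsic declarations
;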
declare <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sub.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sub.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sub.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sub.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.smax.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.smax.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.smax.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.smax.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.umax.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.umax.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.umax.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.umax.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.smin.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.smin.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.smin.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.smin.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.umin.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.umin.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.umin.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sabd.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sabd.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sabd.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sabd.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uabd.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uabd.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uabd.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.uabd.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)