
[AArch64][SVE2] Add bfloat16 support to whilerw/whilewr intrinsics

Reviewed By: fpetrogalli

Differential Revision: https://reviews.llvm.org/D82399
Author: Cullen Rhodes
Date: 2020-06-19 10:50:38 +00:00
Parent: 8543c38ff5
Commit: f2a50c987a
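
For context, a hedged sketch of how these intrinsics are usually reached from C: the SVE2 ACLE exposes the WHILERW/WHILEWR conflict-detection instructions through svwhilerw_*/svwhilewr_* in arm_sve.h, and with bfloat16 support a compiler would lower the bf16 variants to the llvm.aarch64.sve.whilerw/whilewr intrinsics exercised by the tests below. The builtin names, types, and feature requirements here follow the ACLE naming scheme for the other element types and are assumptions, not part of this commit.

// Hedged sketch (not from this commit): ACLE-level use of the SVE2
// conflict-detection intrinsics on bfloat16 pointers. Assumes a compiler
// providing arm_sve.h with SVE2 and bfloat16 support enabled.
#include <arm_sve.h>

// Predicate of the leading lanes that are free of a read-after-write hazard
// between the two buffers; expected to map to a single WHILERW Pd.H.
svbool_t rw_safe_lanes(const bfloat16_t *a, const bfloat16_t *b) {
  return svwhilerw_bf16(a, b);
}

// Same idea for write-after-read hazards, mapping to WHILEWR Pd.H.
svbool_t wr_safe_lanes(const bfloat16_t *a, const bfloat16_t *b) {
  return svwhilewr_bf16(a, b);
}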


@@ -36,6 +36,14 @@ define <vscale x 2 x i1> @whilerw_i64(i64* %a, i64* %b) {
   ret <vscale x 2 x i1> %out
 }
 
+define <vscale x 8 x i1> @whilerw_bfloat(bfloat* %a, bfloat* %b) {
+; CHECK-LABEL: whilerw_bfloat:
+; CHECK: whilerw p0.h, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilerw.h.nx8i1.bf16.bf16(bfloat* %a, bfloat* %b)
+  ret <vscale x 8 x i1> %out
+}
+
 define <vscale x 8 x i1> @whilerw_half(half* %a, half* %b) {
 ; CHECK-LABEL: whilerw_half:
 ; CHECK: whilerw p0.h, x0, x1
@@ -96,6 +104,14 @@ define <vscale x 2 x i1> @whilewr_i64(i64* %a, i64* %b) {
   ret <vscale x 2 x i1> %out
 }
 
+define <vscale x 8 x i1> @whilewr_bfloat(bfloat* %a, bfloat* %b) {
+; CHECK-LABEL: whilewr_bfloat:
+; CHECK: whilewr p0.h, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilewr.h.nx8i1.bf16.bf16(bfloat* %a, bfloat* %b)
+  ret <vscale x 8 x i1> %out
+}
+
 define <vscale x 8 x i1> @whilewr_half(half* %a, half* %b) {
 ; CHECK-LABEL: whilewr_half:
 ; CHECK: whilewr p0.h, x0, x1
@@ -125,6 +141,7 @@ declare <vscale x 8 x i1> @llvm.aarch64.sve.whilerw.h.nx8i1(i16* %a, i16* %b)
 declare <vscale x 4 x i1> @llvm.aarch64.sve.whilerw.s.nx4i1(i32* %a, i32* %b)
 declare <vscale x 2 x i1> @llvm.aarch64.sve.whilerw.d.nx2i1(i64* %a, i64* %b)
 
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilerw.h.nx8i1.bf16.bf16(bfloat* %a, bfloat* %b)
 declare <vscale x 8 x i1> @llvm.aarch64.sve.whilerw.h.nx8i1.f16.f16(half* %a, half* %b)
 declare <vscale x 4 x i1> @llvm.aarch64.sve.whilerw.s.nx4i1.f32.f32(float* %a, float* %b)
 declare <vscale x 2 x i1> @llvm.aarch64.sve.whilerw.d.nx2i1.f64.f64(double* %a, double* %b)
@@ -134,6 +151,7 @@ declare <vscale x 8 x i1> @llvm.aarch64.sve.whilewr.h.nx8i1(i16* %a, i16* %b)
 declare <vscale x 4 x i1> @llvm.aarch64.sve.whilewr.s.nx4i1(i32* %a, i32* %b)
 declare <vscale x 2 x i1> @llvm.aarch64.sve.whilewr.d.nx2i1(i64* %a, i64* %b)
 
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilewr.h.nx8i1.bf16.bf16(bfloat* %a, bfloat* %b)
 declare <vscale x 8 x i1> @llvm.aarch64.sve.whilewr.h.nx8i1.f16.f16(half* %a, half* %b)
 declare <vscale x 4 x i1> @llvm.aarch64.sve.whilewr.s.nx4i1.f32.f32(float* %a, float* %b)
 declare <vscale x 2 x i1> @llvm.aarch64.sve.whilewr.d.nx2i1.f64.f64(double* %a, double* %b)