mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-10-18 18:42:46 +02:00
acca14efdf
Summary: Adds the following inline asm constraints for SVE: - w: SVE vector register with full range, Z0 to Z31 - x: Restricted to registers Z0 to Z15 inclusive. - y: Restricted to registers Z0 to Z7 inclusive. This change also adds the "z" modifier to interpret a register as an SVE register. Not all of the bitconvert patterns added by this patch are used, but they have been included here for completeness. Reviewers: t.p.northover, sdesmalen, rovka, momchil.velikov, rengolin, cameron.mcinally, greened Reviewed By: sdesmalen Subscribers: javed.absar, tschuett, rkruppe, psnobl, cfe-commits, llvm-commits Tags: #llvm Differential Revision: https://reviews.llvm.org/D66302 llvm-svn: 370673
45 lines
2.0 KiB
LLVM
45 lines
2.0 KiB
LLVM
; Test that the SVE inline-asm register constraints ("w", "x", "y") select the
; expected register classes. We stop after finalize-isel so FileCheck can match
; the virtual-register classes (zpr, zpr_4b, zpr_3b) in the emitted MIR.
; RUN: llc < %s -mtriple aarch64-none-linux-gnu -mattr=+sve -stop-after=finalize-isel | FileCheck %s --check-prefix=CHECK

target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-none-linux-gnu"
; Function Attrs: nounwind readnone
; CHECK: [[ARG1:%[0-9]+]]:zpr = COPY $z1
; CHECK: [[ARG2:%[0-9]+]]:zpr = COPY $z0
; CHECK: [[ARG3:%[0-9]+]]:zpr = COPY [[ARG2]]
; CHECK: [[ARG4:%[0-9]+]]:zpr_3b = COPY [[ARG1]]
define <vscale x 16 x i8> @test_svadd_i8(<vscale x 16 x i8> %Zn, <vscale x 16 x i8> %Zm) {
  ; The "y" constraint restricts the second input to Z0-Z7, so the operand
  ; must be copied into a zpr_3b-class virtual register before the asm.
  %1 = tail call <vscale x 16 x i8> asm "add $0.b, $1.b, $2.b", "=w,w,y"(<vscale x 16 x i8> %Zn, <vscale x 16 x i8> %Zm)
  ret <vscale x 16 x i8> %1
}
; Function Attrs: nounwind readnone
; CHECK: [[ARG1:%[0-9]+]]:zpr = COPY $z1
; CHECK: [[ARG2:%[0-9]+]]:zpr = COPY $z0
; CHECK: [[ARG3:%[0-9]+]]:zpr = COPY [[ARG2]]
; CHECK: [[ARG4:%[0-9]+]]:zpr_4b = COPY [[ARG1]]
define <vscale x 2 x i64> @test_svsub_i64(<vscale x 2 x i64> %Zn, <vscale x 2 x i64> %Zm) {
  ; The "x" constraint restricts the second input to Z0-Z15, so the operand
  ; must be copied into a zpr_4b-class virtual register before the asm.
  %1 = tail call <vscale x 2 x i64> asm "sub $0.d, $1.d, $2.d", "=w,w,x"(<vscale x 2 x i64> %Zn, <vscale x 2 x i64> %Zm)
  ret <vscale x 2 x i64> %1
}
; Function Attrs: nounwind readnone
; CHECK: [[ARG1:%[0-9]+]]:zpr = COPY $z1
; CHECK: [[ARG2:%[0-9]+]]:zpr = COPY $z0
; CHECK: [[ARG3:%[0-9]+]]:zpr = COPY [[ARG2]]
; CHECK: [[ARG4:%[0-9]+]]:zpr_3b = COPY [[ARG1]]
define <vscale x 8 x half> @test_svfmul_f16(<vscale x 8 x half> %Zn, <vscale x 8 x half> %Zm) {
  ; The "y" constraint restricts the second input to Z0-Z7 (zpr_3b class);
  ; also exercises the FP half-element type with SVE asm operands.
  %1 = tail call <vscale x 8 x half> asm "fmul $0.h, $1.h, $2.h", "=w,w,y"(<vscale x 8 x half> %Zn, <vscale x 8 x half> %Zm)
  ret <vscale x 8 x half> %1
}
; Function Attrs: nounwind readnone
; CHECK: [[ARG1:%[0-9]+]]:zpr = COPY $z1
; CHECK: [[ARG2:%[0-9]+]]:zpr = COPY $z0
; CHECK: [[ARG3:%[0-9]+]]:zpr = COPY [[ARG2]]
; CHECK: [[ARG4:%[0-9]+]]:zpr_4b = COPY [[ARG1]]
define <vscale x 4 x float> @test_svfmul_f(<vscale x 4 x float> %Zn, <vscale x 4 x float> %Zm) {
  ; The "x" constraint restricts the second input to Z0-Z15 (zpr_4b class);
  ; also exercises the FP single-element type with SVE asm operands.
  %1 = tail call <vscale x 4 x float> asm "fmul $0.s, $1.s, $2.s", "=w,w,x"(<vscale x 4 x float> %Zn, <vscale x 4 x float> %Zm)
  ret <vscale x 4 x float> %1
}
|