mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-11-25 04:02:41 +01:00
938ff50eda
This is the first of a series of patches to add CodeGen support exploiting the instructions of the z13 vector facility. This patch adds support for the native integer vector types (v16i8, v8i16, v4i32, v2i64). When the vector facility is present, we default to the new vector ABI. This is characterized by two major differences: - Vector types are passed/returned in vector registers (except for unnamed arguments of a variable-argument list function). - Vector types are at most 8-byte aligned. The reason for the choice of 8-byte vector alignment is that the hardware is able to efficiently load vectors at 8-byte alignment, and the ABI only guarantees 8-byte alignment of the stack pointer, so requiring any higher alignment for vectors would require dynamic stack re-alignment code. However, for compatibility with old code that may use vector types, when *not* using the vector facility, the old alignment rules (vector types are naturally aligned) remain in use. These alignment rules are not only implemented at the C language level (implemented in clang), but also at the LLVM IR level. This is done by selecting a different DataLayout string depending on whether the vector ABI is in effect or not. Based on a patch by Richard Sandiford. llvm-svn: 236521
84 lines
2.3 KiB
LLVM
84 lines
2.3 KiB
LLVM
; Test v16i8 minimum.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
; Test with slt: min(val1, val2) expressed as (val2 < val1) ? val2 : val1.
; Signed byte minimum should select the VMNB (vector minimum byte) instruction;
; operand order after the result register is commutative, hence the regex.
define <16 x i8> @f1(<16 x i8> %val1, <16 x i8> %val2) {
; CHECK-LABEL: f1:
; CHECK: vmnb %v24, {{%v24, %v26|%v26, %v24}}
; CHECK: br %r14
  %cmp = icmp slt <16 x i8> %val2, %val1
  %ret = select <16 x i1> %cmp, <16 x i8> %val2, <16 x i8> %val1
  ret <16 x i8> %ret
}
; Test with sle: the non-strict comparison must still fold to VMNB,
; since min is insensitive to how ties are broken.
define <16 x i8> @f2(<16 x i8> %val1, <16 x i8> %val2) {
; CHECK-LABEL: f2:
; CHECK: vmnb %v24, {{%v24, %v26|%v26, %v24}}
; CHECK: br %r14
  %cmp = icmp sle <16 x i8> %val2, %val1
  %ret = select <16 x i1> %cmp, <16 x i8> %val2, <16 x i8> %val1
  ret <16 x i8> %ret
}
; Test with sgt: min written with the inverted predicate and swapped
; select operands — (val2 > val1) ? val1 : val2 — still yields VMNB.
define <16 x i8> @f3(<16 x i8> %val1, <16 x i8> %val2) {
; CHECK-LABEL: f3:
; CHECK: vmnb %v24, {{%v24, %v26|%v26, %v24}}
; CHECK: br %r14
  %cmp = icmp sgt <16 x i8> %val2, %val1
  %ret = select <16 x i1> %cmp, <16 x i8> %val1, <16 x i8> %val2
  ret <16 x i8> %ret
}
; Test with sge: non-strict inverted predicate with swapped select
; operands; also recognized as a signed minimum and lowered to VMNB.
define <16 x i8> @f4(<16 x i8> %val1, <16 x i8> %val2) {
; CHECK-LABEL: f4:
; CHECK: vmnb %v24, {{%v24, %v26|%v26, %v24}}
; CHECK: br %r14
  %cmp = icmp sge <16 x i8> %val2, %val1
  %ret = select <16 x i1> %cmp, <16 x i8> %val1, <16 x i8> %val2
  ret <16 x i8> %ret
}
; Test with ult: unsigned byte minimum should select VMNLB
; (vector minimum logical byte) instead of the signed VMNB.
define <16 x i8> @f5(<16 x i8> %val1, <16 x i8> %val2) {
; CHECK-LABEL: f5:
; CHECK: vmnlb %v24, {{%v24, %v26|%v26, %v24}}
; CHECK: br %r14
  %cmp = icmp ult <16 x i8> %val2, %val1
  %ret = select <16 x i1> %cmp, <16 x i8> %val2, <16 x i8> %val1
  ret <16 x i8> %ret
}
; Test with ule: non-strict unsigned comparison, same VMNLB lowering.
define <16 x i8> @f6(<16 x i8> %val1, <16 x i8> %val2) {
; CHECK-LABEL: f6:
; CHECK: vmnlb %v24, {{%v24, %v26|%v26, %v24}}
; CHECK: br %r14
  %cmp = icmp ule <16 x i8> %val2, %val1
  %ret = select <16 x i1> %cmp, <16 x i8> %val2, <16 x i8> %val1
  ret <16 x i8> %ret
}
; Test with ugt: unsigned minimum via inverted predicate and swapped
; select operands — (val2 > val1) ? val1 : val2 — lowered to VMNLB.
define <16 x i8> @f7(<16 x i8> %val1, <16 x i8> %val2) {
; CHECK-LABEL: f7:
; CHECK: vmnlb %v24, {{%v24, %v26|%v26, %v24}}
; CHECK: br %r14
  %cmp = icmp ugt <16 x i8> %val2, %val1
  %ret = select <16 x i1> %cmp, <16 x i8> %val1, <16 x i8> %val2
  ret <16 x i8> %ret
}
; Test with uge: non-strict inverted unsigned predicate with swapped
; select operands; also recognized as unsigned minimum -> VMNLB.
define <16 x i8> @f8(<16 x i8> %val1, <16 x i8> %val2) {
; CHECK-LABEL: f8:
; CHECK: vmnlb %v24, {{%v24, %v26|%v26, %v24}}
; CHECK: br %r14
  %cmp = icmp uge <16 x i8> %val2, %val1
  %ret = select <16 x i1> %cmp, <16 x i8> %val1, <16 x i8> %val2
  ret <16 x i8> %ret
}