David Green 686f12811b [ARM] MVE vector for 64bit types
We need to make sure that we deal sensibly with vectors of the types v2i64
and v2f64, even though most of the time we cannot generate native operations
for them. This patch mostly adds a lot of testing, plus fixes a couple of the
issues found: and, or and xor can be legal for v2i64, and shift combining
needs a slight fixup.
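
For example, a plain bitwise operation on v2i64 should now select to a single
MVE instruction rather than being scalarized. A minimal sketch in the style of
the test file below (the function name and the expected lowering comment are
illustrative, not part of the committed test):

define arm_aapcs_vfpcc <2 x i64> @and_v2i64(<2 x i64> %a, <2 x i64> %b) {
entry:
  ; With 'and' legal for v2i64, this is expected to lower to a single
  ; 'vand q0, q0, q1' followed by 'bx lr'.
  %o = and <2 x i64> %a, %b
  ret <2 x i64> %o
}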

Differential Revision: https://reviews.llvm.org/D64316

llvm-svn: 366106
2019-07-15 18:42:54 +00:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s
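
; Sign extensions: vmovlb.sN sign-extends the low half of each lane in place;
; v4i8 -> v4i32 takes two steps (s8, then s16).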
define arm_aapcs_vfpcc <8 x i16> @sext_v8i8_v8i16(<8 x i8> %src) {
; CHECK-LABEL: sext_v8i8_v8i16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovlb.s8 q0, q0
; CHECK-NEXT: bx lr
entry:
%0 = sext <8 x i8> %src to <8 x i16>
ret <8 x i16> %0
}
define arm_aapcs_vfpcc <4 x i32> @sext_v4i16_v4i32(<4 x i16> %src) {
; CHECK-LABEL: sext_v4i16_v4i32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovlb.s16 q0, q0
; CHECK-NEXT: bx lr
entry:
%0 = sext <4 x i16> %src to <4 x i32>
ret <4 x i32> %0
}
define arm_aapcs_vfpcc <4 x i32> @sext_v4i8_v4i32(<4 x i8> %src) {
; CHECK-LABEL: sext_v4i8_v4i32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovlb.s8 q0, q0
; CHECK-NEXT: vmovlb.s16 q0, q0
; CHECK-NEXT: bx lr
entry:
%0 = sext <4 x i8> %src to <4 x i32>
ret <4 x i32> %0
}
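
; No single MVE instruction widens 32-bit lanes to 64-bit, so the
; v2i32 -> v2i64 sign extension is expanded through GPR moves and
; arithmetic shifts.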
define arm_aapcs_vfpcc <2 x i64> @sext_v2i32_v2i64(<2 x i32> %src) {
; CHECK-LABEL: sext_v2i32_v2i64:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vmov.32 q1[0], r0
; CHECK-NEXT: asrs r0, r0, #31
; CHECK-NEXT: vmov.32 q1[1], r0
; CHECK-NEXT: vmov r0, s2
; CHECK-NEXT: vmov.32 q1[2], r0
; CHECK-NEXT: asrs r0, r0, #31
; CHECK-NEXT: vmov.32 q1[3], r0
; CHECK-NEXT: vmov q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = sext <2 x i32> %src to <2 x i64>
ret <2 x i64> %0
}
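
; Zero extensions: vmovlb.uN zero-extends the low half of each lane in place.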
define arm_aapcs_vfpcc <8 x i16> @zext_v8i8_v8i16(<8 x i8> %src) {
; CHECK-LABEL: zext_v8i8_v8i16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovlb.u8 q0, q0
; CHECK-NEXT: bx lr
entry:
%0 = zext <8 x i8> %src to <8 x i16>
ret <8 x i16> %0
}
define arm_aapcs_vfpcc <4 x i32> @zext_v4i16_v4i32(<4 x i16> %src) {
; CHECK-LABEL: zext_v4i16_v4i32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovlb.u16 q0, q0
; CHECK-NEXT: bx lr
entry:
%0 = zext <4 x i16> %src to <4 x i32>
ret <4 x i32> %0
}
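
; For v4i8 -> v4i32 the backend zero-extends by masking each 32-bit lane with
; 0xff instead of using two vmovlb.u steps.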
define arm_aapcs_vfpcc <4 x i32> @zext_v4i8_v4i32(<4 x i8> %src) {
; CHECK-LABEL: zext_v4i8_v4i32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.i32 q1, #0xff
; CHECK-NEXT: vand q0, q0, q1
; CHECK-NEXT: bx lr
entry:
%0 = zext <4 x i8> %src to <4 x i32>
ret <4 x i32> %0
}
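
; For v2i32 -> v2i64 the per-lane mask 0x00000000ffffffff is loaded from a
; constant pool and applied with vand.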
define arm_aapcs_vfpcc <2 x i64> @zext_v2i32_v2i64(<2 x i32> %src) {
; CHECK-LABEL: zext_v2i32_v2i64:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: adr r0, .LCPI7_0
; CHECK-NEXT: vldrw.u32 q1, [r0]
; CHECK-NEXT: vand q0, q0, q1
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI7_0:
; CHECK-NEXT: .long 4294967295 @ 0xffffffff
; CHECK-NEXT: .long 0 @ 0x0
; CHECK-NEXT: .long 4294967295 @ 0xffffffff
; CHECK-NEXT: .long 0 @ 0x0
entry:
%0 = zext <2 x i32> %src to <2 x i64>
ret <2 x i64> %0
}
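
; Truncations that only keep the low bits of each lane need no instructions:
; the values are already in place within the wider lanes, so each function
; is empty apart from the return.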
define arm_aapcs_vfpcc <8 x i8> @trunc_v8i16_v8i8(<8 x i16> %src) {
; CHECK-LABEL: trunc_v8i16_v8i8:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
%0 = trunc <8 x i16> %src to <8 x i8>
ret <8 x i8> %0
}
define arm_aapcs_vfpcc <4 x i16> @trunc_v4i32_v4i16(<4 x i32> %src) {
; CHECK-LABEL: trunc_v4i32_v4i16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
%0 = trunc <4 x i32> %src to <4 x i16>
ret <4 x i16> %0
}
define arm_aapcs_vfpcc <4 x i8> @trunc_v4i32_v4i8(<4 x i32> %src) {
; CHECK-LABEL: trunc_v4i32_v4i8:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
%0 = trunc <4 x i32> %src to <4 x i8>
ret <4 x i8> %0
}
define arm_aapcs_vfpcc <2 x i32> @trunc_v2i64_v2i32(<2 x i64> %src) {
; CHECK-LABEL: trunc_v2i64_v2i32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
%0 = trunc <2 x i64> %src to <2 x i32>
ret <2 x i32> %0
}