llvm-mirror/test/CodeGen/ARM/vector-extend-narrow.ll
Ahmed Bougacha b54a536d6f [ARM] Enable vector extload combine for legal types.
This commit enables forming vector extloads for ARM.
It only does so for legal types, and when we can't fold the extension
in a wide/long form of the user instruction.

Enabling it for larger types isn't as good an idea on ARM as it is on
X86, because: 
- we pretend that extloads are legal, but end up generating vld+vmov
- we have instructions like vld {dN, dM}, which can't be generated
  when we "manually expand" extloads to vld+vmov.

For legal types, the combine doesn't fire that often: in the integration
tests it only fires in a big-endian testcase, where it removes a pointless
AND.

Related to rdar://19723053
Differential Revision: http://reviews.llvm.org/D7423

llvm-svn: 231396
2015-03-05 19:37:53 +00:00
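
As a rough illustration of the behaviour described above (not the commit's
verbatim code), the sketch below shows how a target can gate the DAGCombiner's
vector extload formation. It assumes the TargetLowering::isVectorLoadExtDesirable
hook with an override in ARMISelLowering.cpp, and the user-opcode list is an
assumption: extloads are formed only for legal result types, and only when the
single user cannot fold the extension into a long/wide form of its own
instruction.

// Sketch only: gate vector extload formation for ARM (assumed hook:
// TargetLowering::isVectorLoadExtDesirable; details differ from the real code).
bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
  EVT VT = ExtVal.getValueType();

  // Only form extloads whose result type is legal; for wider types the
  // "extload" would just be re-expanded into a vld followed by a vmovl.
  if (!isTypeLegal(VT))
    return false;

  // With several users, keeping a single extending load is still a win.
  if (!ExtVal.getNode()->hasOneUse())
    return true;

  // With exactly one user, prefer folding the extension into a long/wide
  // NEON form of that instruction (e.g. vaddl/vsubl) over an extload.
  // The opcode list here is illustrative, not exhaustive.
  switch ((*ExtVal.getNode()->use_begin())->getOpcode()) {
  case ISD::ADD:
  case ISD::SUB:
  case ISD::SHL:
    return false;
  default:
    return true;
  }
}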

; RUN: llc -mtriple armv7 %s -o - | FileCheck %s
; CHECK-LABEL: f:
define float @f(<4 x i16>* nocapture %in) {
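; Load <4 x i16>, widen it with vmovl.u16, then convert each lane to float.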
; CHECK: vld1
; CHECK: vmovl.u16
%1 = load <4 x i16>, <4 x i16>* %in
; CHECK: vcvt.f32.u32
%2 = uitofp <4 x i16> %1 to <4 x float>
%3 = extractelement <4 x float> %2, i32 0
%4 = extractelement <4 x float> %2, i32 1
%5 = extractelement <4 x float> %2, i32 2
; CHECK: vadd.f32
%6 = fadd float %3, %4
%7 = fadd float %6, %5
ret float %7
}
; CHECK-LABEL: g:
define float @g(<4 x i8>* nocapture %in) {
; Note: vld1 here is reasonably important. Mixing VFP and NEON
; instructions is bad on some cores.
; CHECK: vld1
; CHECK: vmovl.u8
; CHECK: vmovl.u16
%1 = load <4 x i8>, <4 x i8>* %in
; CHECK: vcvt.f32.u32
%2 = uitofp <4 x i8> %1 to <4 x float>
%3 = extractelement <4 x float> %2, i32 0
%4 = extractelement <4 x float> %2, i32 1
%5 = extractelement <4 x float> %2, i32 2
; CHECK: vadd.f32
%6 = fadd float %3, %4
%7 = fadd float %6, %5
ret float %7
}
; CHECK-LABEL: h:
define <4 x i8> @h(<4 x float> %v) {
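; Convert each float lane to i32, then narrow the result down to <4 x i8>.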
; CHECK: vcvt.{{[us]}}32.f32
; CHECK: vmovn.i32
%1 = fptoui <4 x float> %v to <4 x i8>
ret <4 x i8> %1
}
; CHECK-LABEL: i:
define <4 x i8> @i(<4 x i8>* %x) {
; Note: vld1 here is reasonably important. Mixing VFP and NEON
; instructions is bad on some cores.
; CHECK: vld1
; CHECK: vmovl.s8
; CHECK: vmovl.s16
; CHECK: vrecpe
; CHECK: vrecps
; CHECK: vmul
; CHECK: vmovn
%1 = load <4 x i8>, <4 x i8>* %x, align 4
%2 = sdiv <4 x i8> zeroinitializer, %1
ret <4 x i8> %2
}
; CHECK-LABEL: j:
define <4 x i32> @j(<4 x i8>* %in) nounwind {
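; The zero-extending <4 x i8> load should be selected as vld1 plus vmovl
; steps, with no leftover vand re-masking the lanes.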
; CHECK: vld1
; CHECK: vmovl.u8
; CHECK: vmovl.u16
; CHECK-NOT: vand
%1 = load <4 x i8>, <4 x i8>* %in, align 4
%2 = zext <4 x i8> %1 to <4 x i32>
ret <4 x i32> %2
}