; Provenance (scrape residue converted to comments so the file stays valid IR):
; mirror of https://github.com/RPCS3/llvm-mirror.git, synced 2024-10-24, commit 95701f5ce4
; Commit message: Besides better codegen, the motivation is to be able to
; canonicalize this pattern in IR (currently we don't) knowing that the backend
; is prepared for that. This may also allow removing code for special constant
; cases in DAGCombiner::foldSelectOfConstants() that was added in D30180.
; Differential Revision: https://reviews.llvm.org/D31944
; llvm-svn: 301457
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=arm-eabi -mattr=neon | FileCheck %s
; Scalar case: sext(i1)+1 folds to xor-with-1 (sext i1 gives 0/-1; adding 1
; gives 1/0, i.e. the logical NOT of %x).
define i32 @sext_inc(i1 zeroext %x) {
; CHECK-LABEL: sext_inc:
; CHECK:       @ BB#0:
; CHECK-NEXT:    eor r0, r0, #1
; CHECK-NEXT:    mov pc, lr
  %ext = sext i1 %x to i32
  %add = add i32 %ext, 1
  ret i32 %add
}
; Vector case: sext(<4 x i1>)+1 becomes a NEON veor of the low bit followed by
; widening and masking, instead of a sext plus vector add.
define <4 x i32> @sext_inc_vec(<4 x i1> %x) {
; CHECK-LABEL: sext_inc_vec:
; CHECK:       @ BB#0:
; CHECK-NEXT:    vmov.i16 d16, #0x1
; CHECK-NEXT:    vmov d17, r0, r1
; CHECK-NEXT:    vmov.i32 q9, #0x1
; CHECK-NEXT:    veor d16, d17, d16
; CHECK-NEXT:    vmovl.u16 q8, d16
; CHECK-NEXT:    vand q8, q8, q9
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
  %ext = sext <4 x i1> %x to <4 x i32>
  %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}
; Compare feeding the pattern: sgt is inverted to sge (vcge with swapped
; operands) so the sext+add collapses into the compare plus a vand with 1.
define <4 x i32> @cmpgt_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmpgt_sext_inc_vec:
; CHECK:       @ BB#0:
; CHECK-NEXT:    mov r12, sp
; CHECK-NEXT:    vmov d19, r2, r3
; CHECK-NEXT:    vmov.i32 q10, #0x1
; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
; CHECK-NEXT:    vmov d18, r0, r1
; CHECK-NEXT:    vcge.s32 q8, q8, q9
; CHECK-NEXT:    vand q8, q8, q10
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
  %cmp = icmp sgt <4 x i32> %x, %y
  %ext = sext <4 x i1> %cmp to <4 x i32>
  %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}
; icmp ne feeding the pattern: the ne is inverted to eq (vceq) so the
; sext+add again collapses into compare plus vand with 1.
define <4 x i32> @cmpne_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmpne_sext_inc_vec:
; CHECK:       @ BB#0:
; CHECK-NEXT:    mov r12, sp
; CHECK-NEXT:    vmov d19, r2, r3
; CHECK-NEXT:    vmov.i32 q10, #0x1
; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
; CHECK-NEXT:    vmov d18, r0, r1
; CHECK-NEXT:    vceq.i32 q8, q9, q8
; CHECK-NEXT:    vand q8, q8, q10
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
  %cmp = icmp ne <4 x i32> %x, %y
  %ext = sext <4 x i1> %cmp to <4 x i32>
  %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}