llvm-mirror/test/CodeGen/ARM/fast-isel-intrinsic.ll
JF Bastien cb60eaba94 Enable FastISel on ARM for Linux and NaCl, not MCJIT
This is a resubmit of r182877, which was reverted because it broke
MCJIT tests on ARM. The patch leaves MCJIT on ARM as it was before: only
enabled for iOS. I've CC'ed people from the original review and revert.

FastISel was only enabled for iOS ARM and Thumb2; this patch enables it
for ARM (not Thumb2) on Linux and NaCl, but not for MCJIT.

Thumb2 support needs a bit more work, mainly around register class
restrictions.

The patch punts to SelectionDAG when doing TLS relocation on non-Darwin
targets. I will fix this and other FastISel-to-SelectionDAG failures in
a separate patch.

The patch also forces FastISel to retain frame pointers: iOS always
keeps them for backtraces (so emitted code won't change because of
this), but Linux was getting much worse code that was incorrect when
using big frames (such as test-suite's lencod). I'll also fix this in a
later patch; it will probably require a peephole so that FastISel
doesn't rematerialize frame pointers back-to-back.

The test changes are straightforward, similar to:
  http://lists.cs.uiuc.edu/pipermail/llvm-commits/Week-of-Mon-20130513/174279.html
They also add a vararg test that got dropped in that change.

I ran the full test-suite under LNT on A15 hardware with
--optimize-option=-O0 and all the tests pass. All the tests also pass
with make check-all on x86. I also re-ran the check-all tests that
failed on ARM, and they all pass.

llvm-svn: 183966
2013-06-14 02:49:43 +00:00

; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=ARM
; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios -arm-long-calls | FileCheck %s --check-prefix=ARM-LONG
; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi -arm-long-calls | FileCheck %s --check-prefix=ARM-LONG
; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -arm-long-calls | FileCheck %s --check-prefix=THUMB-LONG
; Note that some of these tests assume that relocations are either
; movw/movt or constant pool loads. Different platforms will select
; different approaches.
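; For example, some configurations materialize an address with a movw/movt
; pair such as
;   movw r0, :lower16:_message1
;   movt r0, :upper16:_message1
; while others load it from a constant pool, e.g.
;   ldr r0, .LCPI0_0
; (label purely illustrative), which is why the patterns below accept
; either form.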
@message1 = global [60 x i8] c"The LLVM Compiler Infrastructure\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00", align 1
@temp = common global [60 x i8] zeroinitializer, align 1
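
; t1: a 10-byte memset of @message1+5 is lowered to a call to memset. The
; checks cover the address materialization, the zero-extension of the i8
; value (the "and ... #255"), and the call itself; the LONG prefixes cover
; the -arm-long-calls runs, which load the callee address into a register
; and call it with blx.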
define void @t1() nounwind ssp {
; ARM: t1
; ARM: {{(movw r0, :lower16:_?message1)|(ldr r0, .LCPI)}}
; ARM: {{(movt r0, :upper16:_?message1)|(ldr r0, \[r0\])}}
; ARM: add r0, r0, #5
; ARM: movw r1, #64
; ARM: movw r2, #10
; ARM: and r1, r1, #255
; ARM: bl {{_?}}memset
; ARM-LONG: t1
; ARM-LONG: {{(movw r3, :lower16:L_memset\$non_lazy_ptr)|(ldr r3, .LCPI)}}
; ARM-LONG: {{(movt r3, :upper16:L_memset\$non_lazy_ptr)?}}
; ARM-LONG: ldr r3, [r3]
; ARM-LONG: blx r3
; THUMB: t1
; THUMB: {{(movw r0, :lower16:_?message1)|(ldr.n r0, .LCPI)}}
; THUMB: {{(movt r0, :upper16:_?message1)|(ldr r0, \[r0\])}}
; THUMB: adds r0, #5
; THUMB: movs r1, #64
; THUMB: movt r1, #0
; THUMB: movs r2, #10
; THUMB: movt r2, #0
; THUMB: and r1, r1, #255
; THUMB: bl {{_?}}memset
; THUMB-LONG: t1
; THUMB-LONG: movw r3, :lower16:L_memset$non_lazy_ptr
; THUMB-LONG: movt r3, :upper16:L_memset$non_lazy_ptr
; THUMB-LONG: ldr r3, [r3]
; THUMB-LONG: blx r3
call void @llvm.memset.p0i8.i32(i8* getelementptr inbounds ([60 x i8]* @message1, i32 0, i32 5), i8 64, i32 10, i32 4, i1 false)
ret void
}
declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
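
; t2: a 17-byte memcpy is not inlined here and is lowered to a call to
; memcpy; the checks also cover the spill and reload of an argument
; register around the argument setup.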
define void @t2() nounwind ssp {
; ARM: t2
; ARM: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr r0, .LCPI)}}
; ARM: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; ARM: ldr r0, [r0]
; ARM: add r1, r0, #4
; ARM: add r0, r0, #16
; ARM: movw r2, #17
; ARM: str r0, [sp[[SLOT:[, #0-9]*]]] @ 4-byte Spill
; ARM: mov r0, r1
; ARM: ldr r1, [sp[[SLOT]]] @ 4-byte Reload
; ARM: bl {{_?}}memcpy
; ARM-LONG: t2
; ARM-LONG: {{(movw r3, :lower16:L_memcpy\$non_lazy_ptr)|(ldr r3, .LCPI)}}
; ARM-LONG: {{(movt r3, :upper16:L_memcpy\$non_lazy_ptr)?}}
; ARM-LONG: ldr r3, [r3]
; ARM-LONG: blx r3
; THUMB: t2
; THUMB: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr.n r0, .LCPI)}}
; THUMB: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; THUMB: ldr r0, [r0]
; THUMB: adds r1, r0, #4
; THUMB: adds r0, #16
; THUMB: movs r2, #17
; THUMB: movt r2, #0
; THUMB: str r0, [sp[[SLOT:[, #0-9]*]]] @ 4-byte Spill
; THUMB: mov r0, r1
; THUMB: ldr r1, [sp[[SLOT]]] @ 4-byte Reload
; THUMB: bl {{_?}}memcpy
; THUMB-LONG: t2
; THUMB-LONG: movw r3, :lower16:L_memcpy$non_lazy_ptr
; THUMB-LONG: movt r3, :upper16:L_memcpy$non_lazy_ptr
; THUMB-LONG: ldr r3, [r3]
; THUMB-LONG: blx r3
call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8]* @temp, i32 0, i32 4), i8* getelementptr inbounds ([60 x i8]* @temp, i32 0, i32 16), i32 17, i32 4, i1 false)
ret void
}
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
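
; t3: a 10-byte memmove is lowered to a call to memmove rather than being
; inlined, since memmove must tolerate overlapping buffers.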
define void @t3() nounwind ssp {
; ARM: t3
; ARM: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr r0, .LCPI)}}
; ARM: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; ARM: ldr r0, [r0]
; ARM: add r1, r0, #4
; ARM: add r0, r0, #16
; ARM: movw r2, #10
; ARM: mov r0, r1
; ARM: bl {{_?}}memmove
; ARM-LONG: t3
; ARM-LONG: {{(movw r3, :lower16:L_memmove\$non_lazy_ptr)|(ldr r3, .LCPI)}}
; ARM-LONG: {{(movt r3, :upper16:L_memmove\$non_lazy_ptr)?}}
; ARM-LONG: ldr r3, [r3]
; ARM-LONG: blx r3
; THUMB: t3
; THUMB: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr.n r0, .LCPI)}}
; THUMB: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; THUMB: ldr r0, [r0]
; THUMB: adds r1, r0, #4
; THUMB: adds r0, #16
; THUMB: movs r2, #10
; THUMB: movt r2, #0
; THUMB: str r0, [sp[[SLOT:[, #0-9]*]]] @ 4-byte Spill
; THUMB: mov r0, r1
; THUMB: ldr r1, [sp[[SLOT]]] @ 4-byte Reload
; THUMB: bl {{_?}}memmove
; THUMB-LONG: t3
; THUMB-LONG: movw r3, :lower16:L_memmove$non_lazy_ptr
; THUMB-LONG: movt r3, :upper16:L_memmove$non_lazy_ptr
; THUMB-LONG: ldr r3, [r3]
; THUMB-LONG: blx r3
call void @llvm.memmove.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8]* @temp, i32 0, i32 4), i8* getelementptr inbounds ([60 x i8]* @temp, i32 0, i32 16), i32 10, i32 1, i1 false)
ret void
}
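
; t4: a 10-byte memcpy with 4-byte alignment is inlined as two 32-bit
; load/store pairs plus one 16-bit pair; no libcall is emitted.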
define void @t4() nounwind ssp {
; ARM: t4
; ARM: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr r0, .LCPI)}}
; ARM: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; ARM: ldr r0, [r0]
; ARM: ldr r1, [r0, #16]
; ARM: str r1, [r0, #4]
; ARM: ldr r1, [r0, #20]
; ARM: str r1, [r0, #8]
; ARM: ldrh r1, [r0, #24]
; ARM: strh r1, [r0, #12]
; ARM: bx lr
; THUMB: t4
; THUMB: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr.n r0, .LCPI)}}
; THUMB: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; THUMB: ldr r0, [r0]
; THUMB: ldr r1, [r0, #16]
; THUMB: str r1, [r0, #4]
; THUMB: ldr r1, [r0, #20]
; THUMB: str r1, [r0, #8]
; THUMB: ldrh r1, [r0, #24]
; THUMB: strh r1, [r0, #12]
; THUMB: bx lr
call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8]* @temp, i32 0, i32 4), i8* getelementptr inbounds ([60 x i8]* @temp, i32 0, i32 16), i32 10, i32 4, i1 false)
ret void
}
declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
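
; t5: the same 10-byte memcpy with only 2-byte alignment is inlined as five
; 16-bit load/store pairs.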
define void @t5() nounwind ssp {
; ARM: t5
; ARM: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr r0, .LCPI)}}
; ARM: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; ARM: ldr r0, [r0]
; ARM: ldrh r1, [r0, #16]
; ARM: strh r1, [r0, #4]
; ARM: ldrh r1, [r0, #18]
; ARM: strh r1, [r0, #6]
; ARM: ldrh r1, [r0, #20]
; ARM: strh r1, [r0, #8]
; ARM: ldrh r1, [r0, #22]
; ARM: strh r1, [r0, #10]
; ARM: ldrh r1, [r0, #24]
; ARM: strh r1, [r0, #12]
; ARM: bx lr
; THUMB: t5
; THUMB: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr.n r0, .LCPI)}}
; THUMB: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; THUMB: ldr r0, [r0]
; THUMB: ldrh r1, [r0, #16]
; THUMB: strh r1, [r0, #4]
; THUMB: ldrh r1, [r0, #18]
; THUMB: strh r1, [r0, #6]
; THUMB: ldrh r1, [r0, #20]
; THUMB: strh r1, [r0, #8]
; THUMB: ldrh r1, [r0, #22]
; THUMB: strh r1, [r0, #10]
; THUMB: ldrh r1, [r0, #24]
; THUMB: strh r1, [r0, #12]
; THUMB: bx lr
call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8]* @temp, i32 0, i32 4), i8* getelementptr inbounds ([60 x i8]* @temp, i32 0, i32 16), i32 10, i32 2, i1 false)
ret void
}
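
; t6: with 1-byte alignment the 10-byte memcpy is inlined as ten byte-sized
; load/store pairs.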
define void @t6() nounwind ssp {
; ARM: t6
; ARM: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr r0, .LCPI)}}
; ARM: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; ARM: ldr r0, [r0]
; ARM: ldrb r1, [r0, #16]
; ARM: strb r1, [r0, #4]
; ARM: ldrb r1, [r0, #17]
; ARM: strb r1, [r0, #5]
; ARM: ldrb r1, [r0, #18]
; ARM: strb r1, [r0, #6]
; ARM: ldrb r1, [r0, #19]
; ARM: strb r1, [r0, #7]
; ARM: ldrb r1, [r0, #20]
; ARM: strb r1, [r0, #8]
; ARM: ldrb r1, [r0, #21]
; ARM: strb r1, [r0, #9]
; ARM: ldrb r1, [r0, #22]
; ARM: strb r1, [r0, #10]
; ARM: ldrb r1, [r0, #23]
; ARM: strb r1, [r0, #11]
; ARM: ldrb r1, [r0, #24]
; ARM: strb r1, [r0, #12]
; ARM: ldrb r1, [r0, #25]
; ARM: strb r1, [r0, #13]
; ARM: bx lr
; THUMB: t6
; THUMB: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr.n r0, .LCPI)}}
; THUMB: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; THUMB: ldr r0, [r0]
; THUMB: ldrb r1, [r0, #16]
; THUMB: strb r1, [r0, #4]
; THUMB: ldrb r1, [r0, #17]
; THUMB: strb r1, [r0, #5]
; THUMB: ldrb r1, [r0, #18]
; THUMB: strb r1, [r0, #6]
; THUMB: ldrb r1, [r0, #19]
; THUMB: strb r1, [r0, #7]
; THUMB: ldrb r1, [r0, #20]
; THUMB: strb r1, [r0, #8]
; THUMB: ldrb r1, [r0, #21]
; THUMB: strb r1, [r0, #9]
; THUMB: ldrb r1, [r0, #22]
; THUMB: strb r1, [r0, #10]
; THUMB: ldrb r1, [r0, #23]
; THUMB: strb r1, [r0, #11]
; THUMB: ldrb r1, [r0, #24]
; THUMB: strb r1, [r0, #12]
; THUMB: ldrb r1, [r0, #25]
; THUMB: strb r1, [r0, #13]
; THUMB: bx lr
call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8]* @temp, i32 0, i32 4), i8* getelementptr inbounds ([60 x i8]* @temp, i32 0, i32 16), i32 10, i32 1, i1 false)
ret void
}
; rdar://13202135
define void @t7() nounwind ssp {
; Just make sure this doesn't assert when we have an odd length and an alignment of 2.
call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8]* @temp, i32 0, i32 4), i8* getelementptr inbounds ([60 x i8]* @temp, i32 0, i32 16), i32 3, i32 2, i1 false)
ret void
}
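
; t8: @llvm.expect.i32 should be handled by FastISel; the -NOT lines check
; that no "FastISel missed call" message for it appears in the output.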
define i32 @t8(i32 %x) nounwind {
entry:
; ARM: t8
; ARM-NOT: FastISel missed call: %expval = call i32 @llvm.expect.i32(i32 %x, i32 1)
; THUMB: t8
; THUMB-NOT: FastISel missed call: %expval = call i32 @llvm.expect.i32(i32 %x, i32 1)
%expval = call i32 @llvm.expect.i32(i32 %x, i32 1)
ret i32 %expval
}
declare i32 @llvm.expect.i32(i32, i32) nounwind readnone