llvm-mirror/test/CodeGen/X86/unaligned-load.ll
Evan Cheng 562bb43207 Fix sdisel memcpy, memset, memmove lowering:
1. Make it possible to lower with floating-point loads and stores.
2. Avoid unaligned loads / stores unless it's fast.
3. Fix a bug in the memcpy lowering logic related to when a load from a
   constant string should be optimized into a constant.
4. Adjust x86 memcpy lowering threshold to make it more sane.
5. Fix the x86 target hook so it uses vector and floating-point memory
   ops more effectively.
rdar://7774704

llvm-svn: 100090
2010-04-01 06:04:33 +00:00
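
The policy behind items 1, 2, and 5 is easiest to see as a per-chunk width
selection for the expanded copy. The sketch below is illustrative only and
uses hypothetical names (chooseMemcpyOp, fastUnalignedVec); the real decision
is made by the x86 implementation of the getOptimalMemOpType target hook
together with the generic SelectionDAG memcpy expansion.

  #include <cstdint>

  // The kind of memory operation used for one chunk of the copy.
  enum class MemOp { Byte, FP64 /* 8-byte movsd */, Vec128 /* 16-byte movups/movaps */ };

  // Pick the widest operation that is safe and fast for the bytes that remain.
  // `hasSSE2` and `fastUnalignedVec` stand in for target feature bits:
  // corei7 reports fast unaligned 16-byte accesses, core2 does not.
  static MemOp chooseMemcpyOp(uint64_t bytesLeft, unsigned dstAlign,
                              bool hasSSE2, bool fastUnalignedVec) {
    if (hasSSE2 && bytesLeft >= 16 && (dstAlign >= 16 || fastUnalignedVec))
      return MemOp::Vec128; // 16-byte vector load/store (movups when unaligned)
    if (hasSSE2 && bytesLeft >= 8)
      return MemOp::FP64;   // 8-byte FP load/store (movsd); cheap even unaligned
    return MemOp::Byte;     // fall back to scalar byte copies
  }

Under this policy a 31-byte copy becomes three movsd pairs plus a small tail on
core2, and overlapping movups pairs on corei7, which is exactly what the test
below checks.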

; RUN: llc < %s -mtriple=x86_64-apple-darwin10.0 -mcpu=core2 -relocation-model=dynamic-no-pic --asm-verbose=0 | FileCheck -check-prefix=CORE2 %s
; RUN: llc < %s -mtriple=x86_64-apple-darwin10.0 -mcpu=corei7 -relocation-model=dynamic-no-pic --asm-verbose=0 | FileCheck -check-prefix=COREI7 %s
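
; The 31-byte memcpy below copies from an 8-byte-aligned constant string into
; an align-1 local buffer. On core2, where unaligned 16-byte vector accesses
; are slow, it should be lowered to 8-byte FP loads/stores (movsd); on corei7,
; where unaligned accesses are fast, it should use 16-byte vector moves
; (movups).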
@.str1 = internal constant [31 x i8] c"DHRYSTONE PROGRAM, SOME STRING\00", align 8
@.str3 = internal constant [31 x i8] c"DHRYSTONE PROGRAM, 2'ND STRING\00", align 8

define void @func() nounwind ssp {
entry:
  %String2Loc = alloca [31 x i8], align 1
  br label %bb

bb:
  %String2Loc9 = getelementptr inbounds [31 x i8]* %String2Loc, i64 0, i64 0
  call void @llvm.memcpy.i64(i8* %String2Loc9, i8* getelementptr inbounds ([31 x i8]* @.str3, i64 0, i64 0), i64 31, i32 1)
; CORE2: movsd _.str3+16
; CORE2: movsd _.str3+8
; CORE2: movsd _.str3

; COREI7: movups _.str3
  br label %bb

return:
  ret void
}

declare void @llvm.memcpy.i64(i8* nocapture, i8* nocapture, i64, i32) nounwind
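
; Both constant strings should be emitted 8-byte aligned; in Darwin assembly,
; .align takes a power-of-two exponent, so ".align 3" means 2^3 = 8 bytes.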
; CORE2: .align 3
; CORE2-NEXT: _.str1:
; CORE2-NEXT: .asciz "DHRYSTONE PROGRAM, SOME STRING"

; CORE2: .align 3
; CORE2-NEXT: _.str3:

; COREI7: .align 3
; COREI7-NEXT: _.str1:
; COREI7-NEXT: .asciz "DHRYSTONE PROGRAM, SOME STRING"

; COREI7: .align 3
; COREI7-NEXT: _.str3: