1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-23 19:23:23 +01:00
llvm-mirror/test/CodeGen/X86/x86-64-static-relo-movl.ll
Louis Gerbarg c6a8e61725 Allow X86FastIsel to cope with 64 bit absolute relocations
This patch is a follow up to r211040 & r211052. Rather than bailing out of fast
isel this patch will generate an alternate instruction (movabsq) instead of the
leaq. While this will always have enough room to handle the 64 bit displacment
it is generally over kill for internal symbols (most displacements will be
within 32 bits), but it is necessary because we have no way of communicating the
code model to the assembler in order to avoid flagging an absolute leal/leaq as
illegal when using a symbolic displacement.

llvm-svn: 211130
2014-06-17 23:22:41 +00:00

25 lines
822 B
LLVM

; RUN: llc -mtriple=x86_64-pc-win32-macho -relocation-model=static -O0 < %s | FileCheck %s
; At -O0 (FastISel) with the static relocation model, materializing the 64-bit
; absolute address of an internal symbol must use movabsq (which has room for a
; 64-bit displacement) rather than an absolute leaq/movl with a symbolic
; displacement.
; A 512-byte (64 x i64) aggregate used as both the stack local and the
; internal constant it is copied from.
%struct.MatchInfo = type [64 x i64]
; Internal (non-exported) zero-filled constant; being internal is what makes
; an absolute address reference plausible under the static model.
@NO_MATCH = internal constant %struct.MatchInfo zeroinitializer, align 8
; Copies the internal constant @NO_MATCH into a stack slot, forcing FastISel
; to materialize the global's address as a memcpy source operand.
define void @setup() {
%pending = alloca %struct.MatchInfo, align 8
%t = bitcast %struct.MatchInfo* %pending to i8*
; 512-byte copy from @NO_MATCH; taking the global's address here is what the
; CHECK below inspects. (Old pre-LLVM-7 memcpy form: alignment is the i32
; operand, the trailing i1 is the volatile flag.)
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %t, i8* bitcast (%struct.MatchInfo* @NO_MATCH to i8*), i64 512, i32 8, i1 false)
%u = getelementptr inbounds %struct.MatchInfo* %pending, i32 0, i32 2
%v = load i64* %u, align 8
br label %done
done:
ret void
; The address must be a 64-bit absolute immediate (movabsq), not an absolute
; leaq/movl; `_NO_MATCH` carries the Mach-O leading-underscore mangling from
; the win32-macho triple.
; CHECK: movabsq $_NO_MATCH, {{.*}}
}
; Function Attrs: nounwind
; Declaration of the pre-LLVM-7 memcpy intrinsic signature:
; (dest, src, length, alignment, isvolatile).
declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1)