llvm-mirror/test/CodeGen/X86/movgs.ll
Commit e04fe8733a (llvm-svn: 218941) by Chandler Carruth, 2014-10-03 01:06:32 +00:00:

[x86] Regenerate a number of FileCheck assertions with my script for test
cases that will change with the new vector shuffle lowering. This gives us a
nice baseline for deltas against. I've checked and removed the cases where
weird register usage was being pinned down, and all of these are extremely
pin-pointed tests, so fully checking them seems very appropriate.

; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu -mcpu=penryn -mattr=sse4.1 | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-linux -mcpu=penryn -mattr=sse4.1 | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-win32 -mcpu=penryn -mattr=sse4.1 | FileCheck %s --check-prefix=X64
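
; On x86, loads through address space 256 lower to %gs-relative memory
; operands and loads through address space 257 to %fs-relative operands;
; the checks below pin that segment-relative addressing down.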
define i32 @test1() nounwind readonly {
; X32-LABEL: test1:
; X32: # BB#0: # %entry
; X32-NEXT: movl %gs:196, %eax
; X32-NEXT: movl (%eax), %eax
; X32-NEXT: retl
;
; X64-LABEL: test1:
; X64: # BB#0: # %entry
; X64-NEXT: movq %gs:320, %rax
; X64-NEXT: movl (%rax), %eax
; X64-NEXT: retq
entry:
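; The %gs-relative slot sits at offset 72 + 31 * sizeof(ptr), i.e.
; 72 + 31*4 = 196 on i386 and 72 + 31*8 = 320 on x86-64, which are the
; constants checked above.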
%tmp = load i32* addrspace(256)* getelementptr (i32* addrspace(256)* inttoptr (i32 72 to i32* addrspace(256)*), i32 31) ; <i32*> [#uses=1]
%tmp1 = load i32* %tmp ; <i32> [#uses=1]
ret i32 %tmp1
}
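
; An indirect call through a %gs-relative function-pointer slot should fold
; into a single call *%gs:(%reg) instead of loading the pointer first; the
; function then simply returns 0.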
define i64 @test2(void (i8*)* addrspace(256)* %tmp8) nounwind {
; X32-LABEL: test2:
; X32: # BB#0: # %entry
; X32-NEXT: subl $12, %esp
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: calll *%gs:(%eax)
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: addl $12, %esp
; X32-NEXT: retl
;
; X64-LABEL: test2:
; X64: # BB#0: # %entry
; X64-NEXT: {{(subq.*%rsp|pushq)}}
; X64-NEXT: callq *%gs:(%{{(rcx|rdi)}})
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: {{(addq.*%rsp|popq)}}
; X64-NEXT: retq
entry:
%tmp9 = load void (i8*)* addrspace(256)* %tmp8, align 8
tail call void %tmp9(i8* undef) nounwind optsize
ret i64 0
}
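
; The %gs-relative 64-bit load feeding pmovsxwd should fold into the
; instruction's memory operand rather than being emitted as a separate mov.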
define <2 x i64> @pmovsxwd_1(i64 addrspace(256)* %p) nounwind readonly {
; X32-LABEL: pmovsxwd_1:
; X32: # BB#0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: pmovsxwd %gs:(%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: pmovsxwd_1:
; X64: # BB#0: # %entry
; X64-NEXT: pmovsxwd %gs:(%{{(rcx|rdi)}}), %xmm0
; X64-NEXT: retq
entry:
%0 = load i64 addrspace(256)* %p
%tmp2 = insertelement <2 x i64> zeroinitializer, i64 %0, i32 0
%1 = bitcast <2 x i64> %tmp2 to <8 x i16>
%2 = tail call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %1) nounwind readnone
%3 = bitcast <4 x i32> %2 to <2 x i64>
ret <2 x i64> %3
}

; The two loads here both look identical to the selection DAG, except for
; their address spaces. Make sure they aren't CSE'd.
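; Both loads read the same offset (196 on i386, 320 on x86-64), but through
; different segments (%gs for addrspace 256, %fs for addrspace 257), so two
; separate loads must feed the add.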
define i32 @test_no_cse() nounwind readonly {
; X32-LABEL: test_no_cse:
; X32: # BB#0: # %entry
; X32-NEXT: movl %gs:196, %eax
; X32-NEXT: movl (%eax), %eax
; X32-NEXT: movl %fs:196, %ecx
; X32-NEXT: addl (%ecx), %eax
; X32-NEXT: retl
;
; X64-LABEL: test_no_cse:
; X64: # BB#0: # %entry
; X64-NEXT: movq %gs:320, %rax
; X64-NEXT: movl (%rax), %eax
; X64-NEXT: movq %fs:320, %rcx
; X64-NEXT: addl (%rcx), %eax
; X64-NEXT: retq
entry:
%tmp = load i32* addrspace(256)* getelementptr (i32* addrspace(256)* inttoptr (i32 72 to i32* addrspace(256)*), i32 31) ; <i32*> [#uses=1]
%tmp1 = load i32* %tmp ; <i32> [#uses=1]
%tmp2 = load i32* addrspace(257)* getelementptr (i32* addrspace(257)* inttoptr (i32 72 to i32* addrspace(257)*), i32 31) ; <i32*> [#uses=1]
%tmp3 = load i32* %tmp2 ; <i32> [#uses=1]
%tmp4 = add i32 %tmp1, %tmp3
ret i32 %tmp4
}

declare <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16>) nounwind readnone