llvm-mirror/test/CodeGen/X86/widen_load-1.ll
Michael Liao b6735b87b0 Introduce 'UseSSEx' to force SSE legacy encoding
- Add 'UseSSEx' to force SSE legacy insns not to be selected when AVX
  is enabled.

  Due to the penalty of inter-mixing SSE and AVX instructions, we need
  to prevent SSE legacy insns from being generated except when they are
  explicitly requested through some intrinsics. For patterns supported
  by both SSE and AVX, we have so far forced the AVX insn to be tried
  first, relying on AddedComplexity or position in the td file. That
  approach is error-prone and accidentally introduces bugs.

  'UseSSEx' is disabled when AVX is turned on. For SSE insns inherited
  by AVX, we need this predicate to force either the VEX encoding or
  the SSE legacy encoding exclusively.
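
  For illustration, the new predicate pair might be defined along these
  lines in X86InstrInfo.td (a minimal sketch; the exact in-tree
  definitions may differ):

    // True whenever SSE2 is available, with or without AVX.
    def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
    // True only when AVX is off, so patterns guarded by it can never
    // produce SSE legacy encodings on an AVX-enabled target.
    def UseSSE2 : Predicate<"Subtarget->hasSSE2() && !Subtarget->hasAVX()">;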

  For insns not inherited by AVX, we still use the previous predicates,
  i.e. 'HasSSEx'. So far, these insns fall into the following
  categories (see the sketch after this list):
  * SSE insns with MMX operands
  * SSE insns with GPR/MEM operands only (xFENCE, PREFETCH, CLFLUSH,
    CRC, etc.)
  * SSE4A insns.
  * MMX insns.
  * x87 insns added by SSE.
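
  As a rough sketch (hypothetical patterns for illustration; the
  in-tree defs differ in detail), the gating then looks like:

    // Inherited insn: the legacy pattern only matches when AVX is off...
    def : Pat<(v4f32 (fadd VR128:$a, VR128:$b)),
              (ADDPSrr VR128:$a, VR128:$b)>, Requires<[UseSSE1]>;
    // ...and the VEX-encoded form takes over once AVX is enabled,
    // with no AddedComplexity tie-breaking needed.
    def : Pat<(v4f32 (fadd VR128:$a, VR128:$b)),
              (VADDPSrr VR128:$a, VR128:$b)>, Requires<[HasAVX]>;
    // Insns in the categories above keep their original 'HasSSEx'
    // predicates, e.g. Requires<[HasSSE2]> stays on CLFLUSH.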

Two test cases are modified:

 - test/CodeGen/X86/fast-isel-x86-64.ll
   AVX code generation differs from the SSE one. 'vcvtsi2sdq' cannot be
   selected by fast-isel due to its complicated pattern, so fast-isel
   falls back to materializing the value from the constant pool.

 - test/CodeGen/X86/widen_load-1.ll
   AVX code generation differs from the SSE one after fixing the
   SSE/AVX inter-mixing. Exec-domain fixing prefers 'vmovapd' over
   'vmovaps'.

llvm-svn: 162919
2012-08-30 16:54:46 +00:00

; RUN: llc %s -o - -march=x86-64 -mattr=-avx -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=SSE
; RUN: llc %s -o - -march=x86-64 -mattr=+avx -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=AVX
; PR4891
; PR5626
; This load should be before the call, not after.
; SSE: movaps compl+128(%rip), %xmm0
; SSE: movaps %xmm0, (%rsp)
; SSE: callq killcommon
; AVX: vmovapd compl+128(%rip), %xmm0
; AVX: vmovapd %xmm0, (%rsp)
; AVX: callq killcommon
@compl = linkonce global [20 x i64] zeroinitializer, align 64 ; <[20 x i64]*> [#uses=1]
declare void @killcommon(i32* noalias)
define void @reset(<2 x float>* noalias %garbage1) {
"file complex.c, line 27, bb1":
%changed = alloca i32, align 4 ; <i32*> [#uses=3]
br label %"file complex.c, line 27, bb13"
"file complex.c, line 27, bb13": ; preds = %"file complex.c, line 27, bb1"
store i32 0, i32* %changed, align 4
%r2 = getelementptr float* bitcast ([20 x i64]* @compl to float*), i64 32 ; <float*> [#uses=1]
%r3 = bitcast float* %r2 to <2 x float>* ; <<2 x float>*> [#uses=1]
%r4 = load <2 x float>* %r3, align 4 ; <<2 x float>> [#uses=1]
call void @killcommon(i32* %changed)
br label %"file complex.c, line 34, bb4"
"file complex.c, line 34, bb4": ; preds = %"file complex.c, line 27, bb13"
%r5 = load i32* %changed, align 4 ; <i32> [#uses=1]
%r6 = icmp eq i32 %r5, 0 ; <i1> [#uses=1]
%r7 = zext i1 %r6 to i32 ; <i32> [#uses=1]
%r8 = icmp ne i32 %r7, 0 ; <i1> [#uses=1]
br i1 %r8, label %"file complex.c, line 34, bb7", label %"file complex.c, line 27, bb5"
"file complex.c, line 27, bb5": ; preds = %"file complex.c, line 34, bb4"
br label %"file complex.c, line 35, bb6"
"file complex.c, line 35, bb6": ; preds = %"file complex.c, line 27, bb5"
%r11 = ptrtoint <2 x float>* %garbage1 to i64 ; <i64> [#uses=1]
%r12 = inttoptr i64 %r11 to <2 x float>* ; <<2 x float>*> [#uses=1]
store <2 x float> %r4, <2 x float>* %r12, align 4
br label %"file complex.c, line 34, bb7"
"file complex.c, line 34, bb7": ; preds = %"file complex.c, line 35, bb6", %"file complex.c, line 34, bb4"
ret void
}