
[X86] 2011-10-19-widen_vselect.ll - replace X32 check prefix with X86. NFC.

We typically use X32 for gnux32 triples. This test targets plain 32-bit i686, so X86 is the conventional prefix.
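
For illustration only (this RUN line is not part of the test), the X32 prefix belongs with an x32-ABI run, i.e. a gnux32 triple:

; RUN: llc < %s -mtriple=x86_64-linux-gnux32 -mcpu=corei7 | FileCheck %s --check-prefix=X32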
Simon Pilgrim 2020-11-17 12:34:10 +00:00
parent 95dd3dbc4f
commit df9de10e46

test/CodeGen/X86/2011-10-19-widen_vselect.ll

@@ -1,15 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mcpu=corei7 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mcpu=corei7 | FileCheck %s --check-prefix=X86
 ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s --check-prefix=X64
 
 ; Make sure that we don't crash when legalizing vselect and vsetcc and that
 ; we are able to generate vector blend instructions.
 
 define void @simple_widen(<2 x float> %a, <2 x float> %b) {
-; X32-LABEL: simple_widen:
-; X32: # %bb.0: # %entry
-; X32-NEXT: movlps %xmm1, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: simple_widen:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movlps %xmm1, (%eax)
+; X86-NEXT: retl
 ;
 ; X64-LABEL: simple_widen:
 ; X64: # %bb.0: # %entry
@@ -22,14 +22,14 @@ entry:
 }
 
 define void @complex_inreg_work(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
-; X32-LABEL: complex_inreg_work:
-; X32: # %bb.0: # %entry
-; X32-NEXT: movaps %xmm0, %xmm3
-; X32-NEXT: cmpordps %xmm2, %xmm2
-; X32-NEXT: movaps %xmm2, %xmm0
-; X32-NEXT: blendvps %xmm0, %xmm3, %xmm1
-; X32-NEXT: movlps %xmm1, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: complex_inreg_work:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movaps %xmm0, %xmm3
+; X86-NEXT: cmpordps %xmm2, %xmm2
+; X86-NEXT: movaps %xmm2, %xmm0
+; X86-NEXT: blendvps %xmm0, %xmm3, %xmm1
+; X86-NEXT: movlps %xmm1, (%eax)
+; X86-NEXT: retl
 ;
 ; X64-LABEL: complex_inreg_work:
 ; X64: # %bb.0: # %entry
@@ -47,11 +47,11 @@ entry:
 }
 
 define void @zero_test() {
-; X32-LABEL: zero_test:
-; X32: # %bb.0: # %entry
-; X32-NEXT: xorps %xmm0, %xmm0
-; X32-NEXT: movlps %xmm0, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: zero_test:
+; X86: # %bb.0: # %entry
+; X86-NEXT: xorps %xmm0, %xmm0
+; X86-NEXT: movlps %xmm0, (%eax)
+; X86-NEXT: retl
 ;
 ; X64-LABEL: zero_test:
 ; X64: # %bb.0: # %entry
@@ -65,27 +65,27 @@ entry:
 }
 
 define void @full_test() {
-; X32-LABEL: full_test:
-; X32: # %bb.0: # %entry
-; X32-NEXT: subl $60, %esp
-; X32-NEXT: .cfi_def_cfa_offset 64
-; X32-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; X32-NEXT: cvttps2dq %xmm2, %xmm0
-; X32-NEXT: cvtdq2ps %xmm0, %xmm1
-; X32-NEXT: xorps %xmm0, %xmm0
-; X32-NEXT: cmpltps %xmm2, %xmm0
-; X32-NEXT: movaps {{.*#+}} xmm3 = <1.0E+0,1.0E+0,u,u>
-; X32-NEXT: addps %xmm1, %xmm3
-; X32-NEXT: movaps %xmm1, %xmm4
-; X32-NEXT: blendvps %xmm0, %xmm3, %xmm4
-; X32-NEXT: cmpeqps %xmm2, %xmm1
-; X32-NEXT: movaps %xmm1, %xmm0
-; X32-NEXT: blendvps %xmm0, %xmm2, %xmm4
-; X32-NEXT: movlps %xmm4, {{[0-9]+}}(%esp)
-; X32-NEXT: movlps %xmm4, {{[0-9]+}}(%esp)
-; X32-NEXT: addl $60, %esp
-; X32-NEXT: .cfi_def_cfa_offset 4
-; X32-NEXT: retl
+; X86-LABEL: full_test:
+; X86: # %bb.0: # %entry
+; X86-NEXT: subl $60, %esp
+; X86-NEXT: .cfi_def_cfa_offset 64
+; X86-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; X86-NEXT: cvttps2dq %xmm2, %xmm0
+; X86-NEXT: cvtdq2ps %xmm0, %xmm1
+; X86-NEXT: xorps %xmm0, %xmm0
+; X86-NEXT: cmpltps %xmm2, %xmm0
+; X86-NEXT: movaps {{.*#+}} xmm3 = <1.0E+0,1.0E+0,u,u>
+; X86-NEXT: addps %xmm1, %xmm3
+; X86-NEXT: movaps %xmm1, %xmm4
+; X86-NEXT: blendvps %xmm0, %xmm3, %xmm4
+; X86-NEXT: cmpeqps %xmm2, %xmm1
+; X86-NEXT: movaps %xmm1, %xmm0
+; X86-NEXT: blendvps %xmm0, %xmm2, %xmm4
+; X86-NEXT: movlps %xmm4, {{[0-9]+}}(%esp)
+; X86-NEXT: movlps %xmm4, {{[0-9]+}}(%esp)
+; X86-NEXT: addl $60, %esp
+; X86-NEXT: .cfi_def_cfa_offset 4
+; X86-NEXT: retl
 ;
 ; X64-LABEL: full_test:
 ; X64: # %bb.0: # %entry
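
As the test's header comment says, the point of these functions is that legalizing a vselect/vsetcc on a narrow <2 x float> must not crash, and should instead widen to a vector blend (blendvps, available on an SSE4.1 CPU such as corei7). Below is a minimal sketch of IR in that shape, with a hypothetical function name and store target; it is not the exact body of the test functions above.

; fcmp lowers to a vsetcc node and select to a vselect node; both are
; widened from v2f32 to v4f32, and on SSE4.1 the select becomes blendvps
; (compare the cmpordps/blendvps sequence checked above).
define void @select_v2f32(<2 x float> %a, <2 x float> %b, <2 x float>* %p) {
entry:
  %cmp = fcmp ord <2 x float> %a, %b
  %sel = select <2 x i1> %cmp, <2 x float> %a, <2 x float> %b
  store <2 x float> %sel, <2 x float>* %p
  ret void
}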