21418c85a1
If the mask input to getV4X86ShuffleImm8 only refers to a single source element (+ undefs), then canonicalize to a full broadcast. getV4X86ShuffleImm8 defaults to inlining values for undefs, which can be useful for shuffle widening/narrowing but does leave SimplifyDemanded* calls thinking the shuffle depends on unnecessary elements. I'm still investigating what we should do more generally to avoid these undemanded elements, but the broadcast case was a simpler win.
116 lines
5.1 KiB
LLVM
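
As context for the test below, the canonicalization described in the commit message can be modelled with a short standalone sketch. This is a hedged illustration, not the actual getV4X86ShuffleImm8 implementation: it assumes a plain 4-lane mask in which -1 marks an undef lane, and the function name is made up.

// Minimal sketch (illustrative only, not LLVM's code): a 4-lane shuffle mask
// uses -1 for undef lanes. If every defined lane reads the same source
// element, rewrite the undef lanes to that element too, turning e.g.
// {2, -1, -1, 2} into the full broadcast {2, 2, 2, 2}, so a demanded-elements
// analysis sees a dependency on exactly one source element.
#include <array>

static bool canonicalizeMaskToBroadcast(std::array<int, 4> &Mask) {
  int Elt = -1; // the single source element referenced, if any
  for (int M : Mask) {
    if (M < 0)
      continue;     // undef lane, ignore
    if (Elt < 0)
      Elt = M;      // first defined lane
    else if (M != Elt)
      return false; // mask references more than one source element
  }
  if (Elt < 0)
    return false;   // all-undef mask: nothing to broadcast
  Mask.fill(Elt);   // splat the single element across all lanes
  return true;
}

Under this model, a mask such as {3, -1, -1, -1} canonicalizes to the broadcast {3, 3, 3, 3}, which is the shape of the vpermq ymm0 = ymm3[3,3,3,3] check in the X64-AVX output below.
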
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X64-SSE
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=X32-AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=X64-AVX

define i64 @extract_any_extend_vector_inreg_v16i64(<16 x i64> %a0, i32 %a1) nounwind {
; X32-SSE-LABEL: extract_any_extend_vector_inreg_v16i64:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    pushl %ebp
; X32-SSE-NEXT:    movl %esp, %ebp
; X32-SSE-NEXT:    andl $-16, %esp
; X32-SSE-NEXT:    subl $272, %esp # imm = 0x110
; X32-SSE-NEXT:    movl 88(%ebp), %ecx
; X32-SSE-NEXT:    movdqa 72(%ebp), %xmm0
; X32-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
; X32-SSE-NEXT:    xorps %xmm1, %xmm1
; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
; X32-SSE-NEXT:    movdqa %xmm0, {{[0-9]+}}(%esp)
; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
; X32-SSE-NEXT:    movaps %xmm1, (%esp)
; X32-SSE-NEXT:    movdqa %xmm0, {{[0-9]+}}(%esp)
; X32-SSE-NEXT:    leal (%ecx,%ecx), %eax
; X32-SSE-NEXT:    andl $31, %eax
; X32-SSE-NEXT:    movl 128(%esp,%eax,4), %eax
; X32-SSE-NEXT:    leal 1(%ecx,%ecx), %ecx
; X32-SSE-NEXT:    andl $31, %ecx
; X32-SSE-NEXT:    movl (%esp,%ecx,4), %edx
; X32-SSE-NEXT:    movl %ebp, %esp
; X32-SSE-NEXT:    popl %ebp
; X32-SSE-NEXT:    retl
;
; X64-SSE-LABEL: extract_any_extend_vector_inreg_v16i64:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-SSE-NEXT:    psrldq {{.*#+}} xmm7 = xmm7[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
; X64-SSE-NEXT:    xorps %xmm0, %xmm0
; X64-SSE-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-SSE-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-SSE-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-SSE-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-SSE-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-SSE-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-SSE-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-SSE-NEXT:    movdqa %xmm7, -{{[0-9]+}}(%rsp)
; X64-SSE-NEXT:    andl $15, %edi
; X64-SSE-NEXT:    movq -128(%rsp,%rdi,8), %rax
; X64-SSE-NEXT:    popq %rcx
; X64-SSE-NEXT:    retq
;
; X32-AVX-LABEL: extract_any_extend_vector_inreg_v16i64:
; X32-AVX:       # %bb.0:
; X32-AVX-NEXT:    pushl %ebp
; X32-AVX-NEXT:    movl %esp, %ebp
; X32-AVX-NEXT:    andl $-32, %esp
; X32-AVX-NEXT:    subl $288, %esp # imm = 0x120
; X32-AVX-NEXT:    movl 40(%ebp), %ecx
; X32-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; X32-AVX-NEXT:    vmovaps %ymm1, {{[0-9]+}}(%esp)
; X32-AVX-NEXT:    vmovaps %ymm1, {{[0-9]+}}(%esp)
; X32-AVX-NEXT:    vmovaps %ymm1, {{[0-9]+}}(%esp)
; X32-AVX-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp)
; X32-AVX-NEXT:    vmovaps %ymm1, {{[0-9]+}}(%esp)
; X32-AVX-NEXT:    vmovaps %ymm1, {{[0-9]+}}(%esp)
; X32-AVX-NEXT:    vmovaps %ymm1, (%esp)
; X32-AVX-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp)
; X32-AVX-NEXT:    leal (%ecx,%ecx), %eax
; X32-AVX-NEXT:    andl $31, %eax
; X32-AVX-NEXT:    movl 128(%esp,%eax,4), %eax
; X32-AVX-NEXT:    leal 1(%ecx,%ecx), %ecx
; X32-AVX-NEXT:    andl $31, %ecx
; X32-AVX-NEXT:    movl (%esp,%ecx,4), %edx
; X32-AVX-NEXT:    movl %ebp, %esp
; X32-AVX-NEXT:    popl %ebp
; X32-AVX-NEXT:    vzeroupper
; X32-AVX-NEXT:    retl
;
; X64-AVX-LABEL: extract_any_extend_vector_inreg_v16i64:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    pushq %rbp
; X64-AVX-NEXT:    movq %rsp, %rbp
; X64-AVX-NEXT:    andq $-32, %rsp
; X64-AVX-NEXT:    subq $160, %rsp
; X64-AVX-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-AVX-NEXT:    vpermq {{.*#+}} ymm0 = ymm3[3,3,3,3]
; X64-AVX-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
; X64-AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; X64-AVX-NEXT:    vmovaps %ymm1, {{[0-9]+}}(%rsp)
; X64-AVX-NEXT:    vmovaps %ymm1, {{[0-9]+}}(%rsp)
; X64-AVX-NEXT:    vmovaps %ymm1, (%rsp)
; X64-AVX-NEXT:    vmovdqa %ymm0, {{[0-9]+}}(%rsp)
; X64-AVX-NEXT:    andl $15, %edi
; X64-AVX-NEXT:    movq (%rsp,%rdi,8), %rax
; X64-AVX-NEXT:    movq %rbp, %rsp
; X64-AVX-NEXT:    popq %rbp
; X64-AVX-NEXT:    vzeroupper
; X64-AVX-NEXT:    retq
  %1 = extractelement <16 x i64> %a0, i32 15
  %2 = insertelement <16 x i64> zeroinitializer, i64 %1, i32 4
  %3 = extractelement <16 x i64> %2, i32 %a1
  ret i64 %3
}
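
A note on the lowering the checks above exercise: x86 has no instruction for extracting a vector element at a variable index, so the backend spills the vector to an aligned stack slot and performs a scalar load. The andl $15 (or $31 for the two 32-bit halves in the i686 output) is legal because an out-of-range extractelement index yields poison, so the index may be clamped into the slot. Below is a hedged C++ model of that sequence; the helper name is made up for illustration.

#include <cstdint>
#include <cstring>

// Hedged model of the lowering seen in the checks (illustrative names):
// spill the <16 x i64> vector to a stack buffer, clamp the index into range
// (the `andl $15, %edi` in the x86-64 output), then do a plain scalar load
// (the `movq (%rsp,%rdi,8), %rax`).
static uint64_t extractVarIndexV16i64(const uint64_t (&Vec)[16],
                                      uint32_t Idx) {
  uint64_t Slot[16];                   // models the aligned stack slot
  std::memcpy(Slot, Vec, sizeof Slot); // models the movaps/movdqa stores
  return Slot[Idx & 15];               // masked, variable-index scalar load
}
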