
[x86] add more tests for potential horizontal ops; NFC

As discussed in D56011 - add runs for AVX512 and tests with extra uses.

llvm-svn: 350221
Author: Sanjay Patel
Date:   2019-01-02 16:36:04 +00:00
Parent: 88d49a9216
Commit: 589c4f65be
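For context, every test below materializes the "potential horizontal op" pattern: two extractelement instructions pulling adjacent lanes out of one vector, feeding a scalar fadd or fsub. A minimal sketch of the candidate IR (hypothetical function name) that D56011 proposes to fold into a single haddps/haddpd when fast-hops is enabled:

; Lanes 0 and 1 of the same source vector combined by a scalar fadd:
; the shape a horizontal-add (haddps) fold would look for.
define float @hadd_candidate(<4 x float> %x) {
  %lane0 = extractelement <4 x float> %x, i32 0
  %lane1 = extractelement <4 x float> %x, i32 1
  %sum = fadd float %lane0, %lane1
  ret float %sum
}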


@@ -1,8 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3 | FileCheck %s --check-prefixes=SSE3,SSE3-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3,fast-hops | FileCheck %s --check-prefixes=SSE3,SSE3-FAST
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3 | FileCheck %s --check-prefixes=SSE3,SSE3-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3,fast-hops | FileCheck %s --check-prefixes=SSE3,SSE3-FAST
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX1-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX1-FAST
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX512-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX512-FAST
define <2 x double> @haddpd1(<2 x double> %x, <2 x double> %y) {
; SSE3-LABEL: haddpd1:
@@ -583,6 +585,8 @@ define <2 x float> @haddps_v2f32(<4 x float> %v0) {
ret <2 x float> %res1
}
; 128-bit vectors, float/double, fadd/fsub
define float @extract_extract_v4f32_fadd_f32(<4 x float> %x) {
; SSE3-LABEL: extract_extract_v4f32_fadd_f32:
; SSE3: # %bb.0:
@@ -619,120 +623,6 @@ define float @extract_extract_v4f32_fadd_f32_commute(<4 x float> %x) {
ret float %x01
}
define float @extract_extract_v8f32_fadd_f32(<8 x float> %x) {
; SSE3-LABEL: extract_extract_v8f32_fadd_f32:
; SSE3: # %bb.0:
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: addss %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v8f32_fadd_f32:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%x0 = extractelement <8 x float> %x, i32 0
%x1 = extractelement <8 x float> %x, i32 1
%x01 = fadd float %x0, %x1
ret float %x01
}
define float @extract_extract_v8f32_fadd_f32_commute(<8 x float> %x) {
; SSE3-LABEL: extract_extract_v8f32_fadd_f32_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: addss %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v8f32_fadd_f32_commute:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%x0 = extractelement <8 x float> %x, i32 0
%x1 = extractelement <8 x float> %x, i32 1
%x01 = fadd float %x1, %x0
ret float %x01
}
define float @extract_extract_v4f32_fsub_f32(<4 x float> %x) {
; SSE3-LABEL: extract_extract_v4f32_fsub_f32:
; SSE3: # %bb.0:
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: subss %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v4f32_fsub_f32:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <4 x float> %x, i32 0
%x1 = extractelement <4 x float> %x, i32 1
%x01 = fsub float %x0, %x1
ret float %x01
}
define float @extract_extract_v4f32_fsub_f32_commute(<4 x float> %x) {
; SSE3-LABEL: extract_extract_v4f32_fsub_f32_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: subss %xmm0, %xmm1
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v4f32_fsub_f32_commute:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <4 x float> %x, i32 0
%x1 = extractelement <4 x float> %x, i32 1
%x01 = fsub float %x1, %x0
ret float %x01
}
define float @extract_extract_v8f32_fsub_f32(<8 x float> %x) {
; SSE3-LABEL: extract_extract_v8f32_fsub_f32:
; SSE3: # %bb.0:
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: subss %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v8f32_fsub_f32:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%x0 = extractelement <8 x float> %x, i32 0
%x1 = extractelement <8 x float> %x, i32 1
%x01 = fsub float %x0, %x1
ret float %x01
}
define float @extract_extract_v8f32_fsub_f32_commute(<8 x float> %x) {
; SSE3-LABEL: extract_extract_v8f32_fsub_f32_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: subss %xmm0, %xmm1
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v8f32_fsub_f32_commute:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%x0 = extractelement <8 x float> %x, i32 0
%x1 = extractelement <8 x float> %x, i32 1
%x01 = fsub float %x1, %x0
ret float %x01
}
define double @extract_extract_v2f64_fadd_f64(<2 x double> %x) {
; SSE3-LABEL: extract_extract_v2f64_fadd_f64:
; SSE3: # %bb.0:
@@ -773,6 +663,122 @@ define double @extract_extract_v2f64_fadd_f64_commute(<2 x double> %x) {
ret double %x01
}
define float @extract_extract_v4f32_fsub_f32(<4 x float> %x) {
; SSE3-LABEL: extract_extract_v4f32_fsub_f32:
; SSE3: # %bb.0:
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: subss %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v4f32_fsub_f32:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <4 x float> %x, i32 0
%x1 = extractelement <4 x float> %x, i32 1
%x01 = fsub float %x0, %x1
ret float %x01
}
define float @extract_extract_v4f32_fsub_f32_commute(<4 x float> %x) {
; SSE3-LABEL: extract_extract_v4f32_fsub_f32_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: subss %xmm0, %xmm1
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v4f32_fsub_f32_commute:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <4 x float> %x, i32 0
%x1 = extractelement <4 x float> %x, i32 1
%x01 = fsub float %x1, %x0
ret float %x01
}
define double @extract_extract_v2f64_fsub_f64(<2 x double> %x) {
; SSE3-LABEL: extract_extract_v2f64_fsub_f64:
; SSE3: # %bb.0:
; SSE3-NEXT: movapd %xmm0, %xmm1
; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-NEXT: subsd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v2f64_fsub_f64:
; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <2 x double> %x, i32 0
%x1 = extractelement <2 x double> %x, i32 1
%x01 = fsub double %x0, %x1
ret double %x01
}
define double @extract_extract_v2f64_fsub_f64_commute(<2 x double> %x) {
; SSE3-LABEL: extract_extract_v2f64_fsub_f64_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movapd %xmm0, %xmm1
; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-NEXT: subsd %xmm0, %xmm1
; SSE3-NEXT: movapd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v2f64_fsub_f64_commute:
; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <2 x double> %x, i32 0
%x1 = extractelement <2 x double> %x, i32 1
%x01 = fsub double %x1, %x0
ret double %x01
}
; 256-bit vectors, float/double, fadd/fsub
define float @extract_extract_v8f32_fadd_f32(<8 x float> %x) {
; SSE3-LABEL: extract_extract_v8f32_fadd_f32:
; SSE3: # %bb.0:
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: addss %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v8f32_fadd_f32:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%x0 = extractelement <8 x float> %x, i32 0
%x1 = extractelement <8 x float> %x, i32 1
%x01 = fadd float %x0, %x1
ret float %x01
}
define float @extract_extract_v8f32_fadd_f32_commute(<8 x float> %x) {
; SSE3-LABEL: extract_extract_v8f32_fadd_f32_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: addss %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v8f32_fadd_f32_commute:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%x0 = extractelement <8 x float> %x, i32 0
%x1 = extractelement <8 x float> %x, i32 1
%x01 = fadd float %x1, %x0
ret float %x01
}
define double @extract_extract_v4f64_fadd_f64(<4 x double> %x) {
; SSE3-LABEL: extract_extract_v4f64_fadd_f64:
; SSE3: # %bb.0:
@@ -815,43 +821,43 @@ define double @extract_extract_v4f64_fadd_f64_commute(<4 x double> %x) {
ret double %x01
}
define double @extract_extract_v2f64_fsub_f64(<2 x double> %x) {
; SSE3-LABEL: extract_extract_v2f64_fsub_f64:
define float @extract_extract_v8f32_fsub_f32(<8 x float> %x) {
; SSE3-LABEL: extract_extract_v8f32_fsub_f32:
; SSE3: # %bb.0:
; SSE3-NEXT: movapd %xmm0, %xmm1
; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-NEXT: subsd %xmm1, %xmm0
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: subss %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v2f64_fsub_f64:
; AVX-LABEL: extract_extract_v8f32_fsub_f32:
; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%x0 = extractelement <2 x double> %x, i32 0
%x1 = extractelement <2 x double> %x, i32 1
%x01 = fsub double %x0, %x1
ret double %x01
%x0 = extractelement <8 x float> %x, i32 0
%x1 = extractelement <8 x float> %x, i32 1
%x01 = fsub float %x0, %x1
ret float %x01
}
define double @extract_extract_v2f64_fsub_f64_commute(<2 x double> %x) {
; SSE3-LABEL: extract_extract_v2f64_fsub_f64_commute:
define float @extract_extract_v8f32_fsub_f32_commute(<8 x float> %x) {
; SSE3-LABEL: extract_extract_v8f32_fsub_f32_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movapd %xmm0, %xmm1
; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-NEXT: subsd %xmm0, %xmm1
; SSE3-NEXT: movapd %xmm1, %xmm0
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: subss %xmm0, %xmm1
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v2f64_fsub_f64_commute:
; AVX-LABEL: extract_extract_v8f32_fsub_f32_commute:
; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%x0 = extractelement <2 x double> %x, i32 0
%x1 = extractelement <2 x double> %x, i32 1
%x01 = fsub double %x1, %x0
ret double %x01
%x0 = extractelement <8 x float> %x, i32 0
%x1 = extractelement <8 x float> %x, i32 1
%x01 = fsub float %x1, %x0
ret float %x01
}
define double @extract_extract_v4f64_fsub_f64(<4 x double> %x) {
@@ -895,3 +901,233 @@ define double @extract_extract_v4f64_fsub_f64_commute(<4 x double> %x) {
ret double %x01
}
; 512-bit vectors, float/double, fadd/fsub
define float @extract_extract_v16f32_fadd_f32(<16 x float> %x) {
; SSE3-LABEL: extract_extract_v16f32_fadd_f32:
; SSE3: # %bb.0:
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: addss %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v16f32_fadd_f32:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%x0 = extractelement <16 x float> %x, i32 0
%x1 = extractelement <16 x float> %x, i32 1
%x01 = fadd float %x0, %x1
ret float %x01
}
define float @extract_extract_v16f32_fadd_f32_commute(<16 x float> %x) {
; SSE3-LABEL: extract_extract_v16f32_fadd_f32_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: addss %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v16f32_fadd_f32_commute:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%x0 = extractelement <16 x float> %x, i32 0
%x1 = extractelement <16 x float> %x, i32 1
%x01 = fadd float %x1, %x0
ret float %x01
}
define double @extract_extract_v8f64_fadd_f64(<8 x double> %x) {
; SSE3-LABEL: extract_extract_v8f64_fadd_f64:
; SSE3: # %bb.0:
; SSE3-NEXT: movapd %xmm0, %xmm1
; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-NEXT: addsd %xmm0, %xmm1
; SSE3-NEXT: movapd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v8f64_fadd_f64:
; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%x0 = extractelement <8 x double> %x, i32 0
%x1 = extractelement <8 x double> %x, i32 1
%x01 = fadd double %x0, %x1
ret double %x01
}
define double @extract_extract_v8f64_fadd_f64_commute(<8 x double> %x) {
; SSE3-LABEL: extract_extract_v8f64_fadd_f64_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movapd %xmm0, %xmm1
; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-NEXT: addsd %xmm0, %xmm1
; SSE3-NEXT: movapd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v8f64_fadd_f64_commute:
; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%x0 = extractelement <8 x double> %x, i32 0
%x1 = extractelement <8 x double> %x, i32 1
%x01 = fadd double %x1, %x0
ret double %x01
}
define float @extract_extract_v16f32_fsub_f32(<16 x float> %x) {
; SSE3-LABEL: extract_extract_v16f32_fsub_f32:
; SSE3: # %bb.0:
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: subss %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v16f32_fsub_f32:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%x0 = extractelement <16 x float> %x, i32 0
%x1 = extractelement <16 x float> %x, i32 1
%x01 = fsub float %x0, %x1
ret float %x01
}
define float @extract_extract_v16f32_fsub_f32_commute(<16 x float> %x) {
; SSE3-LABEL: extract_extract_v16f32_fsub_f32_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: subss %xmm0, %xmm1
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v16f32_fsub_f32_commute:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%x0 = extractelement <16 x float> %x, i32 0
%x1 = extractelement <16 x float> %x, i32 1
%x01 = fsub float %x1, %x0
ret float %x01
}
define double @extract_extract_v8f64_fsub_f64(<8 x double> %x) {
; SSE3-LABEL: extract_extract_v8f64_fsub_f64:
; SSE3: # %bb.0:
; SSE3-NEXT: movapd %xmm0, %xmm1
; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-NEXT: subsd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v8f64_fsub_f64:
; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%x0 = extractelement <8 x double> %x, i32 0
%x1 = extractelement <8 x double> %x, i32 1
%x01 = fsub double %x0, %x1
ret double %x01
}
define double @extract_extract_v8f64_fsub_f64_commute(<8 x double> %x) {
; SSE3-LABEL: extract_extract_v8f64_fsub_f64_commute:
; SSE3: # %bb.0:
; SSE3-NEXT: movapd %xmm0, %xmm1
; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-NEXT: subsd %xmm0, %xmm1
; SSE3-NEXT: movapd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v8f64_fsub_f64_commute:
; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%x0 = extractelement <8 x double> %x, i32 0
%x1 = extractelement <8 x double> %x, i32 1
%x01 = fsub double %x1, %x0
ret double %x01
}
; Check output when 1 or both extracts have extra uses.
define float @extract_extract_v4f32_fadd_f32_uses1(<4 x float> %x, float* %p) {
; SSE3-LABEL: extract_extract_v4f32_fadd_f32_uses1:
; SSE3: # %bb.0:
; SSE3-NEXT: movss %xmm0, (%rdi)
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: addss %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v4f32_fadd_f32_uses1:
; AVX: # %bb.0:
; AVX-NEXT: vmovss %xmm0, (%rdi)
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <4 x float> %x, i32 0
store float %x0, float* %p
%x1 = extractelement <4 x float> %x, i32 1
%x01 = fadd float %x0, %x1
ret float %x01
}
define float @extract_extract_v4f32_fadd_f32_uses2(<4 x float> %x, float* %p) {
; SSE3-LABEL: extract_extract_v4f32_fadd_f32_uses2:
; SSE3: # %bb.0:
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: movss %xmm1, (%rdi)
; SSE3-NEXT: addss %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v4f32_fadd_f32_uses2:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vmovss %xmm1, (%rdi)
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <4 x float> %x, i32 0
%x1 = extractelement <4 x float> %x, i32 1
store float %x1, float* %p
%x01 = fadd float %x0, %x1
ret float %x01
}
define float @extract_extract_v4f32_fadd_f32_uses3(<4 x float> %x, float* %p1, float* %p2) {
; SSE3-LABEL: extract_extract_v4f32_fadd_f32_uses3:
; SSE3: # %bb.0:
; SSE3-NEXT: movss %xmm0, (%rdi)
; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT: movss %xmm1, (%rsi)
; SSE3-NEXT: addss %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: extract_extract_v4f32_fadd_f32_uses3:
; AVX: # %bb.0:
; AVX-NEXT: vmovss %xmm0, (%rdi)
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vmovss %xmm1, (%rsi)
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <4 x float> %x, i32 0
store float %x0, float* %p1
%x1 = extractelement <4 x float> %x, i32 1
store float %x1, float* %p2
%x01 = fadd float %x0, %x1
ret float %x01
}
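
This commit is NFC, so the checks above still show the scalar lowering (movshdup/unpckhpd plus addss/subsd and the like). If the fold discussed in D56011 lands, the plain two-lane fadd case under fast-hops would be expected to collapse to one horizontal add. A hedged sketch of such a test (hypothetical; not part of this commit):

; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3,fast-hops | FileCheck %s
define float @hadd_lane01(<4 x float> %x) {
; CHECK-LABEL: hadd_lane01:
; CHECK:       # %bb.0:
; CHECK-NEXT:    haddps %xmm0, %xmm0
; CHECK-NEXT:    retq
  %x0 = extractelement <4 x float> %x, i32 0
  %x1 = extractelement <4 x float> %x, i32 1
  %x01 = fadd float %x0, %x1
  ret float %x01
}

The extra-uses tests above probe the profitability boundary for that fold: when an extracted scalar is also stored, the lane still has to be materialized separately, so forming haddps may not save an instruction.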