
[x86] Back out a bad choice about lowering v4i64 and pave the way for a more sane approach to AVX2 support.

Fundamentally, there is no useful way to lower integer vectors in AVX.
None. We always end up with a VINSERTF128 anyway, so we might as well
eagerly switch to the floating-point domain and do everything there.
This cleans up lots of weird and unlikely-to-be-correct differences
between integer and floating-point shuffles when we only have AVX1.

The other nice consequence is that by doing things this way we will make
it much easier to write the integer lowering routines as we won't need
to duplicate the logic to check for AVX vs. AVX2 in each one -- if we
actually try to lower a 256-bit vector as an integer vector, we have
AVX2 and can rely on it. I think this will make the code much simpler
and more comprehensible.
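
Concretely, the AVX1 strategy amounts to the integer pre-pass added at
the top of lower256BitVectorShuffle in this patch (the full change is in
the diff below). A condensed sketch of that code, not a drop-in routine:

  // With AVX1 there are no useful 256-bit integer shuffles, so bitcast the
  // operands to an equally sized floating-point type, shuffle there, and
  // bitcast the result back to the original integer type.
  if (VT.isInteger()) {
    int ElementBits = VT.getScalarSizeInBits();
    if (ElementBits < 32)
      // No matching FP element type (v16i16/v32i8); split into 128-bit halves.
      return splitAndLower256BitVectorShuffle(Op, V1, V2, Subtarget, DAG);

    MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
                                VT.getVectorNumElements());
    V1 = DAG.getNode(ISD::BITCAST, DL, FpVT, V1);
    V2 = DAG.getNode(ISD::BITCAST, DL, FpVT, V2);
    return DAG.getNode(ISD::BITCAST, DL, VT,
                       DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
  }

With this in place, v4i64 and v8i32 reuse the v4f64 and v8f32 lowering
wholesale on AVX1; only the sub-32-bit element types still need the
split-and-recombine fallback.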

Currently, I've disabled *all* support for AVX2 so that we always fall
back to AVX. This keeps everything working rather than asserting. That
will go away with the subsequent series of patches that provide
a baseline AVX2 implementation.

Please note, I'm going to implement AVX2 *without access to hardware*.
That means I cannot correctness test this path. I will be relying on
those with access to AVX2 hardware to do correctness testing and fix
bugs here, but as a courtesy I'm trying to sketch out the framework for
the new-style vector shuffle lowering in the context of the AVX2 ISA.

llvm-svn: 218228
Author: Chandler Carruth
Date:   2014-09-22 00:32:15 +00:00
parent 9a747d4524
commit 973771fcb4
2 changed files with 138 additions and 264 deletions

File: lib/Target/X86/X86ISelLowering.cpp

@@ -9309,44 +9309,6 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                     DAG.getConstant(BlendMask, MVT::i8));
 }
 
-/// \brief Handle lowering of 4-lane 64-bit integer shuffles.
-///
-/// Largely delegates to common code when we have AVX2 and to the floating-point
-/// code when we only have AVX.
-static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
-                                       const X86Subtarget *Subtarget,
-                                       SelectionDAG &DAG) {
-  SDLoc DL(Op);
-  assert(Op.getSimpleValueType() == MVT::v4i64 && "Bad shuffle type!");
-  assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
-  assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
-  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
-  ArrayRef<int> Mask = SVOp->getMask();
-  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
-
-  // FIXME: If we have AVX2, we should delegate to generic code as crossing
-  // shuffles aren't a problem and FP and int have the same patterns.
-  if (is128BitLaneCrossingShuffleMask(MVT::v4i64, Mask))
-    return splitAndLower256BitVectorShuffle(Op, V1, V2, Subtarget, DAG);
-
-  // If we have a single input to the zero element, insert that into V1 if we
-  // can do so cheaply.
-  int NumV2Elements =
-      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
-  if (NumV2Elements == 1 && Mask[0] >= 4)
-    if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
-            MVT::v4i64, DL, V1, V2, Mask, Subtarget, DAG))
-      return Insertion;
-
-  // AVX1 doesn't provide any facilities for v4i64 shuffles, bitcast and
-  // delegate to floating point code.
-  V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f64, V1);
-  V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f64, V2);
-  return DAG.getNode(ISD::BITCAST, DL, MVT::v4i64,
-                     lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG));
-}
-
 /// \brief Handle lowering of 8-lane 32-bit floating point shuffles.
 ///
 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
@@ -9442,21 +9404,42 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
 static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                         MVT VT, const X86Subtarget *Subtarget,
                                         SelectionDAG &DAG) {
+  SDLoc DL(Op);
+  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
+  ArrayRef<int> Mask = SVOp->getMask();
+
+  // There is a really nice hard cut-over between AVX1 and AVX2 that means we can
+  // check for those subtargets here and avoid much of the subtarget querying in
+  // the per-vector-type lowering routines. With AVX1 we have essentially *zero*
+  // ability to manipulate a 256-bit vector with integer types. Since we'll use
+  // floating point types there eventually, just immediately cast everything to
+  // a float and operate entirely in that domain.
+  // FIXME: Actually test for AVX2 when we have implemented it.
+  if (VT.isInteger()) {
+    int ElementBits = VT.getScalarSizeInBits();
+    if (ElementBits < 32)
+      // No floating point type available, decompose into 128-bit vectors.
+      return splitAndLower256BitVectorShuffle(Op, V1, V2, Subtarget, DAG);
+
+    MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
+                                VT.getVectorNumElements());
+    V1 = DAG.getNode(ISD::BITCAST, DL, FpVT, V1);
+    V2 = DAG.getNode(ISD::BITCAST, DL, FpVT, V2);
+    return DAG.getNode(ISD::BITCAST, DL, VT,
+                       DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
+  }
+
   switch (VT.SimpleTy) {
   case MVT::v4f64:
     return lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
   case MVT::v4i64:
-    return lowerV4I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
+    llvm_unreachable("AVX2 integer support not yet implemented!");
   case MVT::v8f32:
     return lowerV8F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
   case MVT::v8i32:
   case MVT::v16i16:
   case MVT::v32i8:
-    // Fall back to the basic pattern of extracting the high half and forming
-    // a 4-way blend.
-    // FIXME: Add targeted lowering for each type that can document rationale
-    // for delegating to this when necessary.
-    return splitAndLower256BitVectorShuffle(Op, V1, V2, Subtarget, DAG);
+    llvm_unreachable("AVX2 integer support not yet implemented!");
   default:
     llvm_unreachable("Not a valid 256-bit x86 vector type!");

File: test/CodeGen/X86/vector-shuffle-256-v4.ll

@@ -1,156 +1,96 @@
-; RUN: llc < %s -mcpu=x86-64 -mattr=+avx -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=AVX1
-; RUN: llc < %s -mcpu=x86-64 -mattr=+avx2 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=AVX2
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx2 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
 
 target triple = "x86_64-unknown-unknown"
 
 define <4 x i64> @shuffle_v4i64_0001(<4 x i64> %a, <4 x i64> %b) {
-; AVX1-LABEL: @shuffle_v4i64_0001
-; AVX1: # BB#0:
-; AVX1-NEXT: vpshufd {{.*}} # xmm1 = xmm0[0,1,0,1]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: @shuffle_v4i64_0001
-; AVX2: # BB#0:
-; AVX2-NEXT: vpshufd {{.*}} # xmm1 = xmm0[0,1,0,1]
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: retq
+; ALL-LABEL: @shuffle_v4i64_0001
+; ALL: # BB#0:
+; ALL-NEXT: vunpcklpd {{.*}} # xmm1 = xmm0[0,0]
+; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT: retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 1>
   ret <4 x i64> %shuffle
 }
 
 define <4 x i64> @shuffle_v4i64_0020(<4 x i64> %a, <4 x i64> %b) {
-; AVX1-LABEL: @shuffle_v4i64_0020
-; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm1 = xmm1[0],xmm0[0]
-; AVX1-NEXT: vpshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: @shuffle_v4i64_0020
-; AVX2: # BB#0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpunpcklqdq {{.*}} # xmm1 = xmm1[0],xmm0[0]
-; AVX2-NEXT: vpshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: retq
+; ALL-LABEL: @shuffle_v4i64_0020
+; ALL: # BB#0:
+; ALL-NEXT: vextractf128 $1, %ymm0, %xmm1
+; ALL-NEXT: vunpcklpd {{.*}} # xmm1 = xmm1[0],xmm0[0]
+; ALL-NEXT: vunpcklpd {{.*}} # xmm0 = xmm0[0,0]
+; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT: retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 0>
   ret <4 x i64> %shuffle
 }
 
 define <4 x i64> @shuffle_v4i64_0112(<4 x i64> %a, <4 x i64> %b) {
-; AVX1-LABEL: @shuffle_v4i64_0112
-; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpalignr {{.*}} # xmm1 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: @shuffle_v4i64_0112
-; AVX2: # BB#0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpalignr {{.*}} # xmm1 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: retq
+; ALL-LABEL: @shuffle_v4i64_0112
+; ALL: # BB#0:
+; ALL-NEXT: vextractf128 $1, %ymm0, %xmm1
+; ALL-NEXT: vshufpd {{.*}} # xmm1 = xmm0[1],xmm1[0]
+; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT: retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 1, i32 1, i32 2>
   ret <4 x i64> %shuffle
 }
 
 define <4 x i64> @shuffle_v4i64_0300(<4 x i64> %a, <4 x i64> %b) {
-; AVX1-LABEL: @shuffle_v4i64_0300
-; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpblendw {{.*}} # xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT: vpshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: @shuffle_v4i64_0300
-; AVX2: # BB#0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpblendw {{.*}} # xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX2-NEXT: vpshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: retq
+; ALL-LABEL: @shuffle_v4i64_0300
+; ALL: # BB#0:
+; ALL-NEXT: vextractf128 $1, %ymm0, %xmm1
+; ALL-NEXT: vblendpd {{.*}} # xmm1 = xmm0[0],xmm1[1]
+; ALL-NEXT: vunpcklpd {{.*}} # xmm0 = xmm0[0,0]
+; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT: retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 3, i32 0, i32 0>
   ret <4 x i64> %shuffle
 }
 
 define <4 x i64> @shuffle_v4i64_1000(<4 x i64> %a, <4 x i64> %b) {
-; AVX1-LABEL: @shuffle_v4i64_1000
-; AVX1: # BB#0:
-; AVX1-NEXT: vpshufd {{.*}} # xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: @shuffle_v4i64_1000
-; AVX2: # BB#0:
-; AVX2-NEXT: vpshufd {{.*}} # xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: retq
+; ALL-LABEL: @shuffle_v4i64_1000
+; ALL: # BB#0:
+; ALL-NEXT: vpermilpd {{.*}} # xmm1 = xmm0[1,0]
+; ALL-NEXT: vunpcklpd {{.*}} # xmm0 = xmm0[0,0]
+; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT: retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
   ret <4 x i64> %shuffle
 }
 
 define <4 x i64> @shuffle_v4i64_2200(<4 x i64> %a, <4 x i64> %b) {
-; AVX1-LABEL: @shuffle_v4i64_2200
-; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*}} # xmm1 = xmm1[0,1,0,1]
-; AVX1-NEXT: vpshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: @shuffle_v4i64_2200
-; AVX2: # BB#0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpshufd {{.*}} # xmm1 = xmm1[0,1,0,1]
-; AVX2-NEXT: vpshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: retq
+; ALL-LABEL: @shuffle_v4i64_2200
+; ALL: # BB#0:
+; ALL-NEXT: vextractf128 $1, %ymm0, %xmm1
+; ALL-NEXT: vunpcklpd {{.*}} # xmm1 = xmm1[0,0]
+; ALL-NEXT: vunpcklpd {{.*}} # xmm0 = xmm0[0,0]
+; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT: retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 2, i32 2, i32 0, i32 0>
   ret <4 x i64> %shuffle
 }
 
 define <4 x i64> @shuffle_v4i64_3330(<4 x i64> %a, <4 x i64> %b) {
-; AVX1-LABEL: @shuffle_v4i64_3330
-; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpalignr {{.*}} # xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
-; AVX1-NEXT: vpshufd {{.*}} # xmm1 = xmm1[2,3,2,3]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: @shuffle_v4i64_3330
-; AVX2: # BB#0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpalignr {{.*}} # xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
-; AVX2-NEXT: vpshufd {{.*}} # xmm1 = xmm1[2,3,2,3]
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: retq
+; ALL-LABEL: @shuffle_v4i64_3330
+; ALL: # BB#0:
+; ALL-NEXT: vextractf128 $1, %ymm0, %xmm1
+; ALL-NEXT: vshufpd {{.*}} # xmm0 = xmm1[1],xmm0[0]
+; ALL-NEXT: vmovhlps {{.*}} # xmm1 = xmm1[1,1]
+; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT: retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 0>
   ret <4 x i64> %shuffle
 }
 
 define <4 x i64> @shuffle_v4i64_3210(<4 x i64> %a, <4 x i64> %b) {
-; AVX1-LABEL: @shuffle_v4i64_3210
-; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*}} # xmm1 = xmm1[2,3,0,1]
-; AVX1-NEXT: vpshufd {{.*}} # xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: @shuffle_v4i64_3210
-; AVX2: # BB#0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpshufd {{.*}} # xmm1 = xmm1[2,3,0,1]
-; AVX2-NEXT: vpshufd {{.*}} # xmm0 = xmm0[2,3,0,1]
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: retq
+; ALL-LABEL: @shuffle_v4i64_3210
+; ALL: # BB#0:
+; ALL-NEXT: vextractf128 $1, %ymm0, %xmm1
+; ALL-NEXT: vpermilpd {{.*}} # xmm1 = xmm1[1,0]
+; ALL-NEXT: vpermilpd {{.*}} # xmm0 = xmm0[1,0]
+; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT: retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
   ret <4 x i64> %shuffle
 }
@@ -402,85 +342,51 @@ define <4 x double> @shuffle_v4f64_0167(<4 x double> %a, <4 x double> %b) {
 }
 
 define <4 x i64> @shuffle_v4i64_0124(<4 x i64> %a, <4 x i64> %b) {
-; AVX1-LABEL: @shuffle_v4i64_0124
-; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpshufd {{.*}} # xmm1 = xmm1[0,1,0,1]
-; AVX1-NEXT: vpblendw {{.*}} # xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: @shuffle_v4i64_0124
-; AVX2: # BB#0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX2-NEXT: vpshufd {{.*}} # xmm1 = xmm1[0,1,0,1]
-; AVX2-NEXT: vpblendw {{.*}} # xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: retq
+; ALL-LABEL: @shuffle_v4i64_0124
+; ALL: # BB#0:
+; ALL-NEXT: vextractf128 $1, %ymm0, %xmm2
+; ALL-NEXT: vunpcklpd {{.*}} # xmm1 = xmm1[0,0]
+; ALL-NEXT: vblendpd {{.*}} # xmm1 = xmm2[0],xmm1[1]
+; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT: retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
   ret <4 x i64> %shuffle
 }
 
 define <4 x i64> @shuffle_v4i64_0142(<4 x i64> %a, <4 x i64> %b) {
-; AVX1-LABEL: @shuffle_v4i64_0142
-; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpshufd {{.*}} # xmm2 = xmm2[0,1,0,1]
-; AVX1-NEXT: vpblendw {{.*}} # xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: @shuffle_v4i64_0142
-; AVX2: # BB#0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX2-NEXT: vpshufd {{.*}} # xmm2 = xmm2[0,1,0,1]
-; AVX2-NEXT: vpblendw {{.*}} # xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: retq
+; ALL-LABEL: @shuffle_v4i64_0142
+; ALL: # BB#0:
+; ALL-NEXT: vextractf128 $1, %ymm0, %xmm2
+; ALL-NEXT: vunpcklpd {{.*}} # xmm2 = xmm2[0,0]
+; ALL-NEXT: vblendpd {{.*}} # xmm1 = xmm1[0],xmm2[1]
+; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT: retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 2>
   ret <4 x i64> %shuffle
 }
 
 define <4 x i64> @shuffle_v4i64_0412(<4 x i64> %a, <4 x i64> %b) {
-; AVX1-LABEL: @shuffle_v4i64_0412
-; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpalignr {{.*}} # xmm2 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
-; AVX1-NEXT: vpshufd {{.*}} # xmm1 = xmm1[0,1,0,1]
-; AVX1-NEXT: vpblendw {{.*}} # xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: @shuffle_v4i64_0412
-; AVX2: # BB#0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX2-NEXT: vpalignr {{.*}} # xmm2 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
-; AVX2-NEXT: vpshufd {{.*}} # xmm1 = xmm1[0,1,0,1]
-; AVX2-NEXT: vpblendw {{.*}} # xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; AVX2-NEXT: retq
+; ALL-LABEL: @shuffle_v4i64_0412
+; ALL: # BB#0:
+; ALL-NEXT: vextractf128 $1, %ymm0, %xmm2
+; ALL-NEXT: vshufpd {{.*}} # xmm2 = xmm0[1],xmm2[0]
+; ALL-NEXT: vunpcklpd {{.*}} # xmm1 = xmm1[0,0]
+; ALL-NEXT: vblendpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
+; ALL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; ALL-NEXT: retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 2>
   ret <4 x i64> %shuffle
 }
 
 define <4 x i64> @shuffle_v4i64_4012(<4 x i64> %a, <4 x i64> %b) {
-; AVX1-LABEL: @shuffle_v4i64_4012
-; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpalignr {{.*}} # xmm2 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
-; AVX1-NEXT: vpshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
-; AVX1-NEXT: vpblendw {{.*}} # xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: @shuffle_v4i64_4012
-; AVX2: # BB#0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX2-NEXT: vpalignr {{.*}} # xmm2 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
-; AVX2-NEXT: vpshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
-; AVX2-NEXT: vpblendw {{.*}} # xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; AVX2-NEXT: retq
+; ALL-LABEL: @shuffle_v4i64_4012
+; ALL: # BB#0:
+; ALL-NEXT: vextractf128 $1, %ymm0, %xmm2
+; ALL-NEXT: vshufpd {{.*}} # xmm2 = xmm0[1],xmm2[0]
+; ALL-NEXT: vunpcklpd {{.*}} # xmm0 = xmm0[0,0]
+; ALL-NEXT: vblendpd {{.*}} # xmm0 = xmm1[0],xmm0[1]
+; ALL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; ALL-NEXT: retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
   ret <4 x i64> %shuffle
 }
@@ -495,23 +401,14 @@ define <4 x i64> @shuffle_v4i64_0145(<4 x i64> %a, <4 x i64> %b) {
 }
 
 define <4 x i64> @shuffle_v4i64_0451(<4 x i64> %a, <4 x i64> %b) {
-; AVX1-LABEL: @shuffle_v4i64_0451
-; AVX1: # BB#0:
-; AVX1-NEXT: vpshufd {{.*}} # xmm2 = xmm1[2,3,0,1]
-; AVX1-NEXT: vpblendw {{.*}} # xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-NEXT: vpshufd {{.*}} # xmm1 = xmm1[0,1,0,1]
-; AVX1-NEXT: vpblendw {{.*}} # xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: @shuffle_v4i64_0451
-; AVX2: # BB#0:
-; AVX2-NEXT: vpshufd {{.*}} # xmm2 = xmm1[2,3,0,1]
-; AVX2-NEXT: vpblendw {{.*}} # xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
-; AVX2-NEXT: vpshufd {{.*}} # xmm1 = xmm1[0,1,0,1]
-; AVX2-NEXT: vpblendw {{.*}} # xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; AVX2-NEXT: retq
+; ALL-LABEL: @shuffle_v4i64_0451
+; ALL: # BB#0:
+; ALL-NEXT: vpermilpd {{.*}} # xmm2 = xmm1[1,0]
+; ALL-NEXT: vblendpd {{.*}} # xmm2 = xmm2[0],xmm0[1]
+; ALL-NEXT: vunpcklpd {{.*}} # xmm1 = xmm1[0,0]
+; ALL-NEXT: vblendpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
+; ALL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; ALL-NEXT: retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 4, i32 5, i32 1>
   ret <4 x i64> %shuffle
 }
@@ -526,36 +423,30 @@ define <4 x i64> @shuffle_v4i64_4501(<4 x i64> %a, <4 x i64> %b) {
 }
 
 define <4 x i64> @shuffle_v4i64_4015(<4 x i64> %a, <4 x i64> %b) {
-; AVX1-LABEL: @shuffle_v4i64_4015
-; AVX1: # BB#0:
-; AVX1-NEXT: vpshufd {{.*}} # xmm2 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpblendw {{.*}} # xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT: vpshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
-; AVX1-NEXT: vpblendw {{.*}} # xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: @shuffle_v4i64_4015
-; AVX2: # BB#0:
-; AVX2-NEXT: vpshufd {{.*}} # xmm2 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpblendw {{.*}} # xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; AVX2-NEXT: vpshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
-; AVX2-NEXT: vpblendw {{.*}} # xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; AVX2-NEXT: retq
+; ALL-LABEL: @shuffle_v4i64_4015
+; ALL: # BB#0:
+; ALL-NEXT: vpermilpd {{.*}} # xmm2 = xmm0[1,0]
+; ALL-NEXT: vblendpd {{.*}} # xmm2 = xmm2[0],xmm1[1]
+; ALL-NEXT: vunpcklpd {{.*}} # xmm0 = xmm0[0,0]
+; ALL-NEXT: vblendpd {{.*}} # xmm0 = xmm1[0],xmm0[1]
+; ALL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; ALL-NEXT: retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 4, i32 0, i32 1, i32 5>
   ret <4 x i64> %shuffle
 }
 
 define <4 x i64> @stress_test1(<4 x i64> %a, <4 x i64> %b) {
-; AVX1-LABEL: @stress_test1
-; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
-; AVX1-NEXT: vpshufd {{.*}} # xmm0 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpblendw {{.*}} # xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT: vpshufd {{.*}} # xmm1 = xmm1[2,3,0,1]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: retq
+; ALL-LABEL: @stress_test1
+; ALL: # BB#0:
+; ALL-NEXT: vpermilpd {{.*}} # xmm0 = xmm1[1,0]
+; ALL-NEXT: vpermilpd {{.*}} # xmm0 = xmm0[1,0]
+; ALL-NEXT: vextractf128 $1, %ymm1, %xmm1
+; ALL-NEXT: vmovhlps {{.*}} # xmm1 = xmm1[1,1]
+; ALL-NEXT: vpermilpd {{.*}} # xmm1 = xmm1[1,0]
+; ALL-NEXT: vblendpd {{.*}} # xmm1 = xmm1[0],xmm0[1]
+; ALL-NEXT: vpermilpd {{.*}} # xmm0 = xmm0[1,0]
+; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT: retq
   %c = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> <i32 3, i32 1, i32 1, i32 0>
   %d = shufflevector <4 x i64> %c, <4 x i64> undef, <4 x i32> <i32 3, i32 undef, i32 2, i32 undef>
   %e = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> <i32 3, i32 3, i32 1, i32 undef>