
[X86][AVX] Call SimplifyDemandedBits on MaskedLoadSDNode with non-boolean masks

On X86 (AVX1/AVX2), non-boolean masked loads only demand the sign bit of each mask element; we already do the equivalent for masked stores.

Annoyingly, I can't easily handle this inside TargetLowering::SimplifyDemandedBits, as this is an x86-specific case for a generic node.
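For context, a standalone sketch of the demanded-bits value involved (illustrative only, not part of the patch): AVX's vmaskmovps/vpmaskmovd select lanes purely on the sign bit of each mask element, so once only the MSB of each lane is demanded, the sign-splat instructions that follow the shift become dead. A minimal C++ example, assuming a v8i32 mask:

#include "llvm/ADT/APInt.h"
#include <cassert>

int main() {
  // For a v8i32 mask, only bit 31 of each lane is demanded.
  unsigned EltBits = 32;
  llvm::APInt Demanded = llvm::APInt::getSignMask(EltBits);
  assert(Demanded == llvm::APInt(EltBits, 0x80000000ULL));
  // A legalized mask of the form (sra (shl X, 31), 31) has the same bit 31
  // as the plain (shl X, 31), so demanding only bit 31 lets the sra fold
  // away; that is the 'vpsrad $31' removed throughout the tests below.
  return 0;
}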

Differential Revision: https://reviews.llvm.org/D80478
Simon Pilgrim 2020-05-24 09:41:02 +01:00
parent dd3a39032b
commit 414197e88c
3 changed files with 22 additions and 32 deletions


@@ -42951,7 +42951,7 @@ combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
 static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget &Subtarget) {
-  MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
+  auto *Mld = cast<MaskedLoadSDNode>(N);
 
   // TODO: Expanding load with constant mask may be optimized as well.
   if (Mld->isExpandingLoad())
@@ -42960,12 +42960,33 @@ static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
   if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
     if (SDValue ScalarLoad = reduceMaskedLoadToScalarLoad(Mld, DAG, DCI))
       return ScalarLoad;
+
     // TODO: Do some AVX512 subsets benefit from this transform?
     if (!Subtarget.hasAVX512())
       if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
         return Blend;
   }
 
+  // If the mask value has been legalized to a non-boolean vector, try to
+  // simplify ops leading up to it. We only demand the MSB of each lane.
+  SDValue Mask = Mld->getMask();
+  if (Mask.getScalarValueSizeInBits() != 1) {
+    EVT VT = Mld->getValueType(0);
+    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+    APInt DemandedBits(APInt::getSignMask(VT.getScalarSizeInBits()));
+    if (TLI.SimplifyDemandedBits(Mask, DemandedBits, DCI)) {
+      if (N->getOpcode() != ISD::DELETED_NODE)
+        DCI.AddToWorklist(N);
+      return SDValue(N, 0);
+    }
+    if (SDValue NewMask =
+            TLI.SimplifyMultipleUseDemandedBits(Mask, DemandedBits, DAG))
+      return DAG.getMaskedLoad(
+          VT, SDLoc(N), Mld->getChain(), Mld->getBasePtr(), Mld->getOffset(),
+          NewMask, Mld->getPassThru(), Mld->getMemoryVT(), Mld->getMemOperand(),
+          Mld->getAddressingMode(), Mld->getExtensionType());
+  }
+
   return SDValue();
 }
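A note on the combiner plumbing above (my gloss, not from the patch): SimplifyDemandedBits rewrites operands in place and may cause N itself to be CSE'd away, hence the ISD::DELETED_NODE check before requeueing; SimplifyMultipleUseDemandedBits instead returns an existing, simpler value for a multi-use mask without mutating it, so the combine builds a fresh masked load around it. A hedged sketch of the requeue idiom as a standalone helper (hypothetical name, not in the tree):

// Hypothetical helper mirroring the idiom in combineMaskedLoad above.
static SDValue requeueIfMaskSimplified(SDNode *N, SDValue Mask,
                                       const APInt &DemandedBits,
                                       SelectionDAG &DAG,
                                       TargetLowering::DAGCombinerInfo &DCI) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.SimplifyDemandedBits(Mask, DemandedBits, DCI))
    return SDValue(); // nothing changed
  // SimplifyDemandedBits may have replaced N; only revisit it if it survived.
  if (N->getOpcode() != ISD::DELETED_NODE)
    DCI.AddToWorklist(N);
  return SDValue(N, 0); // tell the combiner N was updated
}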


@@ -1163,10 +1163,8 @@ define <8 x float> @load_v8f32_v8i1_zero(<8 x i1> %mask, <8 x float>* %addr) {
 ; AVX1: ## %bb.0:
 ; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
-; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
 ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
 ; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: vmaskmovps (%rdi), %ymm0, %ymm0
 ; AVX1-NEXT: retq
@@ -1175,7 +1173,6 @@ define <8 x float> @load_v8f32_v8i1_zero(<8 x i1> %mask, <8 x float>* %addr) {
 ; AVX2: ## %bb.0:
 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT: vpslld $31, %ymm0, %ymm0
-; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
 ; AVX2-NEXT: vmaskmovps (%rdi), %ymm0, %ymm0
 ; AVX2-NEXT: retq
 ;
@@ -2416,10 +2413,8 @@ define <8 x i32> @load_v8i32_v8i1(<8 x i1> %mask, <8 x i32>* %addr, <8 x i32> %d
 ; AVX1: ## %bb.0:
 ; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX1-NEXT: vpslld $31, %xmm2, %xmm2
-; AVX1-NEXT: vpsrad $31, %xmm2, %xmm2
 ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
 ; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
 ; AVX1-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2
 ; AVX1-NEXT: vblendvps %ymm0, %ymm2, %ymm1, %ymm0
@@ -2429,7 +2424,6 @@ define <8 x i32> @load_v8i32_v8i1(<8 x i1> %mask, <8 x i32>* %addr, <8 x i32> %d
 ; AVX2: ## %bb.0:
 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT: vpslld $31, %ymm0, %ymm0
-; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
 ; AVX2-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm2
 ; AVX2-NEXT: vblendvps %ymm0, %ymm2, %ymm1, %ymm0
 ; AVX2-NEXT: retq
@@ -2612,10 +2606,8 @@ define <8 x i32> @load_v8i32_v8i1_zero(<8 x i1> %mask, <8 x i32>* %addr) {
 ; AVX1: ## %bb.0:
 ; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
-; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
 ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
 ; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: vmaskmovps (%rdi), %ymm0, %ymm0
 ; AVX1-NEXT: retq
@@ -2624,7 +2616,6 @@ define <8 x i32> @load_v8i32_v8i1_zero(<8 x i1> %mask, <8 x i32>* %addr) {
 ; AVX2: ## %bb.0:
 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT: vpslld $31, %ymm0, %ymm0
-; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
 ; AVX2-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm0
 ; AVX2-NEXT: retq
 ;


@@ -28,7 +28,6 @@ define <9 x float> @mload_split9(<9 x i1> %mask, <9 x float>* %addr, <9 x float>
 ; CHECK-NEXT: vpinsrw $3, %r8d, %xmm2, %xmm2
 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
 ; CHECK-NEXT: vpslld $31, %xmm3, %xmm3
-; CHECK-NEXT: vpsrad $31, %xmm3, %xmm3
 ; CHECK-NEXT: vpinsrw $4, %r9d, %xmm2, %xmm2
 ; CHECK-NEXT: movl {{[0-9]+}}(%rsp), %ecx
 ; CHECK-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
@@ -38,14 +37,12 @@ define <9 x float> @mload_split9(<9 x i1> %mask, <9 x float>* %addr, <9 x float>
 ; CHECK-NEXT: vpinsrw $7, %ecx, %xmm2, %xmm2
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm2[4,4,5,5,6,6,7,7]
 ; CHECK-NEXT: vpslld $31, %xmm2, %xmm2
-; CHECK-NEXT: vpsrad $31, %xmm2, %xmm2
 ; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
 ; CHECK-NEXT: vmaskmovps (%rdi), %ymm2, %ymm3
 ; CHECK-NEXT: vblendvps %ymm2, %ymm3, %ymm0, %ymm0
 ; CHECK-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
 ; CHECK-NEXT: vmovd %ecx, %xmm2
 ; CHECK-NEXT: vpslld $31, %xmm2, %xmm2
-; CHECK-NEXT: vpsrad $31, %xmm2, %xmm2
 ; CHECK-NEXT: vmaskmovps 32(%rdi), %ymm2, %ymm3
 ; CHECK-NEXT: vblendvps %xmm2, %xmm3, %xmm1, %xmm1
 ; CHECK-NEXT: vmovss %xmm1, 32(%rax)
@@ -79,7 +76,6 @@ define <13 x float> @mload_split13(<13 x i1> %mask, <13 x float>* %addr, <13 x f
 ; CHECK-NEXT: vpinsrw $3, %r8d, %xmm3, %xmm3
 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
 ; CHECK-NEXT: vpslld $31, %xmm4, %xmm4
-; CHECK-NEXT: vpsrad $31, %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrw $4, %r9d, %xmm3, %xmm3
 ; CHECK-NEXT: movl {{[0-9]+}}(%rsp), %ecx
 ; CHECK-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
@@ -89,7 +85,6 @@ define <13 x float> @mload_split13(<13 x i1> %mask, <13 x float>* %addr, <13 x f
 ; CHECK-NEXT: vpinsrw $7, %ecx, %xmm3, %xmm3
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
 ; CHECK-NEXT: vpslld $31, %xmm3, %xmm3
-; CHECK-NEXT: vpsrad $31, %xmm3, %xmm3
 ; CHECK-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
 ; CHECK-NEXT: vmaskmovps (%rdi), %ymm3, %ymm4
 ; CHECK-NEXT: vblendvps %ymm3, %ymm4, %ymm2, %ymm2
@@ -103,12 +98,10 @@ define <13 x float> @mload_split13(<13 x i1> %mask, <13 x float>* %addr, <13 x f
 ; CHECK-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3
 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
 ; CHECK-NEXT: vpslld $31, %xmm4, %xmm4
-; CHECK-NEXT: vpsrad $31, %xmm4, %xmm4
 ; CHECK-NEXT: movl {{[0-9]+}}(%rsp), %ecx
 ; CHECK-NEXT: vpinsrw $4, %ecx, %xmm3, %xmm3
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
 ; CHECK-NEXT: vpslld $31, %xmm3, %xmm3
-; CHECK-NEXT: vpsrad $31, %xmm3, %xmm3
 ; CHECK-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm5
 ; CHECK-NEXT: vmaskmovps 32(%rdi), %ymm5, %ymm5
 ; CHECK-NEXT: vblendvps %xmm4, %xmm5, %xmm1, %xmm1
@@ -147,7 +140,6 @@ define <14 x float> @mload_split14(<14 x i1> %mask, <14 x float>* %addr, <14 x f
 ; CHECK-NEXT: vpinsrw $3, %r8d, %xmm3, %xmm3
 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
 ; CHECK-NEXT: vpslld $31, %xmm4, %xmm4
-; CHECK-NEXT: vpsrad $31, %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrw $4, %r9d, %xmm3, %xmm3
 ; CHECK-NEXT: movl {{[0-9]+}}(%rsp), %ecx
 ; CHECK-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
@@ -157,7 +149,6 @@ define <14 x float> @mload_split14(<14 x i1> %mask, <14 x float>* %addr, <14 x f
 ; CHECK-NEXT: vpinsrw $7, %ecx, %xmm3, %xmm3
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
 ; CHECK-NEXT: vpslld $31, %xmm3, %xmm3
-; CHECK-NEXT: vpsrad $31, %xmm3, %xmm3
 ; CHECK-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
 ; CHECK-NEXT: vmaskmovps (%rdi), %ymm3, %ymm4
 ; CHECK-NEXT: vblendvps %ymm3, %ymm4, %ymm2, %ymm2
@@ -171,14 +162,12 @@ define <14 x float> @mload_split14(<14 x i1> %mask, <14 x float>* %addr, <14 x f
 ; CHECK-NEXT: vpinsrw $3, %ecx, %xmm3, %xmm3
 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
 ; CHECK-NEXT: vpslld $31, %xmm4, %xmm4
-; CHECK-NEXT: vpsrad $31, %xmm4, %xmm4
 ; CHECK-NEXT: movl {{[0-9]+}}(%rsp), %ecx
 ; CHECK-NEXT: vpinsrw $4, %ecx, %xmm3, %xmm3
 ; CHECK-NEXT: movl {{[0-9]+}}(%rsp), %ecx
 ; CHECK-NEXT: vpinsrw $5, %ecx, %xmm3, %xmm3
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
 ; CHECK-NEXT: vpslld $31, %xmm3, %xmm3
-; CHECK-NEXT: vpsrad $31, %xmm3, %xmm3
 ; CHECK-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm5
 ; CHECK-NEXT: vmaskmovps 32(%rdi), %ymm5, %ymm5
 ; CHECK-NEXT: vextractf128 $1, %ymm5, %xmm6
@@ -222,14 +211,12 @@ define <17 x float> @mload_split17(<17 x i1> %mask, <17 x float>* %addr, <17 x f
 ; CHECK-NEXT: vpinsrb $6, %r8d, %xmm3, %xmm3
 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
 ; CHECK-NEXT: vpslld $31, %xmm4, %xmm4
-; CHECK-NEXT: vpsrad $31, %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrb $8, %r9d, %xmm3, %xmm3
 ; CHECK-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm3, %xmm3
 ; CHECK-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm3, %xmm3
 ; CHECK-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm3, %xmm3
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
 ; CHECK-NEXT: vpslld $31, %xmm3, %xmm3
-; CHECK-NEXT: vpsrad $31, %xmm3, %xmm3
 ; CHECK-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
 ; CHECK-NEXT: vmaskmovps (%r10), %ymm3, %ymm4
 ; CHECK-NEXT: vblendvps %ymm3, %ymm4, %ymm2, %ymm2
@@ -239,21 +226,18 @@ define <17 x float> @mload_split17(<17 x i1> %mask, <17 x float>* %addr, <17 x f
 ; CHECK-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm3, %xmm3
 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
 ; CHECK-NEXT: vpslld $31, %xmm4, %xmm4
-; CHECK-NEXT: vpsrad $31, %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm3, %xmm3
 ; CHECK-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm3, %xmm3
 ; CHECK-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm3, %xmm3
 ; CHECK-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm3, %xmm3
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
 ; CHECK-NEXT: vpslld $31, %xmm3, %xmm3
-; CHECK-NEXT: vpsrad $31, %xmm3, %xmm3
 ; CHECK-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
 ; CHECK-NEXT: vmaskmovps 32(%r10), %ymm3, %ymm4
 ; CHECK-NEXT: vblendvps %ymm3, %ymm4, %ymm1, %ymm1
 ; CHECK-NEXT: vmovd %edi, %xmm3
 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
 ; CHECK-NEXT: vpslld $31, %xmm3, %xmm3
-; CHECK-NEXT: vpsrad $31, %xmm3, %xmm3
 ; CHECK-NEXT: vmaskmovps 64(%r10), %ymm3, %ymm4
 ; CHECK-NEXT: vblendvps %xmm3, %xmm4, %xmm0, %xmm0
 ; CHECK-NEXT: vmovss %xmm0, 64(%rax)
@@ -300,14 +284,12 @@ define <23 x float> @mload_split23(<23 x i1> %mask, <23 x float>* %addr, <23 x f
 ; CHECK-NEXT: vpinsrb $6, %r8d, %xmm4, %xmm4
 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
 ; CHECK-NEXT: vpslld $31, %xmm5, %xmm5
-; CHECK-NEXT: vpsrad $31, %xmm5, %xmm5
 ; CHECK-NEXT: vpinsrb $8, %r9d, %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm4[4,4,5,5,6,6,7,7]
 ; CHECK-NEXT: vpslld $31, %xmm4, %xmm4
-; CHECK-NEXT: vpsrad $31, %xmm4, %xmm4
 ; CHECK-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4
 ; CHECK-NEXT: vmaskmovps (%r10), %ymm4, %ymm5
 ; CHECK-NEXT: vblendvps %ymm4, %ymm5, %ymm3, %ymm3
@@ -317,14 +299,12 @@ define <23 x float> @mload_split23(<23 x i1> %mask, <23 x float>* %addr, <23 x f
 ; CHECK-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
 ; CHECK-NEXT: vpslld $31, %xmm5, %xmm5
-; CHECK-NEXT: vpsrad $31, %xmm5, %xmm5
 ; CHECK-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm4[4,4,5,5,6,6,7,7]
 ; CHECK-NEXT: vpslld $31, %xmm4, %xmm4
-; CHECK-NEXT: vpsrad $31, %xmm4, %xmm4
 ; CHECK-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4
 ; CHECK-NEXT: vmaskmovps 32(%r10), %ymm4, %ymm5
 ; CHECK-NEXT: vblendvps %ymm4, %ymm5, %ymm2, %ymm2
@@ -334,13 +314,11 @@ define <23 x float> @mload_split23(<23 x i1> %mask, <23 x float>* %addr, <23 x f
 ; CHECK-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
 ; CHECK-NEXT: vpslld $31, %xmm5, %xmm5
-; CHECK-NEXT: vpsrad $31, %xmm5, %xmm5
 ; CHECK-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm4, %xmm4
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm4[4,4,5,5,6,6,7,7]
 ; CHECK-NEXT: vpslld $31, %xmm4, %xmm4
-; CHECK-NEXT: vpsrad $31, %xmm4, %xmm4
 ; CHECK-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm6
 ; CHECK-NEXT: vmaskmovps 64(%r10), %ymm6, %ymm6
 ; CHECK-NEXT: vmovaps %ymm2, 32(%rax)