Revert "[X86][AVX] Added support for lowering to VBROADCASTF128/VBROADCASTI128"

It caused PR28657.

This reverts commit r276281.

llvm-svn: 276405
Benjamin Kramer, 2016-07-22 11:03:10 +00:00
commit b22be9a076, parent 8c05d714b9
11 changed files with 208 additions and 168 deletions
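
For context, the reverted change taught the backend to recognize a 128-bit load splatted into both halves of a 256-bit vector and to emit a single vbroadcastf128/vbroadcasti128 for it. A minimal IR reproduction of that shape (function and value names are illustrative):

define <4 x double> @splat_128bit_subvec(<2 x double>* %p) nounwind {
  %v = load <2 x double>, <2 x double>* %p
  %s = shufflevector <2 x double> %v, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
  ret <4 x double> %s
}

With r276281 in place this selected to one vbroadcastf128; after the revert it returns to a 128-bit vmovaps plus a vinsertf128, which is the before/after visible in the test updates below.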

View File

@@ -296,7 +296,6 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
Name.startswith("avx.blend.p") ||
Name == "avx2.pblendw" ||
Name.startswith("avx2.pblendd.") ||
Name.startswith("avx.vbroadcastf128") ||
Name == "avx2.vbroadcasti128" ||
Name == "xop.vpcmov" ||
(Name.startswith("xop.vpcom") && F->arg_size() == 2))) {
@@ -887,7 +886,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)});
Rep = Builder.CreateZExt(Rep, CI->getType(), "");
} else if (IsX86 && Name.startswith("avx.vbroadcast.s")) {
} else if (IsX86 && Name.startswith("avx.vbroadcast")) {
// Replace broadcasts with a series of insertelements.
Type *VecTy = CI->getType();
Type *EltTy = VecTy->getVectorElementType();
@@ -919,21 +918,15 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
bool DoSext = (StringRef::npos != Name.find("pmovsx"));
Rep = DoSext ? Builder.CreateSExt(SV, DstTy)
: Builder.CreateZExt(SV, DstTy);
} else if (IsX86 && (Name.startswith("avx.vbroadcastf128") ||
Name == "avx2.vbroadcasti128")) {
// Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle.
Type *EltTy = CI->getType()->getVectorElementType();
unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits();
Type *VT = VectorType::get(EltTy, NumSrcElts);
} else if (IsX86 && Name == "avx2.vbroadcasti128") {
// Replace vbroadcasts with a vector shuffle.
Type *VT = VectorType::get(Type::getInt64Ty(C), 2);
Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
PointerType::getUnqual(VT));
Value *Load = Builder.CreateLoad(VT, Op);
if (NumSrcElts == 2)
Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
{ 0, 1, 0, 1 });
else
Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
{ 0, 1, 2, 3, 0, 1, 2, 3 });
uint32_t Idxs[4] = { 0, 1, 0, 1 };
Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
Idxs);
} else if (IsX86 && (Name.startswith("avx2.pbroadcast") ||
Name.startswith("avx2.vbroadcast") ||
Name.startswith("avx512.pbroadcast") ||

View File

@@ -12804,10 +12804,6 @@ static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
// (insert_subvector (insert_subvector undef, (load addr), 0),
// (load addr + 16), Elts/2)
// --> load32 addr
// or a 16-byte broadcast:
// (insert_subvector (insert_subvector undef, (load addr), 0),
// (load addr), Elts/2)
// --> X86SubVBroadcast(load16 addr)
if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
OpVT.is256BitVector() && SubVecVT.is128BitVector()) {
@@ -12826,10 +12822,6 @@ static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
if (SDValue Ld = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false))
return Ld;
}
// If lower/upper loads are the same then lower to a VBROADCASTF128.
if (SubVec2 == peekThroughBitcasts(SubVec))
return DAG.getNode(X86ISD::SUBV_BROADCAST, dl, OpVT, SubVec);
}
}
}
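
The block deleted above was the DAG half of the reverted change: when both halves of the insert_subvector chain came from the same 128-bit subvector, it emitted X86ISD::SUBV_BROADCAST instead of a 256-bit load. A subvector splat in IR is one way this shape arises (illustrative names; a sketch of my reading of the pattern):

define <8 x float> @splat_4f32_to_8f32(<4 x float>* %p) nounwind {
  %v = load <4 x float>, <4 x float>* %p
  %s = shufflevector <4 x float> %v, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
  ret <8 x float> %s
}

With the combine removed, this again lowers to a plain 128-bit load plus an insert, as the vector-shuffle test changes below confirm.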

View File

@@ -986,10 +986,6 @@ multiclass avx512_subvec_broadcast_rm<bits<8> opc, string OpcodeStr,
AVX5128IBase, EVEX;
}
//===----------------------------------------------------------------------===//
// AVX-512 BROADCAST SUBVECTORS
//
defm VBROADCASTI32X4 : avx512_subvec_broadcast_rm<0x5a, "vbroadcasti32x4",
v16i32_info, v4i32x_info>,
EVEX_V512, EVEX_CD8<32, CD8VT4>;
@@ -1010,13 +1006,7 @@ defm VBROADCASTI32X4Z256 : avx512_subvec_broadcast_rm<0x5a, "vbroadcasti32x4",
defm VBROADCASTF32X4Z256 : avx512_subvec_broadcast_rm<0x1a, "vbroadcastf32x4",
v8f32x_info, v4f32x_info>,
EVEX_V256, EVEX_CD8<32, CD8VT4>;
def : Pat<(v16i16 (X86SubVBroadcast (bc_v8i16 (loadv2i64 addr:$src)))),
(VBROADCASTI32X4Z256rm addr:$src)>;
def : Pat<(v32i8 (X86SubVBroadcast (bc_v16i8 (loadv2i64 addr:$src)))),
(VBROADCASTI32X4Z256rm addr:$src)>;
}
let Predicates = [HasVLX, HasDQI] in {
defm VBROADCASTI64X2Z128 : avx512_subvec_broadcast_rm<0x5a, "vbroadcasti64x2",
v4i64x_info, v2i64x_info>, VEX_W,
@@ -1025,14 +1015,6 @@ defm VBROADCASTF64X2Z128 : avx512_subvec_broadcast_rm<0x1a, "vbroadcastf64x2",
v4f64x_info, v2f64x_info>, VEX_W,
EVEX_V256, EVEX_CD8<64, CD8VT2>;
}
let Predicates = [HasVLX, NoDQI] in {
def : Pat<(v4f64 (X86SubVBroadcast (loadv2f64 addr:$src))),
(VBROADCASTF32X4Z256rm addr:$src)>;
def : Pat<(v4i64 (X86SubVBroadcast (loadv2i64 addr:$src))),
(VBROADCASTI32X4Z256rm addr:$src)>;
}
let Predicates = [HasDQI] in {
defm VBROADCASTI64X2 : avx512_subvec_broadcast_rm<0x5a, "vbroadcasti64x2",
v8i64_info, v2i64x_info>, VEX_W,

View File

@@ -7759,50 +7759,23 @@ let ExeDomain = SSEPackedDouble, Predicates = [HasAVX2, NoVLX] in
def VBROADCASTSDYrr : avx2_broadcast_rr<0x19, "vbroadcastsd", VR256,
v4f64, v2f64, WriteFShuffle256>, VEX_L;
//===----------------------------------------------------------------------===//
// VBROADCAST*128 - Load from memory and broadcast 128-bit vector to both
// halves of a 256-bit vector.
//
let mayLoad = 1, hasSideEffects = 0, Predicates = [HasAVX2] in
def VBROADCASTI128 : AVX8I<0x5A, MRMSrcMem, (outs VR256:$dst),
(ins i128mem:$src),
"vbroadcasti128\t{$src, $dst|$dst, $src}", []>,
Sched<[WriteLoad]>, VEX, VEX_L;
let mayLoad = 1, hasSideEffects = 0, Predicates = [HasAVX] in
def VBROADCASTF128 : AVX8I<0x1A, MRMSrcMem, (outs VR256:$dst),
(ins f128mem:$src),
"vbroadcastf128\t{$src, $dst|$dst, $src}", []>,
"vbroadcastf128\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
(int_x86_avx_vbroadcastf128_pd_256 addr:$src))]>,
Sched<[WriteFShuffleLd]>, VEX, VEX_L;
let Predicates = [HasAVX2, NoVLX] in {
def : Pat<(v4i64 (X86SubVBroadcast (loadv2i64 addr:$src))),
(VBROADCASTI128 addr:$src)>;
def : Pat<(v8i32 (X86SubVBroadcast (bc_v4i32 (loadv2i64 addr:$src)))),
(VBROADCASTI128 addr:$src)>;
def : Pat<(v16i16 (X86SubVBroadcast (bc_v8i16 (loadv2i64 addr:$src)))),
(VBROADCASTI128 addr:$src)>;
def : Pat<(v32i8 (X86SubVBroadcast (bc_v16i8 (loadv2i64 addr:$src)))),
(VBROADCASTI128 addr:$src)>;
}
let Predicates = [HasAVX] in
def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
(VBROADCASTF128 addr:$src)>;
let Predicates = [HasAVX, NoVLX] in {
def : Pat<(v4f64 (X86SubVBroadcast (loadv2f64 addr:$src))),
(VBROADCASTF128 addr:$src)>;
def : Pat<(v8f32 (X86SubVBroadcast (loadv4f32 addr:$src))),
(VBROADCASTF128 addr:$src)>;
}
let Predicates = [HasAVX1Only] in {
def : Pat<(v4i64 (X86SubVBroadcast (loadv2i64 addr:$src))),
(VBROADCASTF128 addr:$src)>;
def : Pat<(v8i32 (X86SubVBroadcast (bc_v4i32 (loadv2i64 addr:$src)))),
(VBROADCASTF128 addr:$src)>;
def : Pat<(v16i16 (X86SubVBroadcast (bc_v8i16 (loadv2i64 addr:$src)))),
(VBROADCASTF128 addr:$src)>;
def : Pat<(v32i8 (X86SubVBroadcast (bc_v16i8 (loadv2i64 addr:$src)))),
(VBROADCASTF128 addr:$src)>;
}
//===----------------------------------------------------------------------===//
// VINSERTF128 - Insert packed floating-point values
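
Note that the restored VBROADCASTF128 definition carries the pd_256 intrinsic pattern directly on the instruction, with a separate Pat re-added for the ps_256 variant, so direct intrinsic calls keep selecting to a single instruction. For example (illustrative function name; the declare is copied from the tests below):

define <8 x float> @bcast_f128_ps256(i8* %p) nounwind {
  %r = call <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8* %p)
  ret <8 x float> %r
}
declare <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8*) nounwind readonly

Under HasAVX this still selects to a single vbroadcastf128 load, matching the re-added CHECK lines in the intrinsic tests.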

View File

@@ -95,30 +95,6 @@ define <2 x double> @test_x86_avx_extractf128_pd_256_2(<4 x double> %a0) {
}
define <4 x double> @test_x86_avx_vbroadcastf128_pd_256(i8* %a0) {
; CHECK-LABEL: test_x86_avx_vbroadcastf128_pd_256:
; CHECK: ## BB#0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; CHECK-NEXT: retl
%res = call <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8* %a0) ; <<4 x double>> [#uses=1]
ret <4 x double> %res
}
declare <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8*) nounwind readonly
define <8 x float> @test_x86_avx_vbroadcastf128_ps_256(i8* %a0) {
; CHECK-LABEL: test_x86_avx_vbroadcastf128_ps_256:
; CHECK: ## BB#0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; CHECK-NEXT: retl
%res = call <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8* %a0) ; <<8 x float>> [#uses=1]
ret <8 x float> %res
}
declare <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8*) nounwind readonly
define <4 x double> @test_x86_avx_blend_pd_256(<4 x double> %a0, <4 x double> %a1) {
; CHECK-LABEL: test_x86_avx_blend_pd_256:
; CHECK: ## BB#0:
@@ -388,7 +364,7 @@ define void @test_x86_sse2_storeu_dq(i8* %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_x86_sse2_storeu_dq:
; CHECK: ## BB#0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: vpaddb LCPI34_0, %xmm0, %xmm0
; CHECK-NEXT: vpaddb LCPI32_0, %xmm0, %xmm0
; CHECK-NEXT: vmovdqu %xmm0, (%eax)
; CHECK-NEXT: retl
%a2 = add <16 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>

View File

@@ -3970,6 +3970,42 @@ define <8 x float> @test_x86_avx_sqrt_ps_256(<8 x float> %a0) {
declare <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float>) nounwind readnone
define <4 x double> @test_x86_avx_vbroadcastf128_pd_256(i8* %a0) {
; AVX-LABEL: test_x86_avx_vbroadcastf128_pd_256:
; AVX: ## BB#0:
; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; AVX-NEXT: retl
;
; AVX512VL-LABEL: test_x86_avx_vbroadcastf128_pd_256:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512VL-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; AVX512VL-NEXT: retl
%res = call <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8* %a0) ; <<4 x double>> [#uses=1]
ret <4 x double> %res
}
declare <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8*) nounwind readonly
define <8 x float> @test_x86_avx_vbroadcastf128_ps_256(i8* %a0) {
; AVX-LABEL: test_x86_avx_vbroadcastf128_ps_256:
; AVX: ## BB#0:
; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; AVX-NEXT: retl
;
; AVX512VL-LABEL: test_x86_avx_vbroadcastf128_ps_256:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512VL-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; AVX512VL-NEXT: retl
%res = call <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8* %a0) ; <<8 x float>> [#uses=1]
ret <8 x float> %res
}
declare <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8*) nounwind readonly
define <4 x double> @test_x86_avx_vperm2f128_pd_256(<4 x double> %a0, <4 x double> %a1) {
; AVX-LABEL: test_x86_avx_vperm2f128_pd_256:
; AVX: ## BB#0:
@@ -4549,7 +4585,7 @@ define void @movnt_dq(i8* %p, <2 x i64> %a1) nounwind {
; AVX-LABEL: movnt_dq:
; AVX: ## BB#0:
; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-NEXT: vpaddq LCPI254_0, %xmm0, %xmm0
; AVX-NEXT: vpaddq LCPI256_0, %xmm0, %xmm0
; AVX-NEXT: vmovntdq %ymm0, (%eax)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retl
@@ -4557,7 +4593,7 @@ define void @movnt_dq(i8* %p, <2 x i64> %a1) nounwind {
; AVX512VL-LABEL: movnt_dq:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512VL-NEXT: vpaddq LCPI254_0, %xmm0, %xmm0
; AVX512VL-NEXT: vpaddq LCPI256_0, %xmm0, %xmm0
; AVX512VL-NEXT: vmovntdq %ymm0, (%eax)
; AVX512VL-NEXT: retl
%a2 = add <2 x i64> %a1, <i64 1, i64 1>

View File

@@ -6,12 +6,14 @@ define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
; X32-LABEL: test_broadcast_2f64_4f64:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vmovaps (%eax), %xmm0
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2f64_4f64:
; X64: ## BB#0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vmovaps (%rdi), %xmm0
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <2 x double>, <2 x double> *%p
%2 = shufflevector <2 x double> %1, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -22,12 +24,14 @@ define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
; X32-LABEL: test_broadcast_2i64_4i64:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vmovaps (%eax), %xmm0
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2i64_4i64:
; X64: ## BB#0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vmovaps (%rdi), %xmm0
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <2 x i64>, <2 x i64> *%p
%2 = shufflevector <2 x i64> %1, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -38,12 +42,14 @@ define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
; X32-LABEL: test_broadcast_4f32_8f32:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vmovaps (%eax), %xmm0
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4f32_8f32:
; X64: ## BB#0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vmovaps (%rdi), %xmm0
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <4 x float>, <4 x float> *%p
%2 = shufflevector <4 x float> %1, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -54,12 +60,14 @@ define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
; X32-LABEL: test_broadcast_4i32_8i32:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vmovaps (%eax), %xmm0
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4i32_8i32:
; X64: ## BB#0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vmovaps (%rdi), %xmm0
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32> *%p
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -70,12 +78,14 @@ define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
; X32-LABEL: test_broadcast_8i16_16i16:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vmovaps (%eax), %xmm0
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_8i16_16i16:
; X64: ## BB#0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vmovaps (%rdi), %xmm0
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16> *%p
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -86,12 +96,14 @@ define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
; X32-LABEL: test_broadcast_16i8_32i8:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vmovaps (%eax), %xmm0
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_16i8_32i8:
; X64: ## BB#0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vmovaps (%rdi), %xmm0
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8> *%p
%2 = shufflevector <16 x i8> %1, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>

View File

@@ -505,12 +505,14 @@ define <4 x i64> @test_mm256_broadcastsi128_si256_mem(<2 x i64>* %p0) {
; X32-LABEL: test_mm256_broadcastsi128_si256_mem:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vmovaps (%eax), %xmm0
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_broadcastsi128_si256_mem:
; X64: # BB#0:
; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vmovaps (%rdi), %xmm0
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%a0 = load <2 x i64>, <2 x i64>* %p0
%res = shufflevector <2 x i64> %a0, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>

View File

@@ -6,13 +6,15 @@ define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
; X32-LABEL: test_broadcast_2f64_4f64:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vmovapd (%eax), %xmm0
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: vaddpd LCPI0_0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2f64_4f64:
; X64: ## BB#0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vmovapd (%rdi), %xmm0
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <2 x double>, <2 x double> *%p
@@ -25,13 +27,15 @@ define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
; X32-LABEL: test_broadcast_2i64_4i64:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vmovdqa (%eax), %xmm0
; X32-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: vpaddq LCPI1_0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2i64_4i64:
; X64: ## BB#0:
; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vmovdqa (%rdi), %xmm0
; X64-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <2 x i64>, <2 x i64> *%p
@@ -44,13 +48,15 @@ define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
; X32-LABEL: test_broadcast_4f32_8f32:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vmovaps (%eax), %xmm0
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: vaddps LCPI2_0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4f32_8f32:
; X64: ## BB#0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vmovaps (%rdi), %xmm0
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <4 x float>, <4 x float> *%p
@@ -63,13 +69,15 @@ define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
; X32-LABEL: test_broadcast_4i32_8i32:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vmovdqa (%eax), %xmm0
; X32-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: vpaddd LCPI3_0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4i32_8i32:
; X64: ## BB#0:
; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vmovdqa (%rdi), %xmm0
; X64-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32> *%p
@@ -82,13 +90,15 @@ define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
; X32-LABEL: test_broadcast_8i16_16i16:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vmovdqa (%eax), %xmm0
; X32-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: vpaddw LCPI4_0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_8i16_16i16:
; X64: ## BB#0:
; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vmovdqa (%rdi), %xmm0
; X64-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16> *%p
@@ -101,13 +111,15 @@ define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
; X32-LABEL: test_broadcast_16i8_32i8:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vmovdqa (%eax), %xmm0
; X32-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: vpaddb LCPI5_0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_16i8_32i8:
; X64: ## BB#0:
; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vmovdqa (%rdi), %xmm0
; X64-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8> *%p

View File

@@ -10,19 +10,22 @@
define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_2f64_4f64:
; X64-AVX512VL: ## BB#0:
; X64-AVX512VL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
; X64-AVX512VL-NEXT: vmovapd (%rdi), %xmm0
; X64-AVX512VL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512VL-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_2f64_4f64:
; X64-AVX512BWVL: ## BB#0:
; X64-AVX512BWVL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
; X64-AVX512BWVL-NEXT: vmovapd (%rdi), %xmm0
; X64-AVX512BWVL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_2f64_4f64:
; X64-AVX512DQVL: ## BB#0:
; X64-AVX512DQVL-NEXT: vbroadcastf64x2 (%rdi), %ymm0
; X64-AVX512DQVL-NEXT: vmovapd (%rdi), %xmm0
; X64-AVX512DQVL-NEXT: vinsertf64x2 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQVL-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512DQVL-NEXT: retq
%1 = load <2 x double>, <2 x double> *%p
@@ -34,19 +37,22 @@ define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_2i64_4i64:
; X64-AVX512VL: ## BB#0:
; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512VL-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_2i64_4i64:
; X64-AVX512BWVL: ## BB#0:
; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
; X64-AVX512BWVL-NEXT: vmovdqa64 (%rdi), %xmm0
; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_2i64_4i64:
; X64-AVX512DQVL: ## BB#0:
; X64-AVX512DQVL-NEXT: vbroadcasti64x2 (%rdi), %ymm0
; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
; X64-AVX512DQVL-NEXT: vinserti64x2 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQVL-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512DQVL-NEXT: retq
%1 = load <2 x i64>, <2 x i64> *%p
@@ -58,7 +64,8 @@ define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_4f32_8f32:
; X64-AVX512: ## BB#0:
; X64-AVX512-NEXT: vbroadcastf32x4 (%rdi), %ymm0
; X64-AVX512-NEXT: vmovaps (%rdi), %xmm0
; X64-AVX512-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512-NEXT: retq
%1 = load <4 x float>, <4 x float> *%p
@@ -70,7 +77,8 @@ define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_4i32_8i32:
; X64-AVX512: ## BB#0:
; X64-AVX512-NEXT: vbroadcasti32x4 (%rdi), %ymm0
; X64-AVX512-NEXT: vmovdqa32 (%rdi), %xmm0
; X64-AVX512-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512-NEXT: retq
%1 = load <4 x i32>, <4 x i32> *%p
@@ -80,11 +88,26 @@ define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
}
define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_8i16_16i16:
; X64-AVX512: ## BB#0:
; X64-AVX512-NEXT: vbroadcasti32x4 (%rdi), %ymm0
; X64-AVX512-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512-NEXT: retq
; X64-AVX512VL-LABEL: test_broadcast_8i16_16i16:
; X64-AVX512VL: ## BB#0:
; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512VL-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_8i16_16i16:
; X64-AVX512BWVL: ## BB#0:
; X64-AVX512BWVL-NEXT: vmovdqu16 (%rdi), %xmm0
; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_8i16_16i16:
; X64-AVX512DQVL: ## BB#0:
; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
; X64-AVX512DQVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQVL-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512DQVL-NEXT: retq
%1 = load <8 x i16>, <8 x i16> *%p
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%3 = add <16 x i16> %2, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16>
@@ -92,11 +115,26 @@ define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
}
define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_16i8_32i8:
; X64-AVX512: ## BB#0:
; X64-AVX512-NEXT: vbroadcasti32x4 (%rdi), %ymm0
; X64-AVX512-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512-NEXT: retq
; X64-AVX512VL-LABEL: test_broadcast_16i8_32i8:
; X64-AVX512VL: ## BB#0:
; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512VL-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_16i8_32i8:
; X64-AVX512BWVL: ## BB#0:
; X64-AVX512BWVL-NEXT: vmovdqu8 (%rdi), %xmm0
; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_16i8_32i8:
; X64-AVX512DQVL: ## BB#0:
; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
; X64-AVX512DQVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQVL-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512DQVL-NEXT: retq
%1 = load <16 x i8>, <16 x i8> *%p
%2 = shufflevector <16 x i8> %1, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%3 = add <32 x i8> %2, <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32>
@@ -110,21 +148,24 @@ define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
define <8 x double> @test_broadcast_2f64_8f64(<2 x double> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_2f64_8f64:
; X64-AVX512VL: ## BB#0:
; X64-AVX512VL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
; X64-AVX512VL-NEXT: vmovapd (%rdi), %xmm0
; X64-AVX512VL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512VL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512VL-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_2f64_8f64:
; X64-AVX512BWVL: ## BB#0:
; X64-AVX512BWVL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
; X64-AVX512BWVL-NEXT: vmovapd (%rdi), %xmm0
; X64-AVX512BWVL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_2f64_8f64:
; X64-AVX512DQVL: ## BB#0:
; X64-AVX512DQVL-NEXT: vbroadcastf64x2 (%rdi), %ymm0
; X64-AVX512DQVL-NEXT: vmovapd (%rdi), %xmm0
; X64-AVX512DQVL-NEXT: vinsertf64x2 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQVL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: retq
@@ -137,21 +178,24 @@ define <8 x double> @test_broadcast_2f64_8f64(<2 x double> *%p) nounwind {
define <8 x i64> @test_broadcast_2i64_8i64(<2 x i64> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_2i64_8i64:
; X64-AVX512VL: ## BB#0:
; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512VL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512VL-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_2i64_8i64:
; X64-AVX512BWVL: ## BB#0:
; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
; X64-AVX512BWVL-NEXT: vmovdqa64 (%rdi), %xmm0
; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_2i64_8i64:
; X64-AVX512DQVL: ## BB#0:
; X64-AVX512DQVL-NEXT: vbroadcasti64x2 (%rdi), %ymm0
; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
; X64-AVX512DQVL-NEXT: vinserti64x2 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: retq
@@ -164,21 +208,24 @@ define <8 x i64> @test_broadcast_2i64_8i64(<2 x i64> *%p) nounwind {
define <16 x float> @test_broadcast_4f32_16f32(<4 x float> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_4f32_16f32:
; X64-AVX512VL: ## BB#0:
; X64-AVX512VL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
; X64-AVX512VL-NEXT: vmovaps (%rdi), %xmm0
; X64-AVX512VL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512VL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512VL-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_4f32_16f32:
; X64-AVX512BWVL: ## BB#0:
; X64-AVX512BWVL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
; X64-AVX512BWVL-NEXT: vmovaps (%rdi), %xmm0
; X64-AVX512BWVL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_4f32_16f32:
; X64-AVX512DQVL: ## BB#0:
; X64-AVX512DQVL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
; X64-AVX512DQVL-NEXT: vmovaps (%rdi), %xmm0
; X64-AVX512DQVL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQVL-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: retq
@@ -191,21 +238,24 @@ define <16 x float> @test_broadcast_4f32_16f32(<4 x float> *%p) nounwind {
define <16 x i32> @test_broadcast_4i32_16i32(<4 x i32> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_4i32_16i32:
; X64-AVX512VL: ## BB#0:
; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
; X64-AVX512VL-NEXT: vmovdqa32 (%rdi), %xmm0
; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512VL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_4i32_16i32:
; X64-AVX512BWVL: ## BB#0:
; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
; X64-AVX512BWVL-NEXT: vmovdqa32 (%rdi), %xmm0
; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_4i32_16i32:
; X64-AVX512DQVL: ## BB#0:
; X64-AVX512DQVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
; X64-AVX512DQVL-NEXT: vmovdqa32 (%rdi), %xmm0
; X64-AVX512DQVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQVL-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: retq
@@ -218,21 +268,24 @@ define <16 x i32> @test_broadcast_4i32_16i32(<4 x i32> *%p) nounwind {
define <32 x i16> @test_broadcast_8i16_32i16(<8 x i16> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_8i16_32i16:
; X64-AVX512VL: ## BB#0:
; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm1
; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm1
; X64-AVX512VL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512VL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm1
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_8i16_32i16:
; X64-AVX512BWVL: ## BB#0:
; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
; X64-AVX512BWVL-NEXT: vmovdqu16 (%rdi), %xmm0
; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vpaddw {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_8i16_32i16:
; X64-AVX512DQVL: ## BB#0:
; X64-AVX512DQVL-NEXT: vbroadcasti32x4 (%rdi), %ymm1
; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
; X64-AVX512DQVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm1
; X64-AVX512DQVL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512DQVL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm1
; X64-AVX512DQVL-NEXT: retq
@@ -245,21 +298,24 @@ define <32 x i16> @test_broadcast_8i16_32i16(<8 x i16> *%p) nounwind {
define <64 x i8> @test_broadcast_16i8_64i8(<16 x i8> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_16i8_64i8:
; X64-AVX512VL: ## BB#0:
; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm1
; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm1
; X64-AVX512VL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512VL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm1
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_16i8_64i8:
; X64-AVX512BWVL: ## BB#0:
; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
; X64-AVX512BWVL-NEXT: vmovdqu8 (%rdi), %xmm0
; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vpaddb {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_16i8_64i8:
; X64-AVX512DQVL: ## BB#0:
; X64-AVX512DQVL-NEXT: vbroadcasti32x4 (%rdi), %ymm1
; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
; X64-AVX512DQVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm1
; X64-AVX512DQVL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512DQVL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm1
; X64-AVX512DQVL-NEXT: retq

View File

@@ -1352,17 +1352,20 @@ define <4 x double> @splat_mem_v4f64_from_v2f64(<2 x double>* %ptr) {
define <4 x i64> @splat128_mem_v4i64_from_v2i64(<2 x i64>* %ptr) {
; AVX1-LABEL: splat128_mem_v4i64_from_v2i64:
; AVX1: # BB#0:
; AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; AVX1-NEXT: vmovaps (%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splat128_mem_v4i64_from_v2i64:
; AVX2: # BB#0:
; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; AVX2-NEXT: vmovaps (%rdi), %xmm0
; AVX2-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: splat128_mem_v4i64_from_v2i64:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
; AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; AVX512VL-NEXT: retq
%v = load <2 x i64>, <2 x i64>* %ptr
%shuffle = shufflevector <2 x i64> %v, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1372,17 +1375,20 @@ define <4 x double> @splat128_mem_v4f64_from_v2f64(<2 x double>* %ptr) {
define <4 x double> @splat128_mem_v4f64_from_v2f64(<2 x double>* %ptr) {
; AVX1-LABEL: splat128_mem_v4f64_from_v2f64:
; AVX1: # BB#0:
; AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; AVX1-NEXT: vmovaps (%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splat128_mem_v4f64_from_v2f64:
; AVX2: # BB#0:
; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; AVX2-NEXT: vmovaps (%rdi), %xmm0
; AVX2-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: splat128_mem_v4f64_from_v2f64:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
; AVX512VL-NEXT: vmovapd (%rdi), %xmm0
; AVX512VL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
; AVX512VL-NEXT: retq
%v = load <2 x double>, <2 x double>* %ptr
%shuffle = shufflevector <2 x double> %v, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>