diff --git a/test/Transforms/InstCombine/X86/x86-pack-inseltpoison.ll b/test/Transforms/InstCombine/X86/x86-pack-inseltpoison.ll
index ff8842cd15e..4cc89fc80f2 100644
--- a/test/Transforms/InstCombine/X86/x86-pack-inseltpoison.ll
+++ b/test/Transforms/InstCombine/X86/x86-pack-inseltpoison.ll
@@ -5,99 +5,99 @@
 ; UNDEF Elts
 ;

-define <8 x i16> @undef_packssdw_128() {
-; CHECK-LABEL: @undef_packssdw_128(
+define <8 x i16> @poison_packssdw_128() {
+; CHECK-LABEL: @poison_packssdw_128(
 ; CHECK-NEXT: ret <8 x i16> undef
 ;
-  %1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> undef, <4 x i32> undef)
+  %1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> poison, <4 x i32> poison)
   ret <8 x i16> %1
 }

-define <8 x i16> @undef_packusdw_128() {
-; CHECK-LABEL: @undef_packusdw_128(
+define <8 x i16> @poison_packusdw_128() {
+; CHECK-LABEL: @poison_packusdw_128(
 ; CHECK-NEXT: ret <8 x i16> undef
 ;
-  %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> undef, <4 x i32> undef)
+  %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> poison, <4 x i32> poison)
   ret <8 x i16> %1
 }

-define <16 x i8> @undef_packsswb_128() {
-; CHECK-LABEL: @undef_packsswb_128(
+define <16 x i8> @poison_packsswb_128() {
+; CHECK-LABEL: @poison_packsswb_128(
 ; CHECK-NEXT: ret <16 x i8> undef
 ;
-  %1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> undef, <8 x i16> undef)
+  %1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> poison, <8 x i16> poison)
   ret <16 x i8> %1
 }

-define <16 x i8> @undef_packuswb_128() {
-; CHECK-LABEL: @undef_packuswb_128(
+define <16 x i8> @poison_packuswb_128() {
+; CHECK-LABEL: @poison_packuswb_128(
 ; CHECK-NEXT: ret <16 x i8> undef
 ;
-  %1 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> undef, <8 x i16> undef)
+  %1 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> poison, <8 x i16> poison)
   ret <16 x i8> %1
 }

-define <16 x i16> @undef_packssdw_256() {
-; CHECK-LABEL: @undef_packssdw_256(
+define <16 x i16> @poison_packssdw_256() {
+; CHECK-LABEL: @poison_packssdw_256(
 ; CHECK-NEXT: ret <16 x i16> undef
 ;
-  %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> undef, <8 x i32> undef)
+  %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> poison, <8 x i32> poison)
   ret <16 x i16> %1
 }

-define <16 x i16> @undef_packusdw_256() {
-; CHECK-LABEL: @undef_packusdw_256(
+define <16 x i16> @poison_packusdw_256() {
+; CHECK-LABEL: @poison_packusdw_256(
 ; CHECK-NEXT: ret <16 x i16> undef
 ;
-  %1 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> undef, <8 x i32> undef)
+  %1 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> poison, <8 x i32> poison)
   ret <16 x i16> %1
 }

-define <32 x i8> @undef_packsswb_256() {
-; CHECK-LABEL: @undef_packsswb_256(
+define <32 x i8> @poison_packsswb_256() {
+; CHECK-LABEL: @poison_packsswb_256(
 ; CHECK-NEXT: ret <32 x i8> undef
 ;
-  %1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> undef, <16 x i16> undef)
+  %1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> poison, <16 x i16> poison)
   ret <32 x i8> %1
 }

-define <32 x i8> @undef_packuswb_256() {
-; CHECK-LABEL: @undef_packuswb_256(
+define <32 x i8> @poison_packuswb_256() {
+; CHECK-LABEL: @poison_packuswb_256(
 ; CHECK-NEXT: ret <32 x i8> undef
 ;
-  %1 = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> undef, <16 x i16> undef)
+  %1 = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> poison, <16 x i16> poison)
   ret <32 x i8> %1
 }

-define <32 x i16> @undef_packssdw_512() {
-; CHECK-LABEL: @undef_packssdw_512(
+define <32 x i16> @poison_packssdw_512() {
+; CHECK-LABEL: @poison_packssdw_512(
 ; CHECK-NEXT: ret <32 x i16> undef
 ;
-  %1 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> undef, <16 x i32> undef)
+  %1 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> poison, <16 x i32> poison)
   ret <32 x i16> %1
 }

-define <32 x i16> @undef_packusdw_512() {
-; CHECK-LABEL: @undef_packusdw_512(
+define <32 x i16> @poison_packusdw_512() {
+; CHECK-LABEL: @poison_packusdw_512(
 ; CHECK-NEXT: ret <32 x i16> undef
 ;
-  %1 = call <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32> undef, <16 x i32> undef)
+  %1 = call <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32> poison, <16 x i32> poison)
   ret <32 x i16> %1
 }

-define <64 x i8> @undef_packsswb_512() {
-; CHECK-LABEL: @undef_packsswb_512(
+define <64 x i8> @poison_packsswb_512() {
+; CHECK-LABEL: @poison_packsswb_512(
 ; CHECK-NEXT: ret <64 x i8> undef
 ;
-  %1 = call <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16> undef, <32 x i16> undef)
+  %1 = call <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16> poison, <32 x i16> poison)
   ret <64 x i8> %1
 }

-define <64 x i8> @undef_packuswb_512() {
-; CHECK-LABEL: @undef_packuswb_512(
+define <64 x i8> @poison_packuswb_512() {
+; CHECK-LABEL: @poison_packuswb_512(
 ; CHECK-NEXT: ret <64 x i8> undef
 ;
-  %1 = call <64 x i8> @llvm.x86.avx512.packuswb.512(<32 x i16> undef, <32 x i16> undef)
+  %1 = call <64 x i8> @llvm.x86.avx512.packuswb.512(<32 x i16> poison, <32 x i16> poison)
   ret <64 x i8> %1
 }
@@ -115,17 +115,17 @@ define <8 x i16> @fold_packssdw_128() {

 define <8 x i16> @fold_packusdw_128() {
 ; CHECK-LABEL: @fold_packusdw_128(
-; CHECK-NEXT: ret <8 x i16>
+; CHECK-NEXT: ret <8 x i16>
 ;
-  %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> undef, <4 x i32> )
+  %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> poison, <4 x i32> )
   ret <8 x i16> %1
 }

 define <16 x i8> @fold_packsswb_128() {
 ; CHECK-LABEL: @fold_packsswb_128(
-; CHECK-NEXT: ret <16 x i8>
+; CHECK-NEXT: ret <16 x i8>
 ;
-  %1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> zeroinitializer, <8 x i16> undef)
+  %1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> zeroinitializer, <8 x i16> poison)
   ret <16 x i8> %1
 }
@@ -139,9 +139,9 @@ define <16 x i8> @fold_packuswb_128() {

 define <16 x i16> @fold_packssdw_256() {
 ; CHECK-LABEL: @fold_packssdw_256(
-; CHECK-NEXT: ret <16 x i16>
+; CHECK-NEXT: ret <16 x i16>
 ;
-  %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> , <8 x i32> undef)
+  %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> , <8 x i32> poison)
   ret <16 x i16> %1
 }
@@ -155,9 +155,9 @@ define <16 x i16> @fold_packusdw_256() {

 define <32 x i8> @fold_packsswb_256() {
 ; CHECK-LABEL: @fold_packsswb_256(
-; CHECK-NEXT: ret <32 x i8>
+; CHECK-NEXT: ret <32 x i8>
 ;
-  %1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> undef, <16 x i16> zeroinitializer)
+  %1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> poison, <16 x i16> zeroinitializer)
   ret <32 x i8> %1
 }
@@ -171,9 +171,9 @@ define <32 x i8> @fold_packuswb_256() {

 define <32 x i16> @fold_packssdw_512() {
 ; CHECK-LABEL: @fold_packssdw_512(
-; CHECK-NEXT: ret <32 x i16>
+; CHECK-NEXT: ret <32 x i16>
 ;
-  %1 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> , <16 x i32> undef)
+  %1 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> , <16 x i32> poison)
   ret <32 x i16> %1
 }
@@ -187,9 +187,9 @@ define <32 x i16> @fold_packusdw_512() {

 define <64 x i8> @fold_packsswb_512() {
 ; CHECK-LABEL: @fold_packsswb_512(
-; CHECK-NEXT: ret <64 x i8>
+; CHECK-NEXT: ret <64 x i8>
 ;
-  %1 = call <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16> undef, <32 x i16> zeroinitializer)
+  %1 = call <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16> poison, <32 x i16> zeroinitializer)
   ret <64 x i8> %1
 }
@@ -211,8 +211,8 @@ define <8 x i16> @elts_packssdw_128(<4 x i32> %a0, <4 x i32> %a1) {
 ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <8 x i32>
 ; CHECK-NEXT: ret <8 x i16> [[TMP2]]
 ;
-  %1 = shufflevector <4 x i32> %a0, <4 x i32> poison, <4 x i32>
-  %2 = shufflevector <4 x i32> %a1, <4 x i32> poison, <4 x i32>
+  %1 = shufflevector <4 x i32> %a0, <4 x i32> poison, <4 x i32>
+  %2 = shufflevector <4 x i32> %a1, <4 x i32> poison, <4 x i32>
   %3 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %1, <4 x i32> %2)
   %4 = shufflevector <8 x i16> %3, <8 x i16> poison, <8 x i32>
   ret <8 x i16> %4
@@ -227,7 +227,7 @@ define <8 x i16> @elts_packusdw_128(<4 x i32> %a0, <4 x i32> %a1) {
   %1 = insertelement <4 x i32> %a0, i32 0, i32 0
   %2 = insertelement <4 x i32> %a1, i32 0, i32 3
   %3 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %1, <4 x i32> %2)
-  %4 = shufflevector <8 x i16> %3, <8 x i16> poison, <8 x i32>
+  %4 = shufflevector <8 x i16> %3, <8 x i16> poison, <8 x i32>
   ret <8 x i16> %4
 }
@@ -260,9 +260,9 @@ define <16 x i16> @elts_packssdw_256(<8 x i32> %a0, <8 x i32> %a1) {
 ; CHECK-NEXT: ret <16 x i16> [[TMP2]]
 ;
   %1 = shufflevector <8 x i32> %a0, <8 x i32> poison, <8 x i32>
-  %2 = shufflevector <8 x i32> %a1, <8 x i32> poison, <8 x i32>
+  %2 = shufflevector <8 x i32> %a1, <8 x i32> poison, <8 x i32>
   %3 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %1, <8 x i32> %2)
-  %4 = shufflevector <16 x i16> %3, <16 x i16> poison, <16 x i32>
+  %4 = shufflevector <16 x i16> %3, <16 x i16> poison, <16 x i32>
   ret <16 x i16> %4
 }
@@ -276,7 +276,7 @@ define <16 x i16> @elts_packusdw_256(<8 x i32> %a0, <8 x i32> %a1) {
   %1 = shufflevector <8 x i32> %a0, <8 x i32> poison, <8 x i32>
   %2 = shufflevector <8 x i32> %a1, <8 x i32> poison, <8 x i32>
   %3 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %1, <8 x i32> %2)
-  %4 = shufflevector <16 x i16> %3, <16 x i16> poison, <16 x i32>
+  %4 = shufflevector <16 x i16> %3, <16 x i16> poison, <16 x i32>
   ret <16 x i16> %4
 }
@@ -309,9 +309,9 @@ define <32 x i16> @elts_packssdw_512(<16 x i32> %a0, <16 x i32> %a1) {
 ; CHECK-NEXT: ret <32 x i16> [[TMP2]]
 ;
   %1 = shufflevector <16 x i32> %a0, <16 x i32> poison, <16 x i32>
-  %2 = shufflevector <16 x i32> %a1, <16 x i32> poison, <16 x i32>
+  %2 = shufflevector <16 x i32> %a1, <16 x i32> poison, <16 x i32>
   %3 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> %1, <16 x i32> %2)
-  %4 = shufflevector <32 x i16> %3, <32 x i16> poison, <32 x i32>
+  %4 = shufflevector <32 x i16> %3, <32 x i16> poison, <32 x i32>
   ret <32 x i16> %4
 }
@@ -325,7 +325,7 @@ define <32 x i16> @elts_packusdw_512(<16 x i32> %a0, <16 x i32> %a1) {
   %1 = shufflevector <16 x i32> %a0, <16 x i32> poison, <16 x i32>
   %2 = shufflevector <16 x i32> %a1, <16 x i32> poison, <16 x i32>
   %3 = call <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32> %1, <16 x i32> %2)
-  %4 = shufflevector <32 x i16> %3, <32 x i16> poison, <32 x i32>
+  %4 = shufflevector <32 x i16> %3, <32 x i16> poison, <32 x i32>
   ret <32 x i16> %4
 }
diff --git a/test/Transforms/InstCombine/X86/x86-pshufb-inseltpoison.ll b/test/Transforms/InstCombine/X86/x86-pshufb-inseltpoison.ll
index f5094f85a8a..35781159e1b 100644
--- a/test/Transforms/InstCombine/X86/x86-pshufb-inseltpoison.ll
+++ b/test/Transforms/InstCombine/X86/x86-pshufb-inseltpoison.ll
@@ -415,56 +415,56 @@ define <64 x i8> @permute3_avx512(<64 x i8> %InVec) {
   ret <64 x i8> %1
 }

-; FIXME: Verify that instcombine is able to fold constant byte shuffles with undef mask elements.
+; FIXME: Verify that instcombine is able to fold constant byte shuffles with poison mask elements.

-define <16 x i8> @fold_with_undef_elts(<16 x i8> %InVec) {
-; CHECK-LABEL: @fold_with_undef_elts(
+define <16 x i8> @fold_with_poison_elts(<16 x i8> %InVec) {
+; CHECK-LABEL: @fold_with_poison_elts(
 ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[INVEC:%.*]], <16 x i8> , <16 x i32>
 ; CHECK-NEXT: ret <16 x i8> [[TMP1]]
 ;
-  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> )
+  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> )
   ret <16 x i8> %1
 }

-define <32 x i8> @fold_with_undef_elts_avx2(<32 x i8> %InVec) {
-; CHECK-LABEL: @fold_with_undef_elts_avx2(
+define <32 x i8> @fold_with_poison_elts_avx2(<32 x i8> %InVec) {
+; CHECK-LABEL: @fold_with_poison_elts_avx2(
 ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> [[INVEC:%.*]], <32 x i8> , <32 x i32>
 ; CHECK-NEXT: ret <32 x i8> [[TMP1]]
 ;
-  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> )
+  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> )
   ret <32 x i8> %1
 }

-define <64 x i8> @fold_with_undef_elts_avx512(<64 x i8> %InVec) {
-; CHECK-LABEL: @fold_with_undef_elts_avx512(
+define <64 x i8> @fold_with_poison_elts_avx512(<64 x i8> %InVec) {
+; CHECK-LABEL: @fold_with_poison_elts_avx512(
 ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <64 x i8> [[INVEC:%.*]], <64 x i8> , <64 x i32>
 ; CHECK-NEXT: ret <64 x i8> [[TMP1]]
 ;
-  %1 = tail call <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8> %InVec, <64 x i8> )
+  %1 = tail call <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8> %InVec, <64 x i8> )
   ret <64 x i8> %1
 }

-define <16 x i8> @fold_with_allundef_elts(<16 x i8> %InVec) {
-; CHECK-LABEL: @fold_with_allundef_elts(
+define <16 x i8> @fold_with_allpoison_elts(<16 x i8> %InVec) {
+; CHECK-LABEL: @fold_with_allpoison_elts(
 ; CHECK-NEXT: ret <16 x i8> undef
 ;
-  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> undef)
+  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> poison)
   ret <16 x i8> %1
 }

-define <32 x i8> @fold_with_allundef_elts_avx2(<32 x i8> %InVec) {
-; CHECK-LABEL: @fold_with_allundef_elts_avx2(
+define <32 x i8> @fold_with_allpoison_elts_avx2(<32 x i8> %InVec) {
+; CHECK-LABEL: @fold_with_allpoison_elts_avx2(
 ; CHECK-NEXT: ret <32 x i8> undef
 ;
-  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> undef)
+  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> poison)
   ret <32 x i8> %1
 }

-define <64 x i8> @fold_with_allundef_elts_avx512(<64 x i8> %InVec) {
-; CHECK-LABEL: @fold_with_allundef_elts_avx512(
+define <64 x i8> @fold_with_allpoison_elts_avx512(<64 x i8> %InVec) {
+; CHECK-LABEL: @fold_with_allpoison_elts_avx512(
 ; CHECK-NEXT: ret <64 x i8> undef
 ;
-  %1 = tail call <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8> %InVec, <64 x i8> undef)
+  %1 = tail call <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8> %InVec, <64 x i8> poison)
   ret <64 x i8> %1
 }
@@ -479,7 +479,7 @@ define <16 x i8> @demanded_elts_insertion(<16 x i8> %InVec, <16 x i8> %BaseMask,
   %1 = insertelement <16 x i8> %BaseMask, i8 %M0, i32 0
   %2 = insertelement <16 x i8> %1, i8 %M15, i32 15
   %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> %2)
-  %4 = shufflevector <16 x i8> %3, <16 x i8> poison, <16 x i32>
+  %4 = shufflevector <16 x i8> %3, <16 x i8> poison, <16 x i32>
   ret <16 x i8> %4
 }
@@ -492,7 +492,7 @@ define <32 x i8> @demanded_elts_insertion_avx2(<32 x i8> %InVec, <32 x i8> %Base
   %1 = insertelement <32 x i8> %BaseMask, i8 %M0, i32 0
   %2 = insertelement <32 x i8> %1, i8 %M22, i32 22
   %3 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> %2)
-  %4 = shufflevector <32 x i8> %3, <32 x i8> poison, <32 x i32>
+  %4 = shufflevector <32 x i8> %3, <32 x i8> poison, <32 x i32>
   ret <32 x i8> %4
 }
diff --git a/test/Transforms/InstCombine/X86/x86-sse4a-inseltpoison.ll b/test/Transforms/InstCombine/X86/x86-sse4a-inseltpoison.ll
index 3270ca134db..1376266308a 100644
--- a/test/Transforms/InstCombine/X86/x86-sse4a-inseltpoison.ll
+++ b/test/Transforms/InstCombine/X86/x86-sse4a-inseltpoison.ll
@@ -50,11 +50,11 @@ define <2 x i64> @test_extrq_constant(<2 x i64> %x, <16 x i8> %y) {
   ret <2 x i64> %1
 }

-define <2 x i64> @test_extrq_constant_undef(<2 x i64> %x, <16 x i8> %y) {
-; CHECK-LABEL: @test_extrq_constant_undef(
+define <2 x i64> @test_extrq_constant_poison(<2 x i64> %x, <16 x i8> %y) {
+; CHECK-LABEL: @test_extrq_constant_poison(
 ; CHECK-NEXT: ret <2 x i64>
 ;
-  %1 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> , <16 x i8> ) nounwind
+  %1 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> , <16 x i8> ) nounwind
   ret <2 x i64> %1
 }
@@ -65,7 +65,7 @@ define <2 x i64> @test_extrq_call_constexpr(<2 x i64> %x) {
 ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x i64>
 ; CHECK-NEXT: ret <2 x i64> [[TMP3]]
 ;
-  %1 = call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %x, <16 x i8> bitcast (<2 x i64> to <16 x i8>))
+  %1 = call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %x, <16 x i8> bitcast (<2 x i64> to <16 x i8>))
   ret <2 x i64> %1
 }
@@ -104,8 +104,8 @@ define <2 x i64> @test_extrqi_shuffle_2zzzzzzzuuuuuuuu(<2 x i64> %x) {
   ret <2 x i64> %1
 }

-define <2 x i64> @test_extrqi_undef(<2 x i64> %x) {
-; CHECK-LABEL: @test_extrqi_undef(
+define <2 x i64> @test_extrqi_poison(<2 x i64> %x) {
+; CHECK-LABEL: @test_extrqi_poison(
 ; CHECK-NEXT: ret <2 x i64> undef
 ;
   %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> zeroinitializer, i8 32, i8 33)
@@ -128,11 +128,11 @@ define <2 x i64> @test_extrqi_constant(<2 x i64> %x) {
   ret <2 x i64> %1
 }

-define <2 x i64> @test_extrqi_constant_undef(<2 x i64> %x) {
-; CHECK-LABEL: @test_extrqi_constant_undef(
+define <2 x i64> @test_extrqi_constant_poison(<2 x i64> %x) {
+; CHECK-LABEL: @test_extrqi_constant_poison(
 ; CHECK-NEXT: ret <2 x i64>
 ;
-  %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> , i8 4, i8 18)
+  %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> , i8 4, i8 18)
   ret <2 x i64> %1
 }
@@ -140,7 +140,7 @@ define <2 x i64> @test_extrqi_call_constexpr() {
 ; CHECK-LABEL: @test_extrqi_call_constexpr(
 ; CHECK-NEXT: ret <2 x i64> zeroinitializer
 ;
-  %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> bitcast (<16 x i8> trunc (<16 x i16> bitcast (<4 x i64> to <16 x i16>) to <16 x i8>) to <2 x i64>), i8 8, i8 16)
+  %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> bitcast (<16 x i8> trunc (<16 x i16> bitcast (<4 x i64> to <16 x i16>) to <16 x i8>) to <2 x i64>), i8 8, i8 16)
   ret <2 x i64> %1
 }
@@ -174,11 +174,11 @@ define <2 x i64> @test_insertq_constant(<2 x i64> %x, <2 x i64> %y) {
   ret <2 x i64> %1
 }

-define <2 x i64> @test_insertq_constant_undef(<2 x i64> %x, <2 x i64> %y) {
-; CHECK-LABEL: @test_insertq_constant_undef(
+define <2 x i64> @test_insertq_constant_poison(<2 x i64> %x, <2 x i64> %y) {
+; CHECK-LABEL: @test_insertq_constant_poison(
 ; CHECK-NEXT: ret <2 x i64>
 ;
-  %1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> , <2 x i64> ) nounwind
+  %1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> , <2 x i64> ) nounwind
   ret <2 x i64> %1
 }
@@ -187,7 +187,7 @@ define <2 x i64> @test_insertq_call_constexpr(<2 x i64> %x) {
 ; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> [[X:%.*]], <2 x i64> , i8 2, i8 0)
 ; CHECK-NEXT: ret <2 x i64> [[TMP1]]
 ;
-  %1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %x, <2 x i64> bitcast (<16 x i8> trunc (<16 x i16> bitcast (<4 x i64> to <16 x i16>) to <16 x i8>) to <2 x i64>))
+  %1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %x, <2 x i64> bitcast (<16 x i8> trunc (<16 x i16> bitcast (<4 x i64> to <16 x i16>) to <16 x i8>) to <2 x i64>))
   ret <2 x i64> %1
 }
@@ -232,12 +232,12 @@ define <2 x i64> @test_insertqi_call_constexpr(<2 x i64> %x) {
 ; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> [[X:%.*]], <2 x i64> , i8 48, i8 3)
 ; CHECK-NEXT: ret <2 x i64> [[TMP1]]
 ;
-  %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> bitcast (<16 x i8> trunc (<16 x i16> bitcast (<4 x i64> to <16 x i16>) to <16 x i8>) to <2 x i64>), i8 48, i8 3)
+  %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> bitcast (<16 x i8> trunc (<16 x i16> bitcast (<4 x i64> to <16 x i16>) to <16 x i8>) to <2 x i64>), i8 48, i8 3)
   ret <2 x i64> %1
 }

 ; The result of this insert is the second arg, since the top 64 bits of
-; the result are undefined, and we copy the bottom 64 bits from the
+; the result are poison, and we copy the bottom 64 bits from the
 ; second arg
 define <2 x i64> @testInsert64Bits(<2 x i64> %v, <2 x i64> %i) {
 ; CHECK-LABEL: @testInsert64Bits(
diff --git a/test/Transforms/InstCombine/X86/x86-vector-shifts-inseltpoison.ll b/test/Transforms/InstCombine/X86/x86-vector-shifts-inseltpoison.ll
index fdfe6acbdb4..352c305e10c 100644
--- a/test/Transforms/InstCombine/X86/x86-vector-shifts-inseltpoison.ll
+++ b/test/Transforms/InstCombine/X86/x86-vector-shifts-inseltpoison.ll
@@ -1604,7 +1604,7 @@ define <4 x i32> @avx2_psrav_d_128_allbig(<4 x i32> %v) {
 ; CHECK-NEXT: [[TMP1:%.*]] = ashr <4 x i32> [[V:%.*]],
 ; CHECK-NEXT: ret <4 x i32> [[TMP1]]
 ;
-  %1 = tail call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %v, <4 x i32> )
+  %1 = tail call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %v, <4 x i32> )
   ret <4 x i32> %1
 }
@@ -1613,7 +1613,7 @@ define <8 x i32> @avx2_psrav_d_256_allbig(<8 x i32> %v) {
 ; CHECK-NEXT: [[TMP1:%.*]] = ashr <8 x i32> [[V:%.*]],
 ; CHECK-NEXT: ret <8 x i32> [[TMP1]]
 ;
-  %1 = tail call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> %v, <8 x i32> )
+  %1 = tail call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> %v, <8 x i32> )
   ret <8 x i32> %1
 }
@@ -1622,36 +1622,36 @@ define <16 x i32> @avx512_psrav_d_512_allbig(<16 x i32> %v) {
 ; CHECK-NEXT: [[TMP1:%.*]] = ashr <16 x i32> [[V:%.*]],
 ; CHECK-NEXT: ret <16 x i32> [[TMP1]]
 ;
-  %1 = tail call <16 x i32> @llvm.x86.avx512.psrav.d.512(<16 x i32> %v, <16 x i32> )
+  %1 = tail call <16 x i32> @llvm.x86.avx512.psrav.d.512(<16 x i32> %v, <16 x i32> )
   ret <16 x i32> %1
 }

-define <4 x i32> @avx2_psrav_d_128_undef(<4 x i32> %v) {
-; CHECK-LABEL: @avx2_psrav_d_128_undef(
+define <4 x i32> @avx2_psrav_d_128_poison(<4 x i32> %v) {
+; CHECK-LABEL: @avx2_psrav_d_128_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = ashr <4 x i32> [[V:%.*]],
 ; CHECK-NEXT: ret <4 x i32> [[TMP1]]
 ;
-  %1 = insertelement <4 x i32> , i32 undef, i32 0
+  %1 = insertelement <4 x i32> , i32 poison, i32 0
   %2 = tail call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %v, <4 x i32> %1)
   ret <4 x i32> %2
 }

-define <8 x i32> @avx2_psrav_d_256_undef(<8 x i32> %v) {
-; CHECK-LABEL: @avx2_psrav_d_256_undef(
+define <8 x i32> @avx2_psrav_d_256_poison(<8 x i32> %v) {
+; CHECK-LABEL: @avx2_psrav_d_256_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = ashr <8 x i32> [[V:%.*]],
 ; CHECK-NEXT: ret <8 x i32> [[TMP1]]
 ;
-  %1 = insertelement <8 x i32> , i32 undef, i32 1
+  %1 = insertelement <8 x i32> , i32 poison, i32 1
   %2 = tail call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> %v, <8 x i32> %1)
   ret <8 x i32> %2
 }

-define <16 x i32> @avx512_psrav_d_512_undef(<16 x i32> %v) {
-; CHECK-LABEL: @avx512_psrav_d_512_undef(
+define <16 x i32> @avx512_psrav_d_512_poison(<16 x i32> %v) {
+; CHECK-LABEL: @avx512_psrav_d_512_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = ashr <16 x i32> [[V:%.*]],
 ; CHECK-NEXT: ret <16 x i32> [[TMP1]]
 ;
-  %1 = insertelement <16 x i32> , i32 undef, i32 1
+  %1 = insertelement <16 x i32> , i32 poison, i32 1
   %2 = tail call <16 x i32> @llvm.x86.avx512.psrav.d.512(<16 x i32> %v, <16 x i32> %1)
   ret <16 x i32> %2
 }
@@ -1695,7 +1695,7 @@ define <2 x i64> @avx512_psrav_q_128_allbig(<2 x i64> %v) {
 ; CHECK-NEXT: [[TMP1:%.*]] = ashr <2 x i64> [[V:%.*]],
 ; CHECK-NEXT: ret <2 x i64> [[TMP1]]
 ;
-  %1 = tail call <2 x i64> @llvm.x86.avx512.psrav.q.128(<2 x i64> %v, <2 x i64> )
+  %1 = tail call <2 x i64> @llvm.x86.avx512.psrav.q.128(<2 x i64> %v, <2 x i64> )
   ret <2 x i64> %1
 }
@@ -1704,26 +1704,26 @@ define <4 x i64> @avx512_psrav_q_256_allbig(<4 x i64> %v) {
 ; CHECK-NEXT: [[TMP1:%.*]] = ashr <4 x i64> [[V:%.*]],
 ; CHECK-NEXT: ret <4 x i64> [[TMP1]]
 ;
-  %1 = tail call <4 x i64> @llvm.x86.avx512.psrav.q.256(<4 x i64> %v, <4 x i64> )
+  %1 = tail call <4 x i64> @llvm.x86.avx512.psrav.q.256(<4 x i64> %v, <4 x i64> )
   ret <4 x i64> %1
 }

-define <2 x i64> @avx512_psrav_q_128_undef(<2 x i64> %v) {
-; CHECK-LABEL: @avx512_psrav_q_128_undef(
+define <2 x i64> @avx512_psrav_q_128_poison(<2 x i64> %v) {
+; CHECK-LABEL: @avx512_psrav_q_128_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = ashr <2 x i64> [[V:%.*]],
 ; CHECK-NEXT: ret <2 x i64> [[TMP1]]
 ;
-  %1 = insertelement <2 x i64> , i64 undef, i64 0
+  %1 = insertelement <2 x i64> , i64 poison, i64 0
   %2 = tail call <2 x i64> @llvm.x86.avx512.psrav.q.128(<2 x i64> %v, <2 x i64> %1)
   ret <2 x i64> %2
 }

-define <4 x i64> @avx512_psrav_q_256_undef(<4 x i64> %v) {
-; CHECK-LABEL: @avx512_psrav_q_256_undef(
+define <4 x i64> @avx512_psrav_q_256_poison(<4 x i64> %v) {
+; CHECK-LABEL: @avx512_psrav_q_256_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = ashr <4 x i64> [[V:%.*]],
 ; CHECK-NEXT: ret <4 x i64> [[TMP1]]
 ;
-  %1 = insertelement <4 x i64> , i64 undef, i64 0
+  %1 = insertelement <4 x i64> , i64 poison, i64 0
   %2 = tail call <4 x i64> @llvm.x86.avx512.psrav.q.256(<4 x i64> %v, <4 x i64> %1)
   ret <4 x i64> %2
 }
@@ -1750,16 +1750,16 @@ define <8 x i64> @avx512_psrav_q_512_allbig(<8 x i64> %v) {
 ; CHECK-NEXT: [[TMP1:%.*]] = ashr <8 x i64> [[V:%.*]],
 ; CHECK-NEXT: ret <8 x i64> [[TMP1]]
 ;
-  %1 = tail call <8 x i64> @llvm.x86.avx512.psrav.q.512(<8 x i64> %v, <8 x i64> )
+  %1 = tail call <8 x i64> @llvm.x86.avx512.psrav.q.512(<8 x i64> %v, <8 x i64> )
   ret <8 x i64> %1
 }

-define <8 x i64> @avx512_psrav_q_512_undef(<8 x i64> %v) {
-; CHECK-LABEL: @avx512_psrav_q_512_undef(
+define <8 x i64> @avx512_psrav_q_512_poison(<8 x i64> %v) {
+; CHECK-LABEL: @avx512_psrav_q_512_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = ashr <8 x i64> [[V:%.*]],
 ; CHECK-NEXT: ret <8 x i64> [[TMP1]]
 ;
-  %1 = insertelement <8 x i64> , i64 undef, i64 0
+  %1 = insertelement <8 x i64> , i64 poison, i64 0
   %2 = tail call <8 x i64> @llvm.x86.avx512.psrav.q.512(<8 x i64> %v, <8 x i64> %1)
   ret <8 x i64> %2
 }
@@ -1786,16 +1786,16 @@ define <8 x i16> @avx512_psrav_w_128_allbig(<8 x i16> %v) {
 ; CHECK-NEXT: [[TMP1:%.*]] = ashr <8 x i16> [[V:%.*]],
 ; CHECK-NEXT: ret <8 x i16> [[TMP1]]
 ;
-  %1 = tail call <8 x i16> @llvm.x86.avx512.psrav.w.128(<8 x i16> %v, <8 x i16> )
+  %1 = tail call <8 x i16> @llvm.x86.avx512.psrav.w.128(<8 x i16> %v, <8 x i16> )
   ret <8 x i16> %1
 }

-define <8 x i16> @avx512_psrav_w_128_undef(<8 x i16> %v) {
-; CHECK-LABEL: @avx512_psrav_w_128_undef(
+define <8 x i16> @avx512_psrav_w_128_poison(<8 x i16> %v) {
+; CHECK-LABEL: @avx512_psrav_w_128_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = ashr <8 x i16> [[V:%.*]],
 ; CHECK-NEXT: ret <8 x i16> [[TMP1]]
 ;
-  %1 = insertelement <8 x i16> , i16 undef, i64 0
+  %1 = insertelement <8 x i16> , i16 poison, i64 0
   %2 = tail call <8 x i16> @llvm.x86.avx512.psrav.w.128(<8 x i16> %v, <8 x i16> %1)
   ret <8 x i16> %2
 }
@@ -1822,16 +1822,16 @@ define <16 x i16> @avx512_psrav_w_256_allbig(<16 x i16> %v) {
 ; CHECK-NEXT: [[TMP1:%.*]] = ashr <16 x i16> [[V:%.*]],
 ; CHECK-NEXT: ret <16 x i16> [[TMP1]]
 ;
-  %1 = tail call <16 x i16> @llvm.x86.avx512.psrav.w.256(<16 x i16> %v, <16 x i16> )
+  %1 = tail call <16 x i16> @llvm.x86.avx512.psrav.w.256(<16 x i16> %v, <16 x i16> )
   ret <16 x i16> %1
 }

-define <16 x i16> @avx512_psrav_w_256_undef(<16 x i16> %v) {
-; CHECK-LABEL: @avx512_psrav_w_256_undef(
+define <16 x i16> @avx512_psrav_w_256_poison(<16 x i16> %v) {
+; CHECK-LABEL: @avx512_psrav_w_256_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = ashr <16 x i16> [[V:%.*]],
 ; CHECK-NEXT: ret <16 x i16> [[TMP1]]
 ;
-  %1 = insertelement <16 x i16> , i16 undef, i64 0
+  %1 = insertelement <16 x i16> , i16 poison, i64 0
   %2 = tail call <16 x i16> @llvm.x86.avx512.psrav.w.256(<16 x i16> %v, <16 x i16> %1)
   ret <16 x i16> %2
 }
@@ -1858,16 +1858,16 @@ define <32 x i16> @avx512_psrav_w_512_allbig(<32 x i16> %v) {
 ; CHECK-NEXT: [[TMP1:%.*]] = ashr <32 x i16> [[V:%.*]],
 ; CHECK-NEXT: ret <32 x i16> [[TMP1]]
 ;
-  %1 = tail call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> %v, <32 x i16> )
+  %1 = tail call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> %v, <32 x i16> )
   ret <32 x i16> %1
 }

-define <32 x i16> @avx512_psrav_w_512_undef(<32 x i16> %v) {
-; CHECK-LABEL: @avx512_psrav_w_512_undef(
+define <32 x i16> @avx512_psrav_w_512_poison(<32 x i16> %v) {
+; CHECK-LABEL: @avx512_psrav_w_512_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = ashr <32 x i16> [[V:%.*]],
 ; CHECK-NEXT: ret <32 x i16> [[TMP1]]
 ;
-  %1 = insertelement <32 x i16> , i16 undef, i64 0
+  %1 = insertelement <32 x i16> , i16 poison, i64 0
   %2 = tail call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> %v, <32 x i16> %1)
   ret <32 x i16> %2
 }
@@ -1932,7 +1932,7 @@ define <4 x i32> @avx2_psrlv_d_128_allbig(<4 x i32> %v) {
 ; CHECK-LABEL: @avx2_psrlv_d_128_allbig(
 ; CHECK-NEXT: ret <4 x i32>
 ;
-  %1 = tail call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %v, <4 x i32> )
+  %1 = tail call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %v, <4 x i32> )
   ret <4 x i32> %1
 }
@@ -1940,26 +1940,26 @@ define <8 x i32> @avx2_psrlv_d_256_allbig(<8 x i32> %v) {
 ; CHECK-LABEL: @avx2_psrlv_d_256_allbig(
 ; CHECK-NEXT: ret <8 x i32>
 ;
-  %1 = tail call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> %v, <8 x i32> )
+  %1 = tail call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> %v, <8 x i32> )
   ret <8 x i32> %1
 }

-define <4 x i32> @avx2_psrlv_d_128_undef(<4 x i32> %v) {
-; CHECK-LABEL: @avx2_psrlv_d_128_undef(
+define <4 x i32> @avx2_psrlv_d_128_poison(<4 x i32> %v) {
+; CHECK-LABEL: @avx2_psrlv_d_128_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = lshr <4 x i32> [[V:%.*]],
 ; CHECK-NEXT: ret <4 x i32> [[TMP1]]
 ;
-  %1 = insertelement <4 x i32> , i32 undef, i32 0
+  %1 = insertelement <4 x i32> , i32 poison, i32 0
   %2 = tail call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %v, <4 x i32> %1)
   ret <4 x i32> %2
 }

-define <8 x i32> @avx2_psrlv_d_256_undef(<8 x i32> %v) {
-; CHECK-LABEL: @avx2_psrlv_d_256_undef(
+define <8 x i32> @avx2_psrlv_d_256_poison(<8 x i32> %v) {
+; CHECK-LABEL: @avx2_psrlv_d_256_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = lshr <8 x i32> [[V:%.*]],
 ; CHECK-NEXT: ret <8 x i32> [[TMP1]]
 ;
-  %1 = insertelement <8 x i32> , i32 undef, i32 1
+  %1 = insertelement <8 x i32> , i32 poison, i32 1
   %2 = tail call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> %v, <8 x i32> %1)
   ret <8 x i32> %2
 }
@@ -2028,27 +2028,27 @@ define <4 x i64> @avx2_psrlv_q_256_allbig(<4 x i64> %v) {
 ; CHECK-LABEL: @avx2_psrlv_q_256_allbig(
 ; CHECK-NEXT: ret <4 x i64>
 ;
-  %1 = tail call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %v, <4 x i64> )
+  %1 = tail call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %v, <4 x i64> )
   ret <4 x i64> %1
 }

-; The shift amount is 0 (the undef lane could be 0), so we return the unshifted input.
+; The shift amount is 0 (the poison lane could be 0), so we return the unshifted input.

-define <2 x i64> @avx2_psrlv_q_128_undef(<2 x i64> %v) {
-; CHECK-LABEL: @avx2_psrlv_q_128_undef(
+define <2 x i64> @avx2_psrlv_q_128_poison(<2 x i64> %v) {
+; CHECK-LABEL: @avx2_psrlv_q_128_poison(
 ; CHECK-NEXT: ret <2 x i64> [[V:%.*]]
 ;
-  %1 = insertelement <2 x i64> , i64 undef, i64 1
+  %1 = insertelement <2 x i64> , i64 poison, i64 1
   %2 = tail call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> %v, <2 x i64> %1)
   ret <2 x i64> %2
 }

-define <4 x i64> @avx2_psrlv_q_256_undef(<4 x i64> %v) {
-; CHECK-LABEL: @avx2_psrlv_q_256_undef(
+define <4 x i64> @avx2_psrlv_q_256_poison(<4 x i64> %v) {
+; CHECK-LABEL: @avx2_psrlv_q_256_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = lshr <4 x i64> [[V:%.*]],
 ; CHECK-NEXT: ret <4 x i64> [[TMP1]]
 ;
-  %1 = insertelement <4 x i64> , i64 undef, i64 0
+  %1 = insertelement <4 x i64> , i64 poison, i64 0
   %2 = tail call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %v, <4 x i64> %1)
   ret <4 x i64> %2
 }
@@ -2083,16 +2083,16 @@ define <16 x i32> @avx512_psrlv_d_512_allbig(<16 x i32> %v) {
 ; CHECK-LABEL: @avx512_psrlv_d_512_allbig(
 ; CHECK-NEXT: ret <16 x i32>
 ;
-  %1 = tail call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> %v, <16 x i32> )
+  %1 = tail call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> %v, <16 x i32> )
   ret <16 x i32> %1
 }

-define <16 x i32> @avx512_psrlv_d_512_undef(<16 x i32> %v) {
-; CHECK-LABEL: @avx512_psrlv_d_512_undef(
+define <16 x i32> @avx512_psrlv_d_512_poison(<16 x i32> %v) {
+; CHECK-LABEL: @avx512_psrlv_d_512_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = lshr <16 x i32> [[V:%.*]],
 ; CHECK-NEXT: ret <16 x i32> [[TMP1]]
 ;
-  %1 = insertelement <16 x i32> , i32 undef, i32 1
+  %1 = insertelement <16 x i32> , i32 poison, i32 1
   %2 = tail call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> %v, <16 x i32> %1)
   ret <16 x i32> %2
 }
@@ -2127,16 +2127,16 @@ define <8 x i64> @avx512_psrlv_q_512_allbig(<8 x i64> %v) {
 ; CHECK-LABEL: @avx512_psrlv_q_512_allbig(
 ; CHECK-NEXT: ret <8 x i64>
 ;
-  %1 = tail call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> %v, <8 x i64> )
+  %1 = tail call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> %v, <8 x i64> )
   ret <8 x i64> %1
 }

-define <8 x i64> @avx512_psrlv_q_512_undef(<8 x i64> %v) {
-; CHECK-LABEL: @avx512_psrlv_q_512_undef(
+define <8 x i64> @avx512_psrlv_q_512_poison(<8 x i64> %v) {
+; CHECK-LABEL: @avx512_psrlv_q_512_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = lshr <8 x i64> [[V:%.*]],
 ; CHECK-NEXT: ret <8 x i64> [[TMP1]]
 ;
-  %1 = insertelement <8 x i64> , i64 undef, i64 0
+  %1 = insertelement <8 x i64> , i64 poison, i64 0
   %2 = tail call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> %v, <8 x i64> %1)
   ret <8 x i64> %2
 }
@@ -2171,16 +2171,16 @@ define <8 x i16> @avx512_psrlv_w_128_allbig(<8 x i16> %v) {
 ; CHECK-LABEL: @avx512_psrlv_w_128_allbig(
 ; CHECK-NEXT: ret <8 x i16>
 ;
-  %1 = tail call <8 x i16> @llvm.x86.avx512.psrlv.w.128(<8 x i16> %v, <8 x i16> )
+  %1 = tail call <8 x i16> @llvm.x86.avx512.psrlv.w.128(<8 x i16> %v, <8 x i16> )
   ret <8 x i16> %1
 }

-define <8 x i16> @avx512_psrlv_w_128_undef(<8 x i16> %v) {
-; CHECK-LABEL: @avx512_psrlv_w_128_undef(
+define <8 x i16> @avx512_psrlv_w_128_poison(<8 x i16> %v) {
+; CHECK-LABEL: @avx512_psrlv_w_128_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = lshr <8 x i16> [[V:%.*]],
 ; CHECK-NEXT: ret <8 x i16> [[TMP1]]
 ;
-  %1 = insertelement <8 x i16> , i16 undef, i64 0
+  %1 = insertelement <8 x i16> , i16 poison, i64 0
   %2 = tail call <8 x i16> @llvm.x86.avx512.psrlv.w.128(<8 x i16> %v, <8 x i16> %1)
   ret <8 x i16> %2
 }
@@ -2215,16 +2215,16 @@ define <16 x i16> @avx512_psrlv_w_256_allbig(<16 x i16> %v) {
 ; CHECK-LABEL: @avx512_psrlv_w_256_allbig(
 ; CHECK-NEXT: ret <16 x i16>
 ;
-  %1 = tail call <16 x i16> @llvm.x86.avx512.psrlv.w.256(<16 x i16> %v, <16 x i16> )
+  %1 = tail call <16 x i16> @llvm.x86.avx512.psrlv.w.256(<16 x i16> %v, <16 x i16> )
   ret <16 x i16> %1
 }

-define <16 x i16> @avx512_psrlv_w_256_undef(<16 x i16> %v) {
-; CHECK-LABEL: @avx512_psrlv_w_256_undef(
+define <16 x i16> @avx512_psrlv_w_256_poison(<16 x i16> %v) {
+; CHECK-LABEL: @avx512_psrlv_w_256_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = lshr <16 x i16> [[V:%.*]],
 ; CHECK-NEXT: ret <16 x i16> [[TMP1]]
 ;
-  %1 = insertelement <16 x i16> , i16 undef, i64 0
+  %1 = insertelement <16 x i16> , i16 poison, i64 0
   %2 = tail call <16 x i16> @llvm.x86.avx512.psrlv.w.256(<16 x i16> %v, <16 x i16> %1)
   ret <16 x i16> %2
 }
@@ -2259,16 +2259,16 @@ define <32 x i16> @avx512_psrlv_w_512_allbig(<32 x i16> %v) {
 ; CHECK-LABEL: @avx512_psrlv_w_512_allbig(
 ; CHECK-NEXT: ret <32 x i16>
 ;
-  %1 = tail call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> %v, <32 x i16> )
+  %1 = tail call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> %v, <32 x i16> )
   ret <32 x i16> %1
 }

-define <32 x i16> @avx512_psrlv_w_512_undef(<32 x i16> %v) {
-; CHECK-LABEL: @avx512_psrlv_w_512_undef(
+define <32 x i16> @avx512_psrlv_w_512_poison(<32 x i16> %v) {
+; CHECK-LABEL: @avx512_psrlv_w_512_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = lshr <32 x i16> [[V:%.*]],
 ; CHECK-NEXT: ret <32 x i16> [[TMP1]]
 ;
-  %1 = insertelement <32 x i16> , i16 undef, i64 0
+  %1 = insertelement <32 x i16> , i16 poison, i64 0
   %2 = tail call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> %v, <32 x i16> %1)
   ret <32 x i16> %2
 }
@@ -2333,7 +2333,7 @@ define <4 x i32> @avx2_psllv_d_128_allbig(<4 x i32> %v) {
 ; CHECK-LABEL: @avx2_psllv_d_128_allbig(
 ; CHECK-NEXT: ret <4 x i32>
 ;
-  %1 = tail call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %v, <4 x i32> )
+  %1 = tail call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %v, <4 x i32> )
   ret <4 x i32> %1
 }
@@ -2341,26 +2341,26 @@ define <8 x i32> @avx2_psllv_d_256_allbig(<8 x i32> %v) {
 ; CHECK-LABEL: @avx2_psllv_d_256_allbig(
 ; CHECK-NEXT: ret <8 x i32>
 ;
-  %1 = tail call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %v, <8 x i32> )
+  %1 = tail call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %v, <8 x i32> )
   ret <8 x i32> %1
 }

-define <4 x i32> @avx2_psllv_d_128_undef(<4 x i32> %v) {
-; CHECK-LABEL: @avx2_psllv_d_128_undef(
+define <4 x i32> @avx2_psllv_d_128_poison(<4 x i32> %v) {
+; CHECK-LABEL: @avx2_psllv_d_128_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = shl <4 x i32> [[V:%.*]],
 ; CHECK-NEXT: ret <4 x i32> [[TMP1]]
 ;
-  %1 = insertelement <4 x i32> , i32 undef, i32 0
+  %1 = insertelement <4 x i32> , i32 poison, i32 0
   %2 = tail call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %v, <4 x i32> %1)
   ret <4 x i32> %2
 }

-define <8 x i32> @avx2_psllv_d_256_undef(<8 x i32> %v) {
-; CHECK-LABEL: @avx2_psllv_d_256_undef(
+define <8 x i32> @avx2_psllv_d_256_poison(<8 x i32> %v) {
+; CHECK-LABEL: @avx2_psllv_d_256_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = shl <8 x i32> [[V:%.*]],
 ; CHECK-NEXT: ret <8 x i32> [[TMP1]]
 ;
-  %1 = insertelement <8 x i32> , i32 undef, i32 1
+  %1 = insertelement <8 x i32> , i32 poison, i32 1
   %2 = tail call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %v, <8 x i32> %1)
   ret <8 x i32> %2
 }
@@ -2429,27 +2429,27 @@ define <4 x i64> @avx2_psllv_q_256_allbig(<4 x i64> %v) {
 ; CHECK-LABEL: @avx2_psllv_q_256_allbig(
 ; CHECK-NEXT: ret <4 x i64>
 ;
-  %1 = tail call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %v, <4 x i64> )
+  %1 = tail call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %v, <4 x i64> )
   ret <4 x i64> %1
 }

-; The shift amount is 0 (the undef lane could be 0), so we return the unshifted input.
+; The shift amount is 0 (the poison lane could be 0), so we return the unshifted input.

-define <2 x i64> @avx2_psllv_q_128_undef(<2 x i64> %v) {
-; CHECK-LABEL: @avx2_psllv_q_128_undef(
+define <2 x i64> @avx2_psllv_q_128_poison(<2 x i64> %v) {
+; CHECK-LABEL: @avx2_psllv_q_128_poison(
 ; CHECK-NEXT: ret <2 x i64> [[V:%.*]]
 ;
-  %1 = insertelement <2 x i64> , i64 undef, i64 1
+  %1 = insertelement <2 x i64> , i64 poison, i64 1
   %2 = tail call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> %v, <2 x i64> %1)
   ret <2 x i64> %2
 }

-define <4 x i64> @avx2_psllv_q_256_undef(<4 x i64> %v) {
-; CHECK-LABEL: @avx2_psllv_q_256_undef(
+define <4 x i64> @avx2_psllv_q_256_poison(<4 x i64> %v) {
+; CHECK-LABEL: @avx2_psllv_q_256_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = shl <4 x i64> [[V:%.*]],
 ; CHECK-NEXT: ret <4 x i64> [[TMP1]]
 ;
-  %1 = insertelement <4 x i64> , i64 undef, i64 0
+  %1 = insertelement <4 x i64> , i64 poison, i64 0
   %2 = tail call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %v, <4 x i64> %1)
   ret <4 x i64> %2
 }
@@ -2484,16 +2484,16 @@ define <16 x i32> @avx512_psllv_d_512_allbig(<16 x i32> %v) {
 ; CHECK-LABEL: @avx512_psllv_d_512_allbig(
 ; CHECK-NEXT: ret <16 x i32>
 ;
-  %1 = tail call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> %v, <16 x i32> )
+  %1 = tail call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> %v, <16 x i32> )
   ret <16 x i32> %1
 }

-define <16 x i32> @avx512_psllv_d_512_undef(<16 x i32> %v) {
-; CHECK-LABEL: @avx512_psllv_d_512_undef(
+define <16 x i32> @avx512_psllv_d_512_poison(<16 x i32> %v) {
+; CHECK-LABEL: @avx512_psllv_d_512_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = shl <16 x i32> [[V:%.*]],
 ; CHECK-NEXT: ret <16 x i32> [[TMP1]]
 ;
-  %1 = insertelement <16 x i32> , i32 undef, i32 1
+  %1 = insertelement <16 x i32> , i32 poison, i32 1
   %2 = tail call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> %v, <16 x i32> %1)
   ret <16 x i32> %2
 }
@@ -2528,16 +2528,16 @@ define <8 x i64> @avx512_psllv_q_512_allbig(<8 x i64> %v) {
 ; CHECK-LABEL: @avx512_psllv_q_512_allbig(
 ; CHECK-NEXT: ret <8 x i64>
 ;
-  %1 = tail call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> %v, <8 x i64> )
+  %1 = tail call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> %v, <8 x i64> )
   ret <8 x i64> %1
 }

-define <8 x i64> @avx512_psllv_q_512_undef(<8 x i64> %v) {
-; CHECK-LABEL: @avx512_psllv_q_512_undef(
+define <8 x i64> @avx512_psllv_q_512_poison(<8 x i64> %v) {
+; CHECK-LABEL: @avx512_psllv_q_512_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = shl <8 x i64> [[V:%.*]],
 ; CHECK-NEXT: ret <8 x i64> [[TMP1]]
 ;
-  %1 = insertelement <8 x i64> , i64 undef, i64 0
+  %1 = insertelement <8 x i64> , i64 poison, i64 0
   %2 = tail call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> %v, <8 x i64> %1)
   ret <8 x i64> %2
 }
@@ -2572,16 +2572,16 @@ define <8 x i16> @avx512_psllv_w_128_allbig(<8 x i16> %v) {
 ; CHECK-LABEL: @avx512_psllv_w_128_allbig(
 ; CHECK-NEXT: ret <8 x i16>
 ;
-  %1 = tail call <8 x i16> @llvm.x86.avx512.psllv.w.128(<8 x i16> %v, <8 x i16> )
+  %1 = tail call <8 x i16> @llvm.x86.avx512.psllv.w.128(<8 x i16> %v, <8 x i16> )
   ret <8 x i16> %1
 }

-define <8 x i16> @avx512_psllv_w_128_undef(<8 x i16> %v) {
-; CHECK-LABEL: @avx512_psllv_w_128_undef(
+define <8 x i16> @avx512_psllv_w_128_poison(<8 x i16> %v) {
+; CHECK-LABEL: @avx512_psllv_w_128_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = shl <8 x i16> [[V:%.*]],
 ; CHECK-NEXT: ret <8 x i16> [[TMP1]]
 ;
-  %1 = insertelement <8 x i16> , i16 undef, i64 0
+  %1 = insertelement <8 x i16> , i16 poison, i64 0
   %2 = tail call <8 x i16> @llvm.x86.avx512.psllv.w.128(<8 x i16> %v, <8 x i16> %1)
   ret <8 x i16> %2
 }
@@ -2616,16 +2616,16 @@ define <16 x i16> @avx512_psllv_w_256_allbig(<16 x i16> %v) {
 ; CHECK-LABEL: @avx512_psllv_w_256_allbig(
 ; CHECK-NEXT: ret <16 x i16>
 ;
-  %1 = tail call <16 x i16> @llvm.x86.avx512.psllv.w.256(<16 x i16> %v, <16 x i16> )
+  %1 = tail call <16 x i16> @llvm.x86.avx512.psllv.w.256(<16 x i16> %v, <16 x i16> )
   ret <16 x i16> %1
 }

-define <16 x i16> @avx512_psllv_w_256_undef(<16 x i16> %v) {
-; CHECK-LABEL: @avx512_psllv_w_256_undef(
+define <16 x i16> @avx512_psllv_w_256_poison(<16 x i16> %v) {
+; CHECK-LABEL: @avx512_psllv_w_256_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = shl <16 x i16> [[V:%.*]],
 ; CHECK-NEXT: ret <16 x i16> [[TMP1]]
 ;
-  %1 = insertelement <16 x i16> , i16 undef, i64 0
+  %1 = insertelement <16 x i16> , i16 poison, i64 0
   %2 = tail call <16 x i16> @llvm.x86.avx512.psllv.w.256(<16 x i16> %v, <16 x i16> %1)
   ret <16 x i16> %2
 }
@@ -2660,16 +2660,16 @@ define <32 x i16> @avx512_psllv_w_512_allbig(<32 x i16> %v) {
 ; CHECK-LABEL: @avx512_psllv_w_512_allbig(
 ; CHECK-NEXT: ret <32 x i16>
 ;
-  %1 = tail call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> %v, <32 x i16> )
+  %1 = tail call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> %v, <32 x i16> )
   ret <32 x i16> %1
 }

-define <32 x i16> @avx512_psllv_w_512_undef(<32 x i16> %v) {
-; CHECK-LABEL: @avx512_psllv_w_512_undef(
+define <32 x i16> @avx512_psllv_w_512_poison(<32 x i16> %v) {
+; CHECK-LABEL: @avx512_psllv_w_512_poison(
 ; CHECK-NEXT: [[TMP1:%.*]] = shl <32 x i16> [[V:%.*]],
 ; CHECK-NEXT: ret <32 x i16> [[TMP1]]
 ;
-  %1 = insertelement <32 x i16> , i16 undef, i64 0
+  %1 = insertelement <32 x i16> , i16 poison, i64 0
   %2 = tail call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> %v, <32 x i16> %1)
   ret <32 x i16> %2
 }
@@ -2685,7 +2685,7 @@ define <8 x i16> @sse2_psra_w_128_masked(<8 x i16> %v, <8 x i16> %a) {
 ; CHECK-NEXT: [[TMP3:%.*]] = ashr <8 x i16> [[V:%.*]], [[TMP2]]
 ; CHECK-NEXT: ret <8 x i16> [[TMP3]]
 ;
-  %1 = and <8 x i16> %a,
+  %1 = and <8 x i16> %a,
   %2 = tail call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %v, <8 x i16> %1)
   ret <8 x i16> %2
 }
@@ -2697,7 +2697,7 @@ define <8 x i32> @avx2_psra_d_256_masked(<8 x i32> %v, <4 x i32> %a) {
 ; CHECK-NEXT: [[TMP3:%.*]] = ashr <8 x i32> [[V:%.*]], [[TMP2]]
 ; CHECK-NEXT: ret <8 x i32> [[TMP3]]
 ;
-  %1 = and <4 x i32> %a,
+  %1 = and <4 x i32> %a,
   %2 = tail call <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32> %v, <4 x i32> %1)
   ret <8 x i32> %2
 }
@@ -2709,7 +2709,7 @@ define <8 x i64> @avx512_psra_q_512_masked(<8 x i64> %v, <2 x i64> %a) {
 ; CHECK-NEXT: [[TMP3:%.*]] = ashr <8 x i64> [[V:%.*]], [[TMP2]]
 ; CHECK-NEXT: ret <8 x i64> [[TMP3]]
 ;
-  %1 = and <2 x i64> %a,
+  %1 = and <2 x i64> %a,
   %2 = tail call <8 x i64> @llvm.x86.avx512.psra.q.512(<8 x i64> %v, <2 x i64> %1)
   ret <8 x i64> %2
 }
@@ -2721,7 +2721,7 @@ define <4 x i32> @sse2_psrl_d_128_masked(<4 x i32> %v, <4 x i32> %a) {
 ; CHECK-NEXT: [[TMP3:%.*]] = lshr <4 x i32> [[V:%.*]], [[TMP2]]
 ; CHECK-NEXT: ret <4 x i32> [[TMP3]]
 ;
-  %1 = and <4 x i32> %a,
+  %1 = and <4 x i32> %a,
   %2 = tail call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %v, <4 x i32> %1)
   ret <4 x i32> %2
 }
@@ -2733,7 +2733,7 @@ define <4 x i64> @avx2_psrl_q_256_masked(<4 x i64> %v, <2 x i64> %a) {
 ; CHECK-NEXT: [[TMP3:%.*]] = lshr <4 x i64> [[V:%.*]], [[TMP2]]
 ; CHECK-NEXT: ret <4 x i64> [[TMP3]]
 ;
-  %1 = and <2 x i64> %a,
+  %1 = and <2 x i64> %a,
   %2 = tail call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %v, <2 x i64> %1)
   ret <4 x i64> %2
 }
@@ -2745,7 +2745,7 @@ define <32 x i16> @avx512_psrl_w_512_masked(<32 x i16> %v, <8 x i16> %a) {
 ; CHECK-NEXT: [[TMP3:%.*]] = lshr <32 x i16> [[V:%.*]], [[TMP2]]
 ; CHECK-NEXT: ret <32 x i16> [[TMP3]]
 ;
-  %1 = and <8 x i16> %a,
+  %1 = and <8 x i16> %a,
   %2 = tail call <32 x i16> @llvm.x86.avx512.psrl.w.512(<32 x i16> %v, <8 x i16> %1)
   ret <32 x i16> %2
 }
@@ -2757,7 +2757,7 @@ define <2 x i64> @sse2_psll_q_128_masked(<2 x i64> %v, <2 x i64> %a) {
 ; CHECK-NEXT: [[TMP3:%.*]] = shl <2 x i64> [[V:%.*]], [[TMP2]]
 ; CHECK-NEXT: ret <2 x i64> [[TMP3]]
 ;
-  %1 = and <2 x i64> %a,
+  %1 = and <2 x i64> %a,
   %2 = tail call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %v, <2 x i64> %1)
   ret <2 x i64> %2
 }
@@ -2769,7 +2769,7 @@ define <16 x i16> @avx2_psll_w_256_masked(<16 x i16> %v, <8 x i16> %a) {
 ; CHECK-NEXT: [[TMP3:%.*]] = shl <16 x i16> [[V:%.*]], [[TMP2]]
 ; CHECK-NEXT: ret <16 x i16> [[TMP3]]
 ;
-  %1 = and <8 x i16> %a,
+  %1 = and <8 x i16> %a,
   %2 = tail call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> %v, <8 x i16> %1)
   ret <16 x i16> %2
 }
@@ -2781,7 +2781,7 @@ define <16 x i32> @avx512_psll_d_512_masked(<16 x i32> %v, <4 x i32> %a) {
 ; CHECK-NEXT: [[TMP3:%.*]] = shl <16 x i32> [[V:%.*]], [[TMP2]]
 ; CHECK-NEXT: ret <16 x i32> [[TMP3]]
 ;
-  %1 = and <4 x i32> %a,
+  %1 = and <4 x i32> %a,
   %2 = tail call <16 x i32> @llvm.x86.avx512.psll.d.512(<16 x i32> %v, <4 x i32> %1)
   ret <16 x i32> %2
 }
@@ -2927,7 +2927,7 @@ define <4 x i32> @avx2_psrav_d_128_masked_shuffle(<4 x i32> %v, <4 x i32> %a) {
 ; CHECK-NEXT: [[TMP3:%.*]] = ashr <4 x i32> [[V:%.*]], [[TMP2]]
 ; CHECK-NEXT: ret <4 x i32> [[TMP3]]
 ;
-  %1 = and <4 x i32> %a,
+  %1 = and <4 x i32> %a,
   %2 = shufflevector <4 x i32> %1, <4 x i32> poison, <4 x i32>
   %3 = tail call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %v, <4 x i32> %2)
   ret <4 x i32> %3
diff --git a/test/Transforms/InstCombine/X86/x86-vpermil-inseltpoison.ll b/test/Transforms/InstCombine/X86/x86-vpermil-inseltpoison.ll
index f633a3d4356..b992f834c3e 100644
--- a/test/Transforms/InstCombine/X86/x86-vpermil-inseltpoison.ll
+++ b/test/Transforms/InstCombine/X86/x86-vpermil-inseltpoison.ll
@@ -165,59 +165,59 @@ define <8 x double> @test_vpermilvar_pd_512(<8 x double> %v) {
   ret <8 x double> %a
 }

-; Verify that instcombine is able to fold constant shuffles with undef mask elements.
+; Verify that instcombine is able to fold constant shuffles with poison mask elements.

-define <4 x float> @undef_test_vpermilvar_ps(<4 x float> %v) {
-; CHECK-LABEL: @undef_test_vpermilvar_ps(
+define <4 x float> @poison_test_vpermilvar_ps(<4 x float> %v) {
+; CHECK-LABEL: @poison_test_vpermilvar_ps(
 ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[V:%.*]], <4 x float> undef, <4 x i32>
 ; CHECK-NEXT: ret <4 x float> [[TMP1]]
 ;
-  %a = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %v, <4 x i32> )
+  %a = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %v, <4 x i32> )
   ret <4 x float> %a
 }

-define <8 x float> @undef_test_vpermilvar_ps_256(<8 x float> %v) {
-; CHECK-LABEL: @undef_test_vpermilvar_ps_256(
+define <8 x float> @poison_test_vpermilvar_ps_256(<8 x float> %v) {
+; CHECK-LABEL: @poison_test_vpermilvar_ps_256(
 ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> [[V:%.*]], <8 x float> undef, <8 x i32>
 ; CHECK-NEXT: ret <8 x float> [[TMP1]]
 ;
-  %a = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %v, <8 x i32> )
+  %a = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %v, <8 x i32> )
   ret <8 x float> %a
 }

-define <16 x float> @undef_test_vpermilvar_ps_512(<16 x float> %v) {
-; CHECK-LABEL: @undef_test_vpermilvar_ps_512(
+define <16 x float> @poison_test_vpermilvar_ps_512(<16 x float> %v) {
+; CHECK-LABEL: @poison_test_vpermilvar_ps_512(
 ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x float> [[V:%.*]], <16 x float> undef, <16 x i32>
 ; CHECK-NEXT: ret <16 x float> [[TMP1]]
 ;
-  %a = tail call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %v, <16 x i32> )
+  %a = tail call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %v, <16 x i32> )
   ret <16 x float> %a
 }

-define <2 x double> @undef_test_vpermilvar_pd(<2 x double> %v) {
-; CHECK-LABEL: @undef_test_vpermilvar_pd(
+define <2 x double> @poison_test_vpermilvar_pd(<2 x double> %v) {
+; CHECK-LABEL: @poison_test_vpermilvar_pd(
 ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <2 x double> [[V:%.*]], <2 x double> undef, <2 x i32>
 ; CHECK-NEXT: ret <2 x double> [[TMP1]]
 ;
-  %a = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %v, <2 x i64> )
+  %a = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %v, <2 x i64> )
   ret <2 x double> %a
 }

-define <4 x double> @undef_test_vpermilvar_pd_256(<4 x double> %v) {
-; CHECK-LABEL: @undef_test_vpermilvar_pd_256(
+define <4 x double> @poison_test_vpermilvar_pd_256(<4 x double> %v) {
+; CHECK-LABEL: @poison_test_vpermilvar_pd_256(
 ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[V:%.*]], <4 x double> undef, <4 x i32>
 ; CHECK-NEXT: ret <4 x double> [[TMP1]]
 ;
-  %a = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %v, <4 x i64> )
+  %a = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %v, <4 x i64> )
   ret <4 x double> %a
 }

-define <8 x double> @undef_test_vpermilvar_pd_512(<8 x double> %v) {
-; CHECK-LABEL: @undef_test_vpermilvar_pd_512(
+define <8 x double> @poison_test_vpermilvar_pd_512(<8 x double> %v) {
+; CHECK-LABEL: @poison_test_vpermilvar_pd_512(
 ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x double> [[V:%.*]], <8 x double> undef, <8 x i32>
 ; CHECK-NEXT: ret <8 x double> [[TMP1]]
 ;
-  %a = tail call <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double> %v, <8 x i64> )
+  %a = tail call <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double> %v, <8 x i64> )
   ret <8 x double> %a
 }
@@ -230,7 +230,7 @@ define <4 x float> @elts_test_vpermilvar_ps(<4 x float> %a0, i32 %a1) {
 ;
   %1 = insertelement <4 x i32> , i32 %a1, i32 3
   %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> %1)
-  %3 = shufflevector <4 x float> %2, <4 x float> poison, <4 x i32>
+  %3 = shufflevector <4 x float> %2, <4 x float> poison, <4 x i32>
   ret <4 x float> %3
 }
@@ -241,7 +241,7 @@ define <8 x float> @elts_test_vpermilvar_ps_256(<8 x float> %a0, <8 x i32> %a1)
 ;
   %1 = shufflevector <8 x i32> %a1, <8 x i32> , <8 x i32>
   %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %1)
-  %3 = shufflevector <8 x float> %2, <8 x float> poison, <8 x i32>
+  %3 = shufflevector <8 x float> %2, <8 x float> poison, <8 x i32>
   ret <8 x float> %3
 }
@@ -253,7 +253,7 @@ define <16 x float> @elts_test_vpermilvar_ps_512(<16 x float> %a0, <16 x i32> %a
 ;
   %1 = insertelement <16 x i32> %a1, i32 %a2, i32 0
   %2 = tail call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %a0, <16 x i32> %1)
-  %3 = shufflevector <16 x float> %2, <16 x float> poison, <16 x i32>
+  %3 = shufflevector <16 x float> %2, <16 x float> poison, <16 x i32>
   ret <16 x float> %3
 }
@@ -264,7 +264,7 @@ define <2 x double> @elts_test_vpermilvar_pd(<2 x double> %a0, i64 %a1) {
 ;
   %1 = insertelement <2 x i64> , i64 %a1, i32 1
   %2 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> %1)
-  %3 = shufflevector <2 x double> %2, <2 x double> poison, <2 x i32>
+  %3 = shufflevector <2 x double> %2, <2 x double> poison, <2 x i32>
   ret <2 x double> %3
 }
@@ -275,7 +275,7 @@ define <4 x double> @elts_test_vpermilvar_pd_256(<4 x double> %a0, <4 x i64> %a1
 ;
   %1 = shufflevector <4 x i64> , <4 x i64> %a1, <4 x i32>
   %2 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> %1)
-  %3 = shufflevector <4 x double> %2, <4 x double> poison, <4 x i32>
+  %3 = shufflevector <4 x double> %2, <4 x double> poison, <4 x i32>
   ret <4 x double> %3
 }