; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE,SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX512
declare {<1 x i32>, <1 x i1>} @llvm.umul.with.overflow.v1i32(<1 x i32>, <1 x i32>)
declare {<2 x i32>, <2 x i1>} @llvm.umul.with.overflow.v2i32(<2 x i32>, <2 x i32>)
declare {<3 x i32>, <3 x i1>} @llvm.umul.with.overflow.v3i32(<3 x i32>, <3 x i32>)
declare {<4 x i32>, <4 x i1>} @llvm.umul.with.overflow.v4i32(<4 x i32>, <4 x i32>)
declare {<6 x i32>, <6 x i1>} @llvm.umul.with.overflow.v6i32(<6 x i32>, <6 x i32>)
declare {<8 x i32>, <8 x i1>} @llvm.umul.with.overflow.v8i32(<8 x i32>, <8 x i32>)
declare {<16 x i32>, <16 x i1>} @llvm.umul.with.overflow.v16i32(<16 x i32>, <16 x i32>)
declare {<16 x i8>, <16 x i1>} @llvm.umul.with.overflow.v16i8(<16 x i8>, <16 x i8>)
declare {<8 x i16>, <8 x i1>} @llvm.umul.with.overflow.v8i16(<8 x i16>, <8 x i16>)
declare {<2 x i64>, <2 x i1>} @llvm.umul.with.overflow.v2i64(<2 x i64>, <2 x i64>)
declare {<4 x i24>, <4 x i1>} @llvm.umul.with.overflow.v4i24(<4 x i24>, <4 x i24>)
declare {<4 x i1>, <4 x i1>} @llvm.umul.with.overflow.v4i1(<4 x i1>, <4 x i1>)
declare {<2 x i128>, <2 x i1>} @llvm.umul.with.overflow.v2i128(<2 x i128>, <2 x i128>)
define <1 x i32> @umulo_v1i32(<1 x i32> %a0, <1 x i32> %a1, <1 x i32>* %p2) nounwind {
; SSE-LABEL: umulo_v1i32:
; SSE: # %bb.0:
; SSE-NEXT: movq %rdx, %rcx
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: xorl %edi, %edi
; SSE-NEXT: mull %esi
; SSE-NEXT: seto %dil
; SSE-NEXT: negl %edi
; SSE-NEXT: movl %eax, (%rcx)
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: umulo_v1i32:
; AVX: # %bb.0:
; AVX-NEXT: movq %rdx, %rcx
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: xorl %edi, %edi
; AVX-NEXT: mull %esi
; AVX-NEXT: seto %dil
; AVX-NEXT: negl %edi
; AVX-NEXT: movl %eax, (%rcx)
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: retq
%t = call {<1 x i32>, <1 x i1>} @llvm.umul.with.overflow.v1i32(<1 x i32> %a0, <1 x i32> %a1)
%val = extractvalue {<1 x i32>, <1 x i1>} %t, 0
%obit = extractvalue {<1 x i32>, <1 x i1>} %t, 1
%res = sext <1 x i1> %obit to <1 x i32>
store <1 x i32> %val, <1 x i32>* %p2
ret <1 x i32> %res
}
define <2 x i32> @umulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) nounwind {
; SSE2-LABEL: umulo_v2i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,0,4294967295,0]
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; SSE2-NEXT: movq %xmm3, %r8
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; SSE2-NEXT: movq %xmm2, %r10
; SSE2-NEXT: movq %xmm0, %rax
; SSE2-NEXT: movq %xmm1, %rdx
; SSE2-NEXT: xorl %esi, %esi
; SSE2-NEXT: mulq %rdx
; SSE2-NEXT: movq $-1, %r9
; SSE2-NEXT: movl $0, %ecx
; SSE2-NEXT: cmovoq %r9, %rcx
; SSE2-NEXT: movq %rax, %xmm0
; SSE2-NEXT: movq %r8, %rax
; SSE2-NEXT: mulq %r10
; SSE2-NEXT: movq %rax, %xmm1
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
; SSE2-NEXT: psrlq $32, %xmm0
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pcmpeqd %xmm0, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,0,3,2]
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movq %rcx, %xmm0
; SSE2-NEXT: cmovoq %r9, %rsi
; SSE2-NEXT: movq %rsi, %xmm3
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: movq %xmm1, (%rdi)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v2i32:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,0,4294967295,0]
; SSSE3-NEXT: pand %xmm2, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; SSSE3-NEXT: movq %xmm3, %r8
; SSSE3-NEXT: pand %xmm2, %xmm1
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; SSSE3-NEXT: movq %xmm2, %r10
; SSSE3-NEXT: movq %xmm0, %rax
; SSSE3-NEXT: movq %xmm1, %rdx
; SSSE3-NEXT: xorl %esi, %esi
; SSSE3-NEXT: mulq %rdx
; SSSE3-NEXT: movq $-1, %r9
; SSSE3-NEXT: movl $0, %ecx
; SSSE3-NEXT: cmovoq %r9, %rcx
; SSSE3-NEXT: movq %rax, %xmm0
; SSSE3-NEXT: movq %r8, %rax
; SSSE3-NEXT: mulq %r10
; SSSE3-NEXT: movq %rax, %xmm1
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
; SSSE3-NEXT: psrlq $32, %xmm0
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: pcmpeqd %xmm0, %xmm2
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,0,3,2]
; SSSE3-NEXT: pand %xmm2, %xmm0
; SSSE3-NEXT: pcmpeqd %xmm2, %xmm2
; SSSE3-NEXT: pxor %xmm0, %xmm2
; SSSE3-NEXT: movq %rcx, %xmm0
; SSSE3-NEXT: cmovoq %r9, %rsi
; SSSE3-NEXT: movq %rsi, %xmm3
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
; SSSE3-NEXT: por %xmm2, %xmm0
; SSSE3-NEXT: movq %xmm1, (%rdi)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v2i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT: movq %xmm0, %r8
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; SSE41-NEXT: movq %xmm1, %rcx
; SSE41-NEXT: pextrq $1, %xmm0, %rax
; SSE41-NEXT: pextrq $1, %xmm1, %rdx
; SSE41-NEXT: xorl %esi, %esi
; SSE41-NEXT: mulq %rdx
; SSE41-NEXT: movq %rax, %r9
; SSE41-NEXT: movq $-1, %r10
; SSE41-NEXT: movl $0, %eax
; SSE41-NEXT: cmovoq %r10, %rax
; SSE41-NEXT: movq %rax, %xmm0
; SSE41-NEXT: movq %r8, %rax
; SSE41-NEXT: mulq %rcx
; SSE41-NEXT: cmovoq %r10, %rsi
; SSE41-NEXT: movq %rsi, %xmm1
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE41-NEXT: movq %r9, %xmm0
; SSE41-NEXT: movq %rax, %xmm3
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,2,2,3]
; SSE41-NEXT: psrlq $32, %xmm3
; SSE41-NEXT: pcmpeqq %xmm2, %xmm3
; SSE41-NEXT: pcmpeqd %xmm0, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm0
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: movq %xmm4, (%rdi)
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v2i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vmovq %xmm0, %r8
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vmovq %xmm1, %rcx
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-NEXT: vpextrq $1, %xmm1, %rdx
; AVX1-NEXT: xorl %esi, %esi
; AVX1-NEXT: mulq %rdx
; AVX1-NEXT: movq %rax, %r9
; AVX1-NEXT: movq $-1, %r10
; AVX1-NEXT: movl $0, %eax
; AVX1-NEXT: cmovoq %r10, %rax
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: movq %r8, %rax
; AVX1-NEXT: mulq %rcx
; AVX1-NEXT: cmovoq %r10, %rsi
; AVX1-NEXT: vmovq %rsi, %xmm1
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vmovq %r9, %xmm1
; AVX1-NEXT: vmovq %rax, %xmm3
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm1[0]
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX1-NEXT: vpcmpeqq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX1-NEXT: vmovq %xmm1, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v2i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vmovq %xmm0, %r8
; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; AVX2-NEXT: vmovq %xmm1, %rcx
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
; AVX2-NEXT: vpextrq $1, %xmm1, %rdx
; AVX2-NEXT: xorl %esi, %esi
; AVX2-NEXT: mulq %rdx
; AVX2-NEXT: movq %rax, %r9
; AVX2-NEXT: movq $-1, %r10
; AVX2-NEXT: movl $0, %eax
; AVX2-NEXT: cmovoq %r10, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: movq %r8, %rax
; AVX2-NEXT: mulq %rcx
; AVX2-NEXT: cmovoq %r10, %rsi
; AVX2-NEXT: vmovq %rsi, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT: vmovq %r9, %xmm1
; AVX2-NEXT: vmovq %rax, %xmm3
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm1[0]
; AVX2-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX2-NEXT: vpcmpeqq %xmm2, %xmm3, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX2-NEXT: vmovq %xmm1, (%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v2i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX512-NEXT: vmovq %xmm0, %rcx
; AVX512-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; AVX512-NEXT: vmovq %xmm1, %rsi
; AVX512-NEXT: vpextrq $1, %xmm0, %rax
; AVX512-NEXT: vpextrq $1, %xmm1, %rdx
; AVX512-NEXT: mulq %rdx
; AVX512-NEXT: seto %r8b
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: movq %rcx, %rax
; AVX512-NEXT: mulq %rsi
; AVX512-NEXT: vmovq %rax, %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512-NEXT: vpsrlq $32, %xmm0, %xmm1
; AVX512-NEXT: vptestmq %xmm1, %xmm1, %k0
; AVX512-NEXT: kmovd %r8d, %k1
; AVX512-NEXT: kshiftlw $1, %k1, %k1
; AVX512-NEXT: seto %al
; AVX512-NEXT: andl $1, %eax
; AVX512-NEXT: kmovw %eax, %k2
; AVX512-NEXT: korw %k1, %k2, %k1
; AVX512-NEXT: korw %k1, %k0, %k1
; AVX512-NEXT: vpmovqd %xmm0, (%rdi)
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
; AVX512-NEXT: retq
%t = call {<2 x i32>, <2 x i1>} @llvm.umul.with.overflow.v2i32(<2 x i32> %a0, <2 x i32> %a1)
%val = extractvalue {<2 x i32>, <2 x i1>} %t, 0
%obit = extractvalue {<2 x i32>, <2 x i1>} %t, 1
%res = sext <2 x i1> %obit to <2 x i32>
store <2 x i32> %val, <2 x i32>* %p2
ret <2 x i32> %res
}
define <3 x i32> @umulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) nounwind {
; SSE2-LABEL: umulo_v3i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: movq %xmm0, (%rdi)
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE2-NEXT: movd %xmm0, 8(%rdi)
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v3i32:
; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm1, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm2, %xmm4
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: pcmpeqd %xmm3, %xmm2
; SSSE3-NEXT: pcmpeqd %xmm1, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm1
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT: movq %xmm0, (%rdi)
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSSE3-NEXT: movd %xmm0, 8(%rdi)
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v3i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm2, %xmm3
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pmuludq %xmm1, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
; SSE41-NEXT: pxor %xmm3, %xmm3
; SSE41-NEXT: pcmpeqd %xmm2, %xmm3
; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
; SSE41-NEXT: pxor %xmm3, %xmm2
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: pextrd $2, %xmm0, 8(%rdi)
; SSE41-NEXT: movq %xmm0, (%rdi)
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v3i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpextrd $2, %xmm0, 8(%rdi)
; AVX1-NEXT: vmovq %xmm0, (%rdi)
; AVX1-NEXT: vmovdqa %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v3i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX2-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpextrd $2, %xmm0, 8(%rdi)
; AVX2-NEXT: vmovq %xmm0, (%rdi)
; AVX2-NEXT: vmovdqa %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v3i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX512-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX512-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX512-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
; AVX512-NEXT: vptestmd %xmm2, %xmm2, %k1
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
; AVX512-NEXT: vpextrd $2, %xmm1, 8(%rdi)
; AVX512-NEXT: vmovq %xmm1, (%rdi)
; AVX512-NEXT: retq
%t = call {<3 x i32>, <3 x i1>} @llvm.umul.with.overflow.v3i32(<3 x i32> %a0, <3 x i32> %a1)
%val = extractvalue {<3 x i32>, <3 x i1>} %t, 0
%obit = extractvalue {<3 x i32>, <3 x i1>} %t, 1
%res = sext <3 x i1> %obit to <3 x i32>
store <3 x i32> %val, <3 x i32>* %p2
ret <3 x i32> %res
}
define <4 x i32> @umulo_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i32>* %p2) nounwind {
; SSE2-LABEL: umulo_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: movdqa %xmm0, (%rdi)
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v4i32:
; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm1, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm2, %xmm4
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: pcmpeqd %xmm3, %xmm2
; SSSE3-NEXT: pcmpeqd %xmm1, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm1
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT: movdqa %xmm0, (%rdi)
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm2, %xmm3
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pmuludq %xmm1, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
; SSE41-NEXT: pxor %xmm3, %xmm3
; SSE41-NEXT: pcmpeqd %xmm2, %xmm3
; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
; SSE41-NEXT: pxor %xmm3, %xmm2
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: movdqa %xmm0, (%rdi)
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, (%rdi)
; AVX1-NEXT: vmovdqa %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v4i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX2-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa %xmm0, (%rdi)
; AVX2-NEXT: vmovdqa %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX512-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX512-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX512-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
; AVX512-NEXT: vptestmd %xmm2, %xmm2, %k1
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
; AVX512-NEXT: vmovdqa %xmm1, (%rdi)
; AVX512-NEXT: retq
%t = call {<4 x i32>, <4 x i1>} @llvm.umul.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> %a1)
%val = extractvalue {<4 x i32>, <4 x i1>} %t, 0
%obit = extractvalue {<4 x i32>, <4 x i1>} %t, 1
%res = sext <4 x i1> %obit to <4 x i32>
store <4 x i32> %val, <4 x i32>* %p2
ret <4 x i32> %res
}
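
; umulo_v6i32: <6 x i32> is passed and returned partly in GPRs and stack
; slots, so the SSE lowerings first assemble the vectors before applying the
; same pmuludq-based overflow check; the AVX lowerings widen to 256 bits and
; store the two extra lanes of the value with a separate vmovq.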
define <6 x i32> @umulo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) nounwind {
; SSE2-LABEL: umulo_v6i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movq %rdi, %rax
; SSE2-NEXT: movd {{.*#+}} xmm8 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
; SSE2-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT: movd %r8d, %xmm3
; SSE2-NEXT: movd %ecx, %xmm0
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-NEXT: movd %edx, %xmm2
; SSE2-NEXT: movd %esi, %xmm5
; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm0[0]
; SSE2-NEXT: pmuludq %xmm1, %xmm5
; SSE2-NEXT: movd {{.*#+}} xmm7 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm6 = mem[0],zero,zero,zero
; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
; SSE2-NEXT: movd %r9d, %xmm0
; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: pmuludq %xmm6, %xmm0
; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,3,2,3]
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,0],xmm8[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm3[0,0]
; SSE2-NEXT: pmuludq %xmm4, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: pcmpeqd %xmm3, %xmm6
; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
; SSE2-NEXT: pxor %xmm4, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
; SSE2-NEXT: pmuludq %xmm7, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1]
; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
; SSE2-NEXT: pxor %xmm4, %xmm2
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: movq %xmm0, 16(%rcx)
; SSE2-NEXT: movdqa %xmm5, (%rcx)
; SSE2-NEXT: movq %xmm2, 16(%rdi)
; SSE2-NEXT: movdqa %xmm6, (%rdi)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v6i32:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movq %rdi, %rax
; SSSE3-NEXT: movd {{.*#+}} xmm8 = mem[0],zero,zero,zero
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
; SSSE3-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero
; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSSE3-NEXT: movd %r8d, %xmm3
; SSSE3-NEXT: movd %ecx, %xmm0
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSSE3-NEXT: movd %edx, %xmm2
; SSSE3-NEXT: movd %esi, %xmm5
; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm0[0]
; SSSE3-NEXT: pmuludq %xmm1, %xmm5
; SSSE3-NEXT: movd {{.*#+}} xmm7 = mem[0],zero,zero,zero
; SSSE3-NEXT: movd {{.*#+}} xmm6 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
; SSSE3-NEXT: movd %r9d, %xmm0
; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: pmuludq %xmm6, %xmm0
; SSSE3-NEXT: movq {{[0-9]+}}(%rsp), %rcx
; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,3,2,3]
; SSSE3-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,0],xmm8[0,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm3[0,0]
; SSSE3-NEXT: pmuludq %xmm4, %xmm2
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
; SSSE3-NEXT: pxor %xmm3, %xmm3
; SSSE3-NEXT: pcmpeqd %xmm3, %xmm6
; SSSE3-NEXT: pcmpeqd %xmm4, %xmm4
; SSSE3-NEXT: pxor %xmm4, %xmm6
; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
; SSSE3-NEXT: pmuludq %xmm7, %xmm1
; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm1[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1]
; SSSE3-NEXT: pcmpeqd %xmm3, %xmm2
; SSSE3-NEXT: pxor %xmm4, %xmm2
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: movq %xmm0, 16(%rcx)
; SSSE3-NEXT: movdqa %xmm5, (%rcx)
; SSSE3-NEXT: movq %xmm2, 16(%rdi)
; SSSE3-NEXT: movdqa %xmm6, (%rdi)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v6i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movq %rdi, %rax
; SSE41-NEXT: movd %esi, %xmm0
; SSE41-NEXT: pinsrd $1, %edx, %xmm0
; SSE41-NEXT: pinsrd $2, %ecx, %xmm0
; SSE41-NEXT: pinsrd $3, %r8d, %xmm0
; SSE41-NEXT: movd %r9d, %xmm1
; SSE41-NEXT: pinsrd $1, {{[0-9]+}}(%rsp), %xmm1
; SSE41-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE41-NEXT: pinsrd $1, {{[0-9]+}}(%rsp), %xmm2
; SSE41-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; SSE41-NEXT: pinsrd $1, {{[0-9]+}}(%rsp), %xmm3
; SSE41-NEXT: pinsrd $2, {{[0-9]+}}(%rsp), %xmm3
; SSE41-NEXT: pinsrd $3, {{[0-9]+}}(%rsp), %xmm3
; SSE41-NEXT: movq {{[0-9]+}}(%rsp), %rcx
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm4, %xmm5
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: pmuludq %xmm3, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
; SSE41-NEXT: pxor %xmm8, %xmm8
; SSE41-NEXT: pcmpeqd %xmm8, %xmm4
; SSE41-NEXT: pcmpeqd %xmm6, %xmm6
; SSE41-NEXT: pxor %xmm6, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm7, %xmm5
; SSE41-NEXT: movdqa %xmm1, %xmm7
; SSE41-NEXT: pmuludq %xmm2, %xmm7
; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm5[2,3],xmm7[4,5],xmm5[6,7]
; SSE41-NEXT: pcmpeqd %xmm8, %xmm7
; SSE41-NEXT: pxor %xmm6, %xmm7
; SSE41-NEXT: pmulld %xmm0, %xmm3
; SSE41-NEXT: pmulld %xmm1, %xmm2
; SSE41-NEXT: movq %xmm2, 16(%rcx)
; SSE41-NEXT: movdqa %xmm3, (%rcx)
; SSE41-NEXT: movq %xmm7, 16(%rdi)
; SSE41-NEXT: movdqa %xmm4, (%rdi)
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v6i32:
; AVX1: # %bb.0:

; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpmuludq %xmm3, %xmm4, %xmm5
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm5[0,1],xmm2[2,3],xmm5[4,5],xmm2[6,7]
; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8
; AVX1-NEXT: vpcmpeqd %xmm8, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm6, %xmm6, %xmm6
; AVX1-NEXT: vpxor %xmm6, %xmm2, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm7
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3],xmm7[4,5],xmm5[6,7]
; AVX1-NEXT: vpcmpeqd %xmm8, %xmm5, %xmm5
; AVX1-NEXT: vpxor %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmulld %xmm3, %xmm4, %xmm1
; AVX1-NEXT: vmovq %xmm1, 16(%rdi)
; AVX1-NEXT: vmovdqa %xmm0, (%rdi)
; AVX1-NEXT: vmovaps %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v6i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpmuludq %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm3
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7]
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovq %xmm1, 16(%rdi)
; AVX2-NEXT: vmovdqa %xmm0, (%rdi)
; AVX2-NEXT: vmovdqa %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v6i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
; AVX512-NEXT: vpmuludq %ymm2, %ymm3, %ymm2
; AVX512-NEXT: vpmuludq %ymm1, %ymm0, %ymm3
; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[1,1,3,3,5,5,7,7]
; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7]
; AVX512-NEXT: vptestmd %ymm2, %ymm2, %k1
; AVX512-NEXT: vpmulld %ymm1, %ymm0, %ymm1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512-NEXT: vmovq %xmm2, 16(%rdi)
; AVX512-NEXT: vmovdqa %xmm1, (%rdi)
; AVX512-NEXT: retq
%t = call {<6 x i32>, <6 x i1>} @llvm.umul.with.overflow.v6i32(<6 x i32> %a0, <6 x i32> %a1)
%val = extractvalue {<6 x i32>, <6 x i1>} %t, 0
%obit = extractvalue {<6 x i32>, <6 x i1>} %t, 1
%res = sext <6 x i1> %obit to <6 x i32>
store <6 x i32> %val, <6 x i32>* %p2
ret <6 x i32> %res
}
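
; umulo_v8i32: the 256-bit case splits into two 128-bit halves for SSE and
; AVX1 (per-half pmuludq/pcmpeqd with vinsertf128 to rebuild the mask), while
; AVX2 stays entirely in ymm registers and AVX512 uses vptestmd with a masked
; move, as in the v4i32 case.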
define <8 x i32> @umulo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) nounwind {
; SSE2-LABEL: umulo_v8i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm5, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: pxor %xmm8, %xmm8
; SSE2-NEXT: pcmpeqd %xmm8, %xmm0
; SSE2-NEXT: pcmpeqd %xmm7, %xmm7
; SSE2-NEXT: pxor %xmm7, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm5, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
; SSE2-NEXT: pcmpeqd %xmm8, %xmm2
; SSE2-NEXT: pxor %xmm7, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; SSE2-NEXT: movdqa %xmm1, 16(%rdi)
; SSE2-NEXT: movdqa %xmm4, (%rdi)
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v8i32:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa %xmm0, %xmm4
; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm2, %xmm4
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm2[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm5, %xmm6
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT: pxor %xmm8, %xmm8
; SSSE3-NEXT: pcmpeqd %xmm8, %xmm0
; SSSE3-NEXT: pcmpeqd %xmm7, %xmm7
; SSSE3-NEXT: pxor %xmm7, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm3, %xmm1
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm5, %xmm3
; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
; SSSE3-NEXT: pcmpeqd %xmm8, %xmm2
; SSSE3-NEXT: pxor %xmm7, %xmm2
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm6[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; SSSE3-NEXT: movdqa %xmm1, 16(%rdi)
; SSSE3-NEXT: movdqa %xmm4, (%rdi)
; SSSE3-NEXT: movdqa %xmm2, %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v8i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm4, %xmm5
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: pmuludq %xmm2, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
; SSE41-NEXT: pxor %xmm8, %xmm8
; SSE41-NEXT: pcmpeqd %xmm8, %xmm4
; SSE41-NEXT: pcmpeqd %xmm7, %xmm7
; SSE41-NEXT: pxor %xmm7, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm1[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm5, %xmm6
; SSE41-NEXT: movdqa %xmm1, %xmm5
; SSE41-NEXT: pmuludq %xmm3, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3],xmm5[4,5],xmm6[6,7]
; SSE41-NEXT: pcmpeqd %xmm8, %xmm5
; SSE41-NEXT: pxor %xmm7, %xmm5
; SSE41-NEXT: pmulld %xmm2, %xmm0
; SSE41-NEXT: pmulld %xmm3, %xmm1
; SSE41-NEXT: movdqa %xmm1, 16(%rdi)
; SSE41-NEXT: movdqa %xmm0, (%rdi)
; SSE41-NEXT: movdqa %xmm4, %xmm0
; SSE41-NEXT: movdqa %xmm5, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpmuludq %xmm3, %xmm4, %xmm5
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm5[0,1],xmm2[2,3],xmm5[4,5],xmm2[6,7]
; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8
; AVX1-NEXT: vpcmpeqd %xmm8, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm6, %xmm6, %xmm6
; AVX1-NEXT: vpxor %xmm6, %xmm2, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm7
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3],xmm7[4,5],xmm5[6,7]
; AVX1-NEXT: vpcmpeqd %xmm8, %xmm5, %xmm5
; AVX1-NEXT: vpxor %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2
; AVX1-NEXT: vpmulld %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
; AVX1-NEXT: vmovaps %ymm0, (%rdi)
; AVX1-NEXT: vmovaps %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpmuludq %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm3
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7]
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa %ymm0, (%rdi)
; AVX2-NEXT: vmovdqa %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
; AVX512-NEXT: vpmuludq %ymm2, %ymm3, %ymm2
; AVX512-NEXT: vpmuludq %ymm1, %ymm0, %ymm3
; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[1,1,3,3,5,5,7,7]
; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7]
; AVX512-NEXT: vptestmd %ymm2, %ymm2, %k1
; AVX512-NEXT: vpmulld %ymm1, %ymm0, %ymm1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
; AVX512-NEXT: vmovdqa %ymm1, (%rdi)
; AVX512-NEXT: retq
%t = call {<8 x i32>, <8 x i1>} @llvm.umul.with.overflow.v8i32(<8 x i32> %a0, <8 x i32> %a1)
%val = extractvalue {<8 x i32>, <8 x i1>} %t, 0
%obit = extractvalue {<8 x i32>, <8 x i1>} %t, 1
%res = sext <8 x i1> %obit to <8 x i32>
store <8 x i32> %val, <8 x i32>* %p2
ret <8 x i32> %res
}
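
; umulo_v16i32: AVX1 and AVX2 narrow the per-half overflow masks with
; packssdw/packsswb and re-widen them with vpmovsxbd to produce the
; <16 x i32> result; AVX512 gathers the product high halves with a vpermi2d
; shuffle, tests them with vptestmd, and materializes the all-ones mask with
; a zero-masked vpternlogd.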
define <16 x i32> @umulo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2) nounwind {
; SSE2-LABEL: umulo_v16i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm8
; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm4, %xmm8
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm4[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm10, %xmm9
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm9[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSE2-NEXT: pxor %xmm10, %xmm10
; SSE2-NEXT: pcmpeqd %xmm10, %xmm0
; SSE2-NEXT: pcmpeqd %xmm11, %xmm11
; SSE2-NEXT: pxor %xmm11, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm5, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm15 = xmm1[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm5[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm13, %xmm12
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm12[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm5[0],xmm15[1],xmm5[1]
; SSE2-NEXT: pcmpeqd %xmm10, %xmm15
; SSE2-NEXT: pxor %xmm11, %xmm15
; SSE2-NEXT: pshufd {{.*#+}} xmm14 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm6, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm6[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm14, %xmm13
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm13[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
; SSE2-NEXT: pcmpeqd %xmm10, %xmm5
; SSE2-NEXT: pxor %xmm11, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm14 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm7, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm14, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm7[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
; SSE2-NEXT: pcmpeqd %xmm10, %xmm6
; SSE2-NEXT: pxor %xmm11, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm9[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm12[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm13[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm7[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
; SSE2-NEXT: movdqa %xmm3, 48(%rdi)
; SSE2-NEXT: movdqa %xmm2, 32(%rdi)
; SSE2-NEXT: movdqa %xmm1, 16(%rdi)
; SSE2-NEXT: movdqa %xmm8, (%rdi)
; SSE2-NEXT: movdqa %xmm15, %xmm1
; SSE2-NEXT: movdqa %xmm5, %xmm2
; SSE2-NEXT: movdqa %xmm6, %xmm3
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v16i32:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa %xmm0, %xmm8
; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm0[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm4, %xmm8
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm9 = xmm4[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm10, %xmm9
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm9[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSSE3-NEXT: pxor %xmm10, %xmm10
; SSSE3-NEXT: pcmpeqd %xmm10, %xmm0
; SSSE3-NEXT: pcmpeqd %xmm11, %xmm11
; SSSE3-NEXT: pxor %xmm11, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm13 = xmm1[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm5, %xmm1
; SSSE3-NEXT: pshufd {{.*#+}} xmm15 = xmm1[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm12 = xmm5[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm13, %xmm12
; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm12[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm5[0],xmm15[1],xmm5[1]
; SSSE3-NEXT: pcmpeqd %xmm10, %xmm15
; SSSE3-NEXT: pxor %xmm11, %xmm15
; SSSE3-NEXT: pshufd {{.*#+}} xmm14 = xmm2[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm6, %xmm2
; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm2[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm13 = xmm6[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm14, %xmm13
; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm13[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
; SSSE3-NEXT: pcmpeqd %xmm10, %xmm5
; SSSE3-NEXT: pxor %xmm11, %xmm5
; SSSE3-NEXT: pshufd {{.*#+}} xmm14 = xmm3[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm7, %xmm3
; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm14, %xmm7
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm7[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
; SSSE3-NEXT: pcmpeqd %xmm10, %xmm6
; SSSE3-NEXT: pxor %xmm11, %xmm6
; SSSE3-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,2,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm9[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1]
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm12[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm13[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm7[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
; SSSE3-NEXT: movdqa %xmm3, 48(%rdi)
; SSSE3-NEXT: movdqa %xmm2, 32(%rdi)
; SSSE3-NEXT: movdqa %xmm1, 16(%rdi)
; SSSE3-NEXT: movdqa %xmm8, (%rdi)
; SSSE3-NEXT: movdqa %xmm15, %xmm1
; SSSE3-NEXT: movdqa %xmm5, %xmm2
; SSSE3-NEXT: movdqa %xmm6, %xmm3
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v16i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm4[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm0[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm8, %xmm9
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: pmuludq %xmm4, %xmm8
; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm8[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1],xmm9[2,3],xmm8[4,5],xmm9[6,7]
; SSE41-NEXT: pxor %xmm12, %xmm12
; SSE41-NEXT: pcmpeqd %xmm12, %xmm8
; SSE41-NEXT: pcmpeqd %xmm13, %xmm13
; SSE41-NEXT: pxor %xmm13, %xmm8
; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm5[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm1[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm9, %xmm10
; SSE41-NEXT: movdqa %xmm1, %xmm9
; SSE41-NEXT: pmuludq %xmm5, %xmm9
; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm9[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm9 = xmm9[0,1],xmm10[2,3],xmm9[4,5],xmm10[6,7]
; SSE41-NEXT: pcmpeqd %xmm12, %xmm9
; SSE41-NEXT: pxor %xmm13, %xmm9
; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm6[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm11 = xmm2[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm10, %xmm11
; SSE41-NEXT: movdqa %xmm2, %xmm10
; SSE41-NEXT: pmuludq %xmm6, %xmm10
; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm10 = xmm10[0,1],xmm11[2,3],xmm10[4,5],xmm11[6,7]
; SSE41-NEXT: pcmpeqd %xmm12, %xmm10
; SSE41-NEXT: pxor %xmm13, %xmm10
; SSE41-NEXT: pshufd {{.*#+}} xmm11 = xmm7[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm14 = xmm3[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm11, %xmm14
; SSE41-NEXT: movdqa %xmm3, %xmm11
; SSE41-NEXT: pmuludq %xmm7, %xmm11
; SSE41-NEXT: pshufd {{.*#+}} xmm11 = xmm11[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm11 = xmm11[0,1],xmm14[2,3],xmm11[4,5],xmm14[6,7]
; SSE41-NEXT: pcmpeqd %xmm12, %xmm11
; SSE41-NEXT: pxor %xmm13, %xmm11
; SSE41-NEXT: pmulld %xmm4, %xmm0
; SSE41-NEXT: pmulld %xmm5, %xmm1
; SSE41-NEXT: pmulld %xmm6, %xmm2
; SSE41-NEXT: pmulld %xmm7, %xmm3
; SSE41-NEXT: movdqa %xmm3, 48(%rdi)
; SSE41-NEXT: movdqa %xmm2, 32(%rdi)
; SSE41-NEXT: movdqa %xmm1, 16(%rdi)
; SSE41-NEXT: movdqa %xmm0, (%rdi)
; SSE41-NEXT: movdqa %xmm8, %xmm0
; SSE41-NEXT: movdqa %xmm9, %xmm1
; SSE41-NEXT: movdqa %xmm10, %xmm2
; SSE41-NEXT: movdqa %xmm11, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v16i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm10
; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm10[1,1,3,3]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm12
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm12[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm6, %xmm7, %xmm6
; AVX1-NEXT: vpmuludq %xmm10, %xmm12, %xmm7
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm6[2,3],xmm7[4,5],xmm6[6,7]
; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8
; AVX1-NEXT: vpcmpeqd %xmm8, %xmm7, %xmm7
; AVX1-NEXT: vpcmpeqd %xmm9, %xmm9, %xmm9
; AVX1-NEXT: vpxor %xmm9, %xmm7, %xmm7
; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vpmuludq %xmm3, %xmm1, %xmm6
; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3],xmm6[4,5],xmm4[6,7]
; AVX1-NEXT: vpcmpeqd %xmm8, %xmm4, %xmm4
; AVX1-NEXT: vpxor %xmm9, %xmm4, %xmm4
; AVX1-NEXT: vpackssdw %xmm7, %xmm4, %xmm11
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpmuludq %xmm6, %xmm4, %xmm7
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3],xmm7[4,5],xmm5[6,7]
; AVX1-NEXT: vpcmpeqd %xmm8, %xmm5, %xmm5
; AVX1-NEXT: vpxor %xmm9, %xmm5, %xmm13
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm2[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpmuludq %xmm2, %xmm0, %xmm7
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3],xmm7[4,5],xmm5[6,7]
; AVX1-NEXT: vpcmpeqd %xmm8, %xmm5, %xmm5
; AVX1-NEXT: vpxor %xmm9, %xmm5, %xmm5
; AVX1-NEXT: vpackssdw %xmm13, %xmm5, %xmm5
; AVX1-NEXT: vpacksswb %xmm11, %xmm5, %xmm5
; AVX1-NEXT: vpmulld %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vpmulld %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm2
; AVX1-NEXT: vpmulld %xmm10, %xmm12, %xmm0
; AVX1-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm3
; AVX1-NEXT: vpmovsxbd %xmm5, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm5[1,1,2,3]
; AVX1-NEXT: vpmovsxbd %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm5[2,3,0,1]
; AVX1-NEXT: vpmovsxbd %xmm1, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm5[3,3,0,1]
; AVX1-NEXT: vpmovsxbd %xmm4, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
; AVX1-NEXT: vmovaps %ymm3, 32(%rdi)
; AVX1-NEXT: vmovaps %ymm2, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v16i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm3[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm1[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpmuludq %ymm4, %ymm5, %ymm4
; AVX2-NEXT: vpmuludq %ymm3, %ymm1, %ymm5
; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2],ymm4[3],ymm5[4],ymm4[5],ymm5[6],ymm4[7]
; AVX2-NEXT: vpxor %xmm5, %xmm5, %xmm5
; AVX2-NEXT: vpcmpeqd %ymm5, %ymm4, %ymm4
; AVX2-NEXT: vpcmpeqd %ymm6, %ymm6, %ymm6
; AVX2-NEXT: vpxor %ymm6, %ymm4, %ymm4
; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm7
; AVX2-NEXT: vpackssdw %xmm7, %xmm4, %xmm4
; AVX2-NEXT: vpshufd {{.*#+}} ymm7 = ymm2[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm8 = ymm0[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpmuludq %ymm7, %ymm8, %ymm7
; AVX2-NEXT: vpmuludq %ymm2, %ymm0, %ymm8
; AVX2-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0],ymm7[1],ymm8[2],ymm7[3],ymm8[4],ymm7[5],ymm8[6],ymm7[7]
; AVX2-NEXT: vpcmpeqd %ymm5, %ymm7, %ymm5
; AVX2-NEXT: vpxor %ymm6, %ymm5, %ymm5
; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm6
; AVX2-NEXT: vpackssdw %xmm6, %xmm5, %xmm5
; AVX2-NEXT: vpacksswb %xmm4, %xmm5, %xmm4
; AVX2-NEXT: vpmulld %ymm2, %ymm0, %ymm2
; AVX2-NEXT: vpmulld %ymm3, %ymm1, %ymm3
; AVX2-NEXT: vpmovsxbd %xmm4, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm4[2,3,0,1]
; AVX2-NEXT: vpmovsxbd %xmm1, %ymm1
; AVX2-NEXT: vmovdqa %ymm3, 32(%rdi)
; AVX2-NEXT: vmovdqa %ymm2, (%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmuludq %zmm1, %zmm0, %zmm2
; AVX512-NEXT: vpshufd {{.*#+}} zmm3 = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; AVX512-NEXT: vpshufd {{.*#+}} zmm4 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; AVX512-NEXT: vpmuludq %zmm3, %zmm4, %zmm3
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [1,17,3,19,5,21,7,23,9,25,11,27,13,29,15,31]
; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512-NEXT: vptestmd %zmm4, %zmm4, %k1
; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm1
; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512-NEXT: vmovdqa64 %zmm1, (%rdi)
; AVX512-NEXT: retq
%t = call {<16 x i32>, <16 x i1>} @llvm.umul.with.overflow.v16i32(<16 x i32> %a0, <16 x i32> %a1)
%val = extractvalue {<16 x i32>, <16 x i1>} %t, 0
%obit = extractvalue {<16 x i32>, <16 x i1>} %t, 1
%res = sext <16 x i1> %obit to <16 x i32>
store <16 x i32> %val, <16 x i32>* %p2
ret <16 x i32> %res
}
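
; umulo_v16i8: the i8 elements are widened to i16 with punpck and multiplied
; with pmullw; the low result is rebuilt with pand/packuswb, and the overflow
; mask comes from the high bytes (psrlw $8, packuswb, pcmpeqb against zero,
; then inverted) before being sign-extended to <16 x i32> with
; punpck/pslld/psrad on SSE2.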
define <16 x i32> @umulo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nounwind {
; SSE2-LABEL: umulo_v16i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; SSE2-NEXT: pmullw %xmm2, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
; SSE2-NEXT: pmullw %xmm5, %xmm4
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: packuswb %xmm3, %xmm4
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15]
; SSE2-NEXT: pmullw %xmm3, %xmm5
; SSE2-NEXT: psrlw $8, %xmm5
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE2-NEXT: pmullw %xmm1, %xmm0
; SSE2-NEXT: psrlw $8, %xmm0
; SSE2-NEXT: packuswb %xmm5, %xmm0
; SSE2-NEXT: pcmpeqb %xmm2, %xmm0
; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: pslld $31, %xmm0
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: pslld $31, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; SSE2-NEXT: movdqa %xmm3, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE2-NEXT: pslld $31, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; SSE2-NEXT: pslld $31, %xmm3
; SSE2-NEXT: psrad $31, %xmm3
; SSE2-NEXT: movdqa %xmm4, (%rdi)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v16i8:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa %xmm1, %xmm2
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; SSSE3-NEXT: pmullw %xmm2, %xmm3
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSSE3-NEXT: pand %xmm2, %xmm3
; SSSE3-NEXT: movdqa %xmm1, %xmm5
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
; SSSE3-NEXT: movdqa %xmm0, %xmm4
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
; SSSE3-NEXT: pmullw %xmm5, %xmm4
; SSSE3-NEXT: pand %xmm2, %xmm4
; SSSE3-NEXT: packuswb %xmm3, %xmm4
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: movdqa %xmm1, %xmm3
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
; SSSE3-NEXT: movdqa %xmm0, %xmm5
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15]
; SSSE3-NEXT: pmullw %xmm3, %xmm5
; SSSE3-NEXT: psrlw $8, %xmm5
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSSE3-NEXT: pmullw %xmm1, %xmm0
; SSSE3-NEXT: psrlw $8, %xmm0
; SSSE3-NEXT: packuswb %xmm5, %xmm0
; SSSE3-NEXT: pcmpeqb %xmm2, %xmm0
; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3
; SSSE3-NEXT: pxor %xmm0, %xmm3
; SSSE3-NEXT: movdqa %xmm3, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: pslld $31, %xmm0
; SSSE3-NEXT: psrad $31, %xmm0
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSSE3-NEXT: pslld $31, %xmm1
; SSSE3-NEXT: psrad $31, %xmm1
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; SSSE3-NEXT: movdqa %xmm3, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSSE3-NEXT: pslld $31, %xmm2
; SSSE3-NEXT: psrad $31, %xmm2
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; SSSE3-NEXT: pslld $31, %xmm3
; SSSE3-NEXT: psrad $31, %xmm3
; SSSE3-NEXT: movdqa %xmm4, (%rdi)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v16i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; SSE41-NEXT: pmullw %xmm2, %xmm3
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm4, %xmm3
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: pmullw %xmm2, %xmm5
; SSE41-NEXT: pand %xmm5, %xmm4
; SSE41-NEXT: packuswb %xmm3, %xmm4
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
; SSE41-NEXT: pmullw %xmm1, %xmm0
; SSE41-NEXT: psrlw $8, %xmm0
; SSE41-NEXT: psrlw $8, %xmm5
; SSE41-NEXT: packuswb %xmm0, %xmm5
; SSE41-NEXT: pcmpeqb %xmm2, %xmm5
; SSE41-NEXT: pcmpeqd %xmm3, %xmm3
; SSE41-NEXT: pxor %xmm5, %xmm3
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
; SSE41-NEXT: pslld $31, %xmm0
; SSE41-NEXT: psrad $31, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,2,3]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
; SSE41-NEXT: pslld $31, %xmm1
; SSE41-NEXT: psrad $31, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
; SSE41-NEXT: pslld $31, %xmm2
; SSE41-NEXT: psrad $31, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
; SSE41-NEXT: pslld $31, %xmm3
; SSE41-NEXT: psrad $31, %xmm3
; SSE41-NEXT: movdqa %xmm4, (%rdi)
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v16i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmullw %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm1
; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpcmpeqb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpmovsxbd %xmm1, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
; AVX1-NEXT: vpmovsxbd %xmm3, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
; AVX1-NEXT: vpmovsxbd %xmm3, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,3,0,1]
; AVX1-NEXT: vpmovsxbd %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vmovdqa %xmm2, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm2
; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpmovsxbd %xmm1, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; AVX2-NEXT: vpmovsxbd %xmm1, %ymm1
; AVX2-NEXT: vmovdqa %xmm2, (%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512-NEXT: vpmullw %ymm1, %ymm0, %ymm1
; AVX512-NEXT: vpsrlw $8, %ymm1, %ymm0
; AVX512-NEXT: vpmovwb %ymm0, %xmm0
; AVX512-NEXT: vptestmb %xmm0, %xmm0, %k1
; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512-NEXT: vpmovwb %ymm1, (%rdi)
; AVX512-NEXT: retq
%t = call {<16 x i8>, <16 x i1>} @llvm.umul.with.overflow.v16i8(<16 x i8> %a0, <16 x i8> %a1)
%val = extractvalue {<16 x i8>, <16 x i1>} %t, 0
%obit = extractvalue {<16 x i8>, <16 x i1>} %t, 1
%res = sext <16 x i1> %obit to <16 x i32>
store <16 x i8> %val, <16 x i8>* %p2
ret <16 x i32> %res
}
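; Unsigned multiply with overflow on <8 x i16>: overflow is flagged when the
; unsigned high half of the product (pmulhuw / vptestmw) is non-zero.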
define <8 x i32> @umulo_v8i16(<8 x i16> %a0, <8 x i16> %a1, <8 x i16>* %p2) nounwind {
; SSE2-LABEL: umulo_v8i16:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pmullw %xmm1, %xmm2
; SSE2-NEXT: pmulhuw %xmm1, %xmm0
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: pcmpeqw %xmm0, %xmm3
; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm3, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: pslld $31, %xmm0
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: pslld $31, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: movdqa %xmm2, (%rdi)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v8i16:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: pmullw %xmm1, %xmm2
; SSSE3-NEXT: pmulhuw %xmm1, %xmm0
; SSSE3-NEXT: pxor %xmm3, %xmm3
; SSSE3-NEXT: pcmpeqw %xmm0, %xmm3
; SSSE3-NEXT: pcmpeqd %xmm1, %xmm1
; SSSE3-NEXT: pxor %xmm3, %xmm1
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: pslld $31, %xmm0
; SSSE3-NEXT: psrad $31, %xmm0
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSSE3-NEXT: pslld $31, %xmm1
; SSSE3-NEXT: psrad $31, %xmm1
; SSSE3-NEXT: movdqa %xmm2, (%rdi)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v8i16:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pmullw %xmm1, %xmm2
; SSE41-NEXT: pmulhuw %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm3
; SSE41-NEXT: pcmpeqw %xmm0, %xmm3
; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
; SSE41-NEXT: pxor %xmm3, %xmm1
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; SSE41-NEXT: pslld $31, %xmm0
; SSE41-NEXT: psrad $31, %xmm0
; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE41-NEXT: pslld $31, %xmm1
; SSE41-NEXT: psrad $31, %xmm1
; SSE41-NEXT: movdqa %xmm2, (%rdi)
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v8i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vmovdqa %xmm2, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v8i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vmovdqa %xmm2, (%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm2
; AVX512-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vptestmw %xmm0, %xmm0, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
; AVX512-NEXT: vmovdqa %xmm2, (%rdi)
; AVX512-NEXT: retq
%t = call {<8 x i16>, <8 x i1>} @llvm.umul.with.overflow.v8i16(<8 x i16> %a0, <8 x i16> %a1)
%val = extractvalue {<8 x i16>, <8 x i1>} %t, 0
%obit = extractvalue {<8 x i16>, <8 x i1>} %t, 1
%res = sext <8 x i1> %obit to <8 x i32>
store <8 x i16> %val, <8 x i16>* %p2
ret <8 x i32> %res
}
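; Unsigned multiply with overflow on <2 x i64>: pre-AVX512 targets scalarize
; through mulq and select the overflow mask with cmovo.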
define <2 x i32> @umulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) nounwind {
; SSE2-LABEL: umulo_v2i64:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE2-NEXT: movq %xmm2, %r9
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; SSE2-NEXT: movq %xmm2, %rsi
; SSE2-NEXT: movq %xmm0, %rax
; SSE2-NEXT: movq %xmm1, %rdx
; SSE2-NEXT: xorl %ecx, %ecx
; SSE2-NEXT: mulq %rdx
; SSE2-NEXT: movq %rax, %r8
; SSE2-NEXT: movq $-1, %r10
; SSE2-NEXT: movl $0, %eax
; SSE2-NEXT: cmovoq %r10, %rax
; SSE2-NEXT: movq %rax, %xmm0
; SSE2-NEXT: movq %r9, %rax
; SSE2-NEXT: mulq %rsi
; SSE2-NEXT: cmovoq %r10, %rcx
; SSE2-NEXT: movq %rcx, %xmm1
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: movq %r8, %xmm1
; SSE2-NEXT: movq %rax, %xmm2
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: movdqa %xmm1, (%rdi)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v2i64:
; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSSE3-NEXT: movq %xmm2, %r9
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; SSSE3-NEXT: movq %xmm2, %rsi
; SSSE3-NEXT: movq %xmm0, %rax
; SSSE3-NEXT: movq %xmm1, %rdx
; SSSE3-NEXT: xorl %ecx, %ecx
; SSSE3-NEXT: mulq %rdx
; SSSE3-NEXT: movq %rax, %r8
; SSSE3-NEXT: movq $-1, %r10
; SSSE3-NEXT: movl $0, %eax
; SSSE3-NEXT: cmovoq %r10, %rax
; SSSE3-NEXT: movq %rax, %xmm0
; SSSE3-NEXT: movq %r9, %rax
; SSSE3-NEXT: mulq %rsi
; SSSE3-NEXT: cmovoq %r10, %rcx
; SSSE3-NEXT: movq %rcx, %xmm1
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT: movq %r8, %xmm1
; SSSE3-NEXT: movq %rax, %xmm2
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSSE3-NEXT: movdqa %xmm1, (%rdi)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v2i64:
; SSE41: # %bb.0:
; SSE41-NEXT: movq %xmm0, %rcx
; SSE41-NEXT: movq %xmm1, %r9
; SSE41-NEXT: pextrq $1, %xmm0, %rax
; SSE41-NEXT: pextrq $1, %xmm1, %rdx
; SSE41-NEXT: xorl %esi, %esi
; SSE41-NEXT: mulq %rdx
; SSE41-NEXT: movq %rax, %r8
; SSE41-NEXT: movq $-1, %r10
; SSE41-NEXT: movl $0, %eax
; SSE41-NEXT: cmovoq %r10, %rax
; SSE41-NEXT: movq %rax, %xmm1
; SSE41-NEXT: movq %rcx, %rax
; SSE41-NEXT: mulq %r9
; SSE41-NEXT: cmovoq %r10, %rsi
; SSE41-NEXT: movq %rsi, %xmm0
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE41-NEXT: movq %r8, %xmm1
; SSE41-NEXT: movq %rax, %xmm2
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE41-NEXT: movdqa %xmm2, (%rdi)
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v2i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovq %xmm0, %rcx
; AVX1-NEXT: vmovq %xmm1, %r9
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-NEXT: vpextrq $1, %xmm1, %rdx
; AVX1-NEXT: xorl %esi, %esi
; AVX1-NEXT: mulq %rdx
; AVX1-NEXT: movq %rax, %r8
; AVX1-NEXT: movq $-1, %r10
; AVX1-NEXT: movl $0, %eax
; AVX1-NEXT: cmovoq %r10, %rax
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: movq %rcx, %rax
; AVX1-NEXT: mulq %r9
; AVX1-NEXT: cmovoq %r10, %rsi
; AVX1-NEXT: vmovq %rsi, %xmm1
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vmovq %r8, %xmm1
; AVX1-NEXT: vmovq %rax, %xmm2
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX1-NEXT: vmovdqa %xmm1, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v2i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovq %xmm0, %rcx
; AVX2-NEXT: vmovq %xmm1, %r9
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
; AVX2-NEXT: vpextrq $1, %xmm1, %rdx
; AVX2-NEXT: xorl %esi, %esi
; AVX2-NEXT: mulq %rdx
; AVX2-NEXT: movq %rax, %r8
; AVX2-NEXT: movq $-1, %r10
; AVX2-NEXT: movl $0, %eax
; AVX2-NEXT: cmovoq %r10, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: movq %rcx, %rax
; AVX2-NEXT: mulq %r9
; AVX2-NEXT: cmovoq %r10, %rsi
; AVX2-NEXT: vmovq %rsi, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT: vmovq %r8, %xmm1
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX2-NEXT: vmovdqa %xmm1, (%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v2i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovq %xmm0, %rcx
; AVX512-NEXT: vmovq %xmm1, %rsi
; AVX512-NEXT: vpextrq $1, %xmm0, %rax
; AVX512-NEXT: vpextrq $1, %xmm1, %rdx
; AVX512-NEXT: mulq %rdx
; AVX512-NEXT: movq %rax, %r8
; AVX512-NEXT: seto %al
; AVX512-NEXT: kmovd %eax, %k0
; AVX512-NEXT: kshiftlw $1, %k0, %k0
; AVX512-NEXT: movq %rcx, %rax
; AVX512-NEXT: mulq %rsi
; AVX512-NEXT: seto %cl
; AVX512-NEXT: andl $1, %ecx
; AVX512-NEXT: kmovw %ecx, %k1
; AVX512-NEXT: korw %k0, %k1, %k1
; AVX512-NEXT: vmovq %r8, %xmm0
; AVX512-NEXT: vmovq %rax, %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512-NEXT: vmovdqa %xmm0, (%rdi)
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
; AVX512-NEXT: retq
%t = call {<2 x i64>, <2 x i1>} @llvm.umul.with.overflow.v2i64(<2 x i64> %a0, <2 x i64> %a1)
%val = extractvalue {<2 x i64>, <2 x i1>} %t, 0
%obit = extractvalue {<2 x i64>, <2 x i1>} %t, 1
%res = sext <2 x i1> %obit to <2 x i32>
store <2 x i64> %val, <2 x i64>* %p2
ret <2 x i32> %res
}
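; Unsigned multiply with overflow on the illegal type <4 x i24>: inputs are
; masked to 24 bits, and overflow also checks bits 24-31 of the 32-bit product.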
define <4 x i32> @umulo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) nounwind {
; SSE2-LABEL: umulo_v4i24:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0]
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[3,1,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,3,0,1]
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm4[1,1,2,3]
; SSE2-NEXT: movdqa %xmm4, %xmm1
; SSE2-NEXT: psrld $24, %xmm1
; SSE2-NEXT: pcmpeqd %xmm3, %xmm1
; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
; SSE2-NEXT: pxor %xmm3, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm1
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: movw %ax, (%rdi)
; SSE2-NEXT: shrl $16, %eax
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movd %xmm5, %eax
; SSE2-NEXT: movw %ax, 9(%rdi)
; SSE2-NEXT: movd %xmm6, %ecx
; SSE2-NEXT: movw %cx, 6(%rdi)
; SSE2-NEXT: movd %xmm7, %edx
; SSE2-NEXT: movw %dx, 3(%rdi)
; SSE2-NEXT: shrl $16, %eax
; SSE2-NEXT: movb %al, 11(%rdi)
; SSE2-NEXT: shrl $16, %ecx
; SSE2-NEXT: movb %cl, 8(%rdi)
; SSE2-NEXT: shrl $16, %edx
; SSE2-NEXT: movb %dl, 5(%rdi)
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v4i24:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0]
; SSSE3-NEXT: pand %xmm2, %xmm1
; SSSE3-NEXT: pand %xmm2, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm1, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm3, %xmm1
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSSE3-NEXT: pxor %xmm3, %xmm3
; SSSE3-NEXT: pcmpeqd %xmm3, %xmm2
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,2,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[3,1,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,3,0,1]
; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm4[1,1,2,3]
; SSSE3-NEXT: movdqa %xmm4, %xmm1
; SSSE3-NEXT: psrld $24, %xmm1
; SSSE3-NEXT: pcmpeqd %xmm3, %xmm1
; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3
; SSSE3-NEXT: pxor %xmm3, %xmm2
; SSSE3-NEXT: pxor %xmm3, %xmm1
; SSSE3-NEXT: por %xmm2, %xmm1
; SSSE3-NEXT: movd %xmm0, %eax
; SSSE3-NEXT: movw %ax, (%rdi)
; SSSE3-NEXT: shrl $16, %eax
; SSSE3-NEXT: movb %al, 2(%rdi)
; SSSE3-NEXT: movd %xmm5, %eax
; SSSE3-NEXT: movw %ax, 9(%rdi)
; SSSE3-NEXT: movd %xmm6, %ecx
; SSSE3-NEXT: movw %cx, 6(%rdi)
; SSSE3-NEXT: movd %xmm7, %edx
; SSSE3-NEXT: movw %dx, 3(%rdi)
; SSSE3-NEXT: shrl $16, %eax
; SSSE3-NEXT: movb %al, 11(%rdi)
; SSSE3-NEXT: shrl $16, %ecx
; SSSE3-NEXT: movb %cl, 8(%rdi)
; SSSE3-NEXT: shrl $16, %edx
; SSSE3-NEXT: movb %dl, 5(%rdi)
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v4i24:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0]
; SSE41-NEXT: pand %xmm0, %xmm2
; SSE41-NEXT: pand %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm0, %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: pmuludq %xmm1, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,3],xmm4[4,5],xmm3[6,7]
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
; SSE41-NEXT: pcmpeqd %xmm3, %xmm3
; SSE41-NEXT: pxor %xmm3, %xmm4
; SSE41-NEXT: pmulld %xmm2, %xmm1
; SSE41-NEXT: pextrd $3, %xmm1, %eax
; SSE41-NEXT: pextrd $2, %xmm1, %ecx
; SSE41-NEXT: pextrd $1, %xmm1, %edx
; SSE41-NEXT: movd %xmm1, %esi
; SSE41-NEXT: psrld $24, %xmm1
; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm0
; SSE41-NEXT: por %xmm4, %xmm0
; SSE41-NEXT: movw %ax, 9(%rdi)
; SSE41-NEXT: movw %cx, 6(%rdi)
; SSE41-NEXT: movw %dx, 3(%rdi)
; SSE41-NEXT: movw %si, (%rdi)
; SSE41-NEXT: shrl $16, %eax
; SSE41-NEXT: movb %al, 11(%rdi)
; SSE41-NEXT: shrl $16, %ecx
; SSE41-NEXT: movb %cl, 8(%rdi)
; SSE41-NEXT: shrl $16, %edx
; SSE41-NEXT: movb %dl, 5(%rdi)
; SSE41-NEXT: shrl $16, %esi
; SSE41-NEXT: movb %sil, 2(%rdi)
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v4i24:
; AVX1: # %bb.0:
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [2.35098856E-38,2.35098856E-38,2.35098856E-38,2.35098856E-38]
; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpsrld $24, %xmm1, %xmm0
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpextrd $3, %xmm1, %eax
; AVX1-NEXT: movw %ax, 9(%rdi)
; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
; AVX1-NEXT: movw %cx, 6(%rdi)
; AVX1-NEXT: vpextrd $1, %xmm1, %edx
; AVX1-NEXT: movw %dx, 3(%rdi)
; AVX1-NEXT: vmovd %xmm1, %esi
; AVX1-NEXT: movw %si, (%rdi)
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: movb %al, 11(%rdi)
; AVX1-NEXT: shrl $16, %ecx
; AVX1-NEXT: movb %cl, 8(%rdi)
; AVX1-NEXT: shrl $16, %edx
; AVX1-NEXT: movb %dl, 5(%rdi)
; AVX1-NEXT: shrl $16, %esi
; AVX1-NEXT: movb %sil, 2(%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v4i24:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [16777215,16777215,16777215,16777215]
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX2-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX2-NEXT: vpxor %xmm4, %xmm2, %xmm2
; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpsrld $24, %xmm1, %xmm0
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm4, %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpextrd $3, %xmm1, %eax
; AVX2-NEXT: movw %ax, 9(%rdi)
; AVX2-NEXT: vpextrd $2, %xmm1, %ecx
; AVX2-NEXT: movw %cx, 6(%rdi)
; AVX2-NEXT: vpextrd $1, %xmm1, %edx
; AVX2-NEXT: movw %dx, 3(%rdi)
; AVX2-NEXT: vmovd %xmm1, %esi
; AVX2-NEXT: movw %si, (%rdi)
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: movb %al, 11(%rdi)
; AVX2-NEXT: shrl $16, %ecx
; AVX2-NEXT: movb %cl, 8(%rdi)
; AVX2-NEXT: shrl $16, %edx
; AVX2-NEXT: movb %dl, 5(%rdi)
; AVX2-NEXT: shrl $16, %esi
; AVX2-NEXT: movb %sil, 2(%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v4i24:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm2 = [16777215,16777215,16777215,16777215]
; AVX512-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX512-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX512-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX512-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vpsrld $24, %xmm1, %xmm0
; AVX512-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vptestmd %xmm0, %xmm0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
; AVX512-NEXT: vpextrd $3, %xmm1, %eax
; AVX512-NEXT: movw %ax, 9(%rdi)
; AVX512-NEXT: vpextrd $2, %xmm1, %ecx
; AVX512-NEXT: movw %cx, 6(%rdi)
; AVX512-NEXT: vpextrd $1, %xmm1, %edx
; AVX512-NEXT: movw %dx, 3(%rdi)
; AVX512-NEXT: vmovd %xmm1, %esi
; AVX512-NEXT: movw %si, (%rdi)
; AVX512-NEXT: shrl $16, %eax
; AVX512-NEXT: movb %al, 11(%rdi)
; AVX512-NEXT: shrl $16, %ecx
; AVX512-NEXT: movb %cl, 8(%rdi)
; AVX512-NEXT: shrl $16, %edx
; AVX512-NEXT: movb %dl, 5(%rdi)
; AVX512-NEXT: shrl $16, %esi
; AVX512-NEXT: movb %sil, 2(%rdi)
; AVX512-NEXT: retq
%t = call {<4 x i24>, <4 x i1>} @llvm.umul.with.overflow.v4i24(<4 x i24> %a0, <4 x i24> %a1)
%val = extractvalue {<4 x i24>, <4 x i1>} %t, 0
%obit = extractvalue {<4 x i24>, <4 x i1>} %t, 1
%res = sext <4 x i1> %obit to <4 x i32>
store <4 x i24> %val, <4 x i24>* %p2
ret <4 x i32> %res
}
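; Unsigned multiply with overflow on <4 x i1>: the i1 inputs are masked to bit 0
; and the packed result is stored as a 4-bit mask.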
define <4 x i32> @umulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind {
; SSE2-LABEL: umulo_v4i1:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pmuludq %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
; SSE2-NEXT: pxor %xmm4, %xmm2
; SSE2-NEXT: pmaddwd %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrld $1, %xmm1
; SSE2-NEXT: pcmpeqd %xmm3, %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm1
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: pslld $31, %xmm0
; SSE2-NEXT: movmskps %xmm0, %eax
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v4i1:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
; SSSE3-NEXT: pand %xmm2, %xmm1
; SSSE3-NEXT: pand %xmm2, %xmm0
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: pmuludq %xmm1, %xmm2
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm3, %xmm4
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSSE3-NEXT: pxor %xmm3, %xmm3
; SSSE3-NEXT: pcmpeqd %xmm3, %xmm2
; SSSE3-NEXT: pcmpeqd %xmm4, %xmm4
; SSSE3-NEXT: pxor %xmm4, %xmm2
; SSSE3-NEXT: pmaddwd %xmm1, %xmm0
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: psrld $1, %xmm1
; SSSE3-NEXT: pcmpeqd %xmm3, %xmm1
; SSSE3-NEXT: pxor %xmm4, %xmm1
; SSSE3-NEXT: por %xmm2, %xmm1
; SSSE3-NEXT: pslld $31, %xmm0
; SSSE3-NEXT: movmskps %xmm0, %eax
; SSSE3-NEXT: movb %al, (%rdi)
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v4i1:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
; SSE41-NEXT: pand %xmm2, %xmm0
; SSE41-NEXT: pand %xmm2, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm2, %xmm3
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pmuludq %xmm1, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,3],xmm4[4,5],xmm3[6,7]
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: pcmpeqd %xmm2, %xmm4
; SSE41-NEXT: pcmpeqd %xmm3, %xmm3
; SSE41-NEXT: pxor %xmm3, %xmm4
; SSE41-NEXT: pmaddwd %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: psrld $1, %xmm0
; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
; SSE41-NEXT: pxor %xmm3, %xmm2
; SSE41-NEXT: por %xmm4, %xmm2
; SSE41-NEXT: pslld $31, %xmm1
; SSE41-NEXT: movmskps %xmm1, %eax
; SSE41-NEXT: movb %al, (%rdi)
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v4i1:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,1,1,1]
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpmaddwd %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpsrld $1, %xmm1, %xmm0
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
; AVX1-NEXT: vmovmskps %xmm1, %eax
; AVX1-NEXT: movb %al, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v4i1:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX2-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX2-NEXT: vpxor %xmm4, %xmm2, %xmm2
; AVX2-NEXT: vpmaddwd %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpsrld $1, %xmm1, %xmm0
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm4, %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpslld $31, %xmm1, %xmm1
; AVX2-NEXT: vmovmskps %xmm1, %eax
; AVX2-NEXT: movb %al, (%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v4i1:
; AVX512: # %bb.0:
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: vpslld $31, %xmm0, %xmm0
; AVX512-NEXT: vptestmd %xmm0, %xmm0, %k0
; AVX512-NEXT: kshiftrw $3, %k0, %k1
; AVX512-NEXT: kmovd %k1, %r9d
; AVX512-NEXT: andb $1, %r9b
; AVX512-NEXT: vpslld $31, %xmm1, %xmm0
; AVX512-NEXT: vptestmd %xmm0, %xmm0, %k1
; AVX512-NEXT: kshiftrw $3, %k1, %k2
; AVX512-NEXT: kmovd %k2, %r10d
; AVX512-NEXT: andb $1, %r10b
; AVX512-NEXT: kshiftrw $2, %k0, %k2
; AVX512-NEXT: kmovd %k2, %r11d
; AVX512-NEXT: andb $1, %r11b
; AVX512-NEXT: kshiftrw $2, %k1, %k2
; AVX512-NEXT: kmovd %k2, %ebx
; AVX512-NEXT: andb $1, %bl
; AVX512-NEXT: kshiftrw $1, %k0, %k2
; AVX512-NEXT: kmovd %k2, %edx
; AVX512-NEXT: andb $1, %dl
; AVX512-NEXT: kshiftrw $1, %k1, %k2
; AVX512-NEXT: kmovd %k2, %esi
; AVX512-NEXT: andb $1, %sil
; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: andb $1, %al
; AVX512-NEXT: kmovd %k1, %ecx
; AVX512-NEXT: andb $1, %cl
; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: mulb %cl
; AVX512-NEXT: movl %eax, %r8d
; AVX512-NEXT: seto %al
; AVX512-NEXT: testb $-2, %r8b
; AVX512-NEXT: setne %cl
; AVX512-NEXT: orb %al, %cl
; AVX512-NEXT: setne %al
; AVX512-NEXT: kmovd %eax, %k0
; AVX512-NEXT: kshiftrw $1, %k0, %k1
; AVX512-NEXT: movl %edx, %eax
; AVX512-NEXT: mulb %sil
; AVX512-NEXT: movl %eax, %edx
; AVX512-NEXT: seto %al
; AVX512-NEXT: testb $-2, %dl
; AVX512-NEXT: setne %cl
; AVX512-NEXT: orb %al, %cl
; AVX512-NEXT: setne %al
; AVX512-NEXT: kmovd %eax, %k2
; AVX512-NEXT: kxorw %k2, %k1, %k1
; AVX512-NEXT: kshiftlw $15, %k1, %k1
; AVX512-NEXT: kshiftrw $14, %k1, %k1
; AVX512-NEXT: kxorw %k1, %k0, %k0
; AVX512-NEXT: kshiftrw $2, %k0, %k1
; AVX512-NEXT: movl %r11d, %eax
; AVX512-NEXT: mulb %bl
; AVX512-NEXT: movl %eax, %esi
; AVX512-NEXT: seto %al
; AVX512-NEXT: testb $-2, %sil
; AVX512-NEXT: setne %cl
; AVX512-NEXT: orb %al, %cl
; AVX512-NEXT: setne %al
; AVX512-NEXT: kmovd %eax, %k2
; AVX512-NEXT: kxorw %k2, %k1, %k1
; AVX512-NEXT: kshiftlw $15, %k1, %k1
; AVX512-NEXT: kshiftrw $13, %k1, %k1
; AVX512-NEXT: kxorw %k1, %k0, %k0
; AVX512-NEXT: kshiftlw $13, %k0, %k0
; AVX512-NEXT: kshiftrw $13, %k0, %k0
; AVX512-NEXT: movl %r9d, %eax
; AVX512-NEXT: mulb %r10b
; AVX512-NEXT: # kill: def $al killed $al def $eax
; AVX512-NEXT: seto %cl
; AVX512-NEXT: testb $-2, %al
; AVX512-NEXT: setne %bl
; AVX512-NEXT: orb %cl, %bl
; AVX512-NEXT: setne %cl
; AVX512-NEXT: kmovd %ecx, %k1
; AVX512-NEXT: kshiftlw $3, %k1, %k1
; AVX512-NEXT: korw %k1, %k0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
; AVX512-NEXT: kmovd %r8d, %k0
; AVX512-NEXT: kshiftrw $1, %k0, %k1
; AVX512-NEXT: kmovd %edx, %k2
; AVX512-NEXT: kxorw %k2, %k1, %k1
; AVX512-NEXT: kshiftlw $15, %k1, %k1
; AVX512-NEXT: kshiftrw $14, %k1, %k1
; AVX512-NEXT: kxorw %k1, %k0, %k0
; AVX512-NEXT: kshiftrw $2, %k0, %k1
; AVX512-NEXT: kmovd %esi, %k2
; AVX512-NEXT: kxorw %k2, %k1, %k1
; AVX512-NEXT: kshiftlw $15, %k1, %k1
; AVX512-NEXT: kshiftrw $13, %k1, %k1
; AVX512-NEXT: kxorw %k1, %k0, %k0
; AVX512-NEXT: kshiftrw $3, %k0, %k1
; AVX512-NEXT: kmovd %eax, %k2
; AVX512-NEXT: kxorw %k2, %k1, %k1
; AVX512-NEXT: kshiftlw $15, %k1, %k1
; AVX512-NEXT: kshiftrw $12, %k1, %k1
; AVX512-NEXT: kxorw %k1, %k0, %k0
; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: movb %al, (%rdi)
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: retq
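; Note: for the <4 x i1> case each lane is multiplied as a scalar byte (MULB
; above); overflow is reported either when MULB itself overflows or when the
; product has any bit set above bit 0, which is what the TESTB $-2 checks
; detect.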
%t = call {<4 x i1>, <4 x i1>} @llvm.umul.with.overflow.v4i1(<4 x i1> %a0, <4 x i1> %a1)
%val = extractvalue {<4 x i1>, <4 x i1>} %t, 0
%obit = extractvalue {<4 x i1>, <4 x i1>} %t, 1
%res = sext <4 x i1> %obit to <4 x i32>
store <4 x i1> %val, <4 x i1>* %p2
ret <4 x i32> %res
}
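; The <2 x i128> case is expanded to scalar code: per 128-bit lane the high
; halves are tested for being simultaneously non-zero, the two cross products
; and the low-by-low product are formed with 64-bit MULQs, overflow is
; accumulated from the SETO/SETB flags, and the two overflow bits are packed
; back into a vector and sign-extended to the <2 x i32> result.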
define <2 x i32> @umulo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2) nounwind {
; SSE2-LABEL: umulo_v2i128:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %r15
; SSE2-NEXT: pushq %r14
; SSE2-NEXT: pushq %r13
; SSE2-NEXT: pushq %r12
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: movq %rcx, %rax
; SSE2-NEXT: movq %rdx, %r12
; SSE2-NEXT: movq %rdi, %r11
; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r14
; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r15
; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r10
; SSE2-NEXT: testq %r10, %r10
; SSE2-NEXT: setne %dl
; SSE2-NEXT: testq %rcx, %rcx
; SSE2-NEXT: setne %r13b
; SSE2-NEXT: andb %dl, %r13b
; SSE2-NEXT: mulq %r15
; SSE2-NEXT: movq %rax, %rdi
; SSE2-NEXT: seto %bpl
; SSE2-NEXT: movq %r10, %rax
; SSE2-NEXT: mulq %r12
; SSE2-NEXT: movq %rax, %rbx
; SSE2-NEXT: seto %cl
; SSE2-NEXT: orb %bpl, %cl
; SSE2-NEXT: addq %rdi, %rbx
; SSE2-NEXT: movq %r12, %rax
; SSE2-NEXT: mulq %r15
; SSE2-NEXT: movq %rax, %r10
; SSE2-NEXT: movq %rdx, %r15
; SSE2-NEXT: addq %rbx, %r15
; SSE2-NEXT: setb %al
; SSE2-NEXT: orb %cl, %al
; SSE2-NEXT: orb %r13b, %al
; SSE2-NEXT: movzbl %al, %ebp
; SSE2-NEXT: testq %r9, %r9
; SSE2-NEXT: setne %al
; SSE2-NEXT: testq %rsi, %rsi
; SSE2-NEXT: setne %r13b
; SSE2-NEXT: andb %al, %r13b
; SSE2-NEXT: movq %rsi, %rax
; SSE2-NEXT: mulq %r8
; SSE2-NEXT: movq %rax, %rsi
; SSE2-NEXT: seto %r12b
; SSE2-NEXT: movq %r9, %rax
; SSE2-NEXT: mulq %r11
; SSE2-NEXT: movq %rax, %rdi
; SSE2-NEXT: seto %bl
; SSE2-NEXT: orb %r12b, %bl
; SSE2-NEXT: addq %rsi, %rdi
; SSE2-NEXT: movq %r11, %rax
; SSE2-NEXT: mulq %r8
; SSE2-NEXT: addq %rdi, %rdx
; SSE2-NEXT: setb %cl
; SSE2-NEXT: orb %bl, %cl
; SSE2-NEXT: orb %r13b, %cl
; SSE2-NEXT: movzbl %cl, %ecx
; SSE2-NEXT: movd %ecx, %xmm0
; SSE2-NEXT: pinsrw $4, %ebp, %xmm0
; SSE2-NEXT: movq %r10, 16(%r14)
; SSE2-NEXT: movq %rax, (%r14)
; SSE2-NEXT: movq %r15, 24(%r14)
; SSE2-NEXT: movq %rdx, 8(%r14)
; SSE2-NEXT: psllq $63, %xmm0
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %r12
; SSE2-NEXT: popq %r13
; SSE2-NEXT: popq %r14
; SSE2-NEXT: popq %r15
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v2i128:
; SSSE3: # %bb.0:
; SSSE3-NEXT: pushq %rbp
; SSSE3-NEXT: pushq %r15
; SSSE3-NEXT: pushq %r14
; SSSE3-NEXT: pushq %r13
; SSSE3-NEXT: pushq %r12
; SSSE3-NEXT: pushq %rbx
; SSSE3-NEXT: movq %rcx, %rax
; SSSE3-NEXT: movq %rdx, %r12
; SSSE3-NEXT: movq %rdi, %r11
; SSSE3-NEXT: movq {{[0-9]+}}(%rsp), %r14
; SSSE3-NEXT: movq {{[0-9]+}}(%rsp), %r15
; SSSE3-NEXT: movq {{[0-9]+}}(%rsp), %r10
; SSSE3-NEXT: testq %r10, %r10
; SSSE3-NEXT: setne %dl
; SSSE3-NEXT: testq %rcx, %rcx
; SSSE3-NEXT: setne %r13b
; SSSE3-NEXT: andb %dl, %r13b
; SSSE3-NEXT: mulq %r15
; SSSE3-NEXT: movq %rax, %rdi
; SSSE3-NEXT: seto %bpl
; SSSE3-NEXT: movq %r10, %rax
; SSSE3-NEXT: mulq %r12
; SSSE3-NEXT: movq %rax, %rbx
; SSSE3-NEXT: seto %cl
; SSSE3-NEXT: orb %bpl, %cl
; SSSE3-NEXT: addq %rdi, %rbx
; SSSE3-NEXT: movq %r12, %rax
; SSSE3-NEXT: mulq %r15
; SSSE3-NEXT: movq %rax, %r10
; SSSE3-NEXT: movq %rdx, %r15
; SSSE3-NEXT: addq %rbx, %r15
; SSSE3-NEXT: setb %al
; SSSE3-NEXT: orb %cl, %al
; SSSE3-NEXT: orb %r13b, %al
; SSSE3-NEXT: movzbl %al, %ebp
; SSSE3-NEXT: testq %r9, %r9
; SSSE3-NEXT: setne %al
; SSSE3-NEXT: testq %rsi, %rsi
; SSSE3-NEXT: setne %r13b
; SSSE3-NEXT: andb %al, %r13b
; SSSE3-NEXT: movq %rsi, %rax
; SSSE3-NEXT: mulq %r8
; SSSE3-NEXT: movq %rax, %rsi
; SSSE3-NEXT: seto %r12b
; SSSE3-NEXT: movq %r9, %rax
; SSSE3-NEXT: mulq %r11
; SSSE3-NEXT: movq %rax, %rdi
; SSSE3-NEXT: seto %bl
; SSSE3-NEXT: orb %r12b, %bl
; SSSE3-NEXT: addq %rsi, %rdi
; SSSE3-NEXT: movq %r11, %rax
; SSSE3-NEXT: mulq %r8
; SSSE3-NEXT: addq %rdi, %rdx
; SSSE3-NEXT: setb %cl
; SSSE3-NEXT: orb %bl, %cl
; SSSE3-NEXT: orb %r13b, %cl
; SSSE3-NEXT: movzbl %cl, %ecx
; SSSE3-NEXT: movd %ecx, %xmm0
; SSSE3-NEXT: pinsrw $4, %ebp, %xmm0
; SSSE3-NEXT: movq %r10, 16(%r14)
; SSSE3-NEXT: movq %rax, (%r14)
; SSSE3-NEXT: movq %r15, 24(%r14)
; SSSE3-NEXT: movq %rdx, 8(%r14)
; SSSE3-NEXT: psllq $63, %xmm0
; SSSE3-NEXT: psrad $31, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSSE3-NEXT: popq %rbx
; SSSE3-NEXT: popq %r12
; SSSE3-NEXT: popq %r13
; SSSE3-NEXT: popq %r14
; SSSE3-NEXT: popq %r15
; SSSE3-NEXT: popq %rbp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v2i128:
; SSE41: # %bb.0:
; SSE41-NEXT: pushq %rbp
; SSE41-NEXT: pushq %r15
; SSE41-NEXT: pushq %r14
; SSE41-NEXT: pushq %r13
; SSE41-NEXT: pushq %r12
; SSE41-NEXT: pushq %rbx
; SSE41-NEXT: movq %rcx, %rax
; SSE41-NEXT: movq %rdx, %r12
; SSE41-NEXT: movq %rdi, %r11
; SSE41-NEXT: movq {{[0-9]+}}(%rsp), %r14
; SSE41-NEXT: movq {{[0-9]+}}(%rsp), %r15
; SSE41-NEXT: movq {{[0-9]+}}(%rsp), %r10
; SSE41-NEXT: testq %r10, %r10
; SSE41-NEXT: setne %dl
; SSE41-NEXT: testq %rcx, %rcx
; SSE41-NEXT: setne %r13b
; SSE41-NEXT: andb %dl, %r13b
; SSE41-NEXT: mulq %r15
; SSE41-NEXT: movq %rax, %rdi
; SSE41-NEXT: seto %bpl
; SSE41-NEXT: movq %r10, %rax
; SSE41-NEXT: mulq %r12
; SSE41-NEXT: movq %rax, %rbx
; SSE41-NEXT: seto %cl
; SSE41-NEXT: orb %bpl, %cl
; SSE41-NEXT: addq %rdi, %rbx
; SSE41-NEXT: movq %r12, %rax
; SSE41-NEXT: mulq %r15
; SSE41-NEXT: movq %rax, %r10
; SSE41-NEXT: movq %rdx, %r15
; SSE41-NEXT: addq %rbx, %r15
; SSE41-NEXT: setb %al
; SSE41-NEXT: orb %cl, %al
; SSE41-NEXT: orb %r13b, %al
; SSE41-NEXT: movzbl %al, %ebp
; SSE41-NEXT: testq %r9, %r9
; SSE41-NEXT: setne %al
; SSE41-NEXT: testq %rsi, %rsi
; SSE41-NEXT: setne %r13b
; SSE41-NEXT: andb %al, %r13b
; SSE41-NEXT: movq %rsi, %rax
; SSE41-NEXT: mulq %r8
; SSE41-NEXT: movq %rax, %rsi
; SSE41-NEXT: seto %r12b
; SSE41-NEXT: movq %r9, %rax
; SSE41-NEXT: mulq %r11
; SSE41-NEXT: movq %rax, %rdi
; SSE41-NEXT: seto %bl
; SSE41-NEXT: orb %r12b, %bl
; SSE41-NEXT: addq %rsi, %rdi
; SSE41-NEXT: movq %r11, %rax
; SSE41-NEXT: mulq %r8
; SSE41-NEXT: addq %rdi, %rdx
; SSE41-NEXT: setb %cl
; SSE41-NEXT: orb %bl, %cl
; SSE41-NEXT: orb %r13b, %cl
; SSE41-NEXT: movzbl %cl, %ecx
; SSE41-NEXT: movd %ecx, %xmm0
; SSE41-NEXT: pinsrb $8, %ebp, %xmm0
; SSE41-NEXT: movq %r10, 16(%r14)
; SSE41-NEXT: movq %rax, (%r14)
; SSE41-NEXT: movq %r15, 24(%r14)
; SSE41-NEXT: movq %rdx, 8(%r14)
; SSE41-NEXT: psllq $63, %xmm0
; SSE41-NEXT: psrad $31, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE41-NEXT: popq %rbx
; SSE41-NEXT: popq %r12
; SSE41-NEXT: popq %r13
; SSE41-NEXT: popq %r14
; SSE41-NEXT: popq %r15
; SSE41-NEXT: popq %rbp
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v2i128:
; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: pushq %r15
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: pushq %r13
; AVX1-NEXT: pushq %r12
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: movq %rcx, %rax
; AVX1-NEXT: movq %rdx, %r12
; AVX1-NEXT: movq %rdi, %r11
; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %r14
; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %r15
; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %r10
; AVX1-NEXT: testq %r10, %r10
; AVX1-NEXT: setne %dl
; AVX1-NEXT: testq %rcx, %rcx
; AVX1-NEXT: setne %r13b
; AVX1-NEXT: andb %dl, %r13b
; AVX1-NEXT: mulq %r15
; AVX1-NEXT: movq %rax, %rdi
; AVX1-NEXT: seto %bpl
; AVX1-NEXT: movq %r10, %rax
; AVX1-NEXT: mulq %r12
; AVX1-NEXT: movq %rax, %rbx
; AVX1-NEXT: seto %cl
; AVX1-NEXT: orb %bpl, %cl
; AVX1-NEXT: addq %rdi, %rbx
; AVX1-NEXT: movq %r12, %rax
; AVX1-NEXT: mulq %r15
; AVX1-NEXT: movq %rax, %r10
; AVX1-NEXT: movq %rdx, %r15
; AVX1-NEXT: addq %rbx, %r15
; AVX1-NEXT: setb %al
; AVX1-NEXT: orb %cl, %al
; AVX1-NEXT: orb %r13b, %al
; AVX1-NEXT: movzbl %al, %ebp
; AVX1-NEXT: testq %r9, %r9
; AVX1-NEXT: setne %al
; AVX1-NEXT: testq %rsi, %rsi
; AVX1-NEXT: setne %r13b
; AVX1-NEXT: andb %al, %r13b
; AVX1-NEXT: movq %rsi, %rax
; AVX1-NEXT: mulq %r8
; AVX1-NEXT: movq %rax, %rsi
; AVX1-NEXT: seto %r12b
; AVX1-NEXT: movq %r9, %rax
; AVX1-NEXT: mulq %r11
; AVX1-NEXT: movq %rax, %rdi
; AVX1-NEXT: seto %cl
; AVX1-NEXT: orb %r12b, %cl
; AVX1-NEXT: addq %rsi, %rdi
; AVX1-NEXT: movq %r11, %rax
; AVX1-NEXT: mulq %r8
; AVX1-NEXT: addq %rdi, %rdx
; AVX1-NEXT: setb %bl
; AVX1-NEXT: orb %cl, %bl
; AVX1-NEXT: orb %r13b, %bl
; AVX1-NEXT: movzbl %bl, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm0
; AVX1-NEXT: vpinsrb $8, %ebp, %xmm0, %xmm0
; AVX1-NEXT: movq %r10, 16(%r14)
; AVX1-NEXT: movq %rax, (%r14)
; AVX1-NEXT: movq %r15, 24(%r14)
; AVX1-NEXT: movq %rdx, 8(%r14)
; AVX1-NEXT: vpsllq $63, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; AVX1-NEXT: popq %rbx
; AVX1-NEXT: popq %r12
; AVX1-NEXT: popq %r13
; AVX1-NEXT: popq %r14
; AVX1-NEXT: popq %r15
; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v2i128:
; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: pushq %r15
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %r13
; AVX2-NEXT: pushq %r12
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: movq %rcx, %rax
; AVX2-NEXT: movq %rdx, %r12
; AVX2-NEXT: movq %rdi, %r11
; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r14
; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r15
; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r10
; AVX2-NEXT: testq %r10, %r10
; AVX2-NEXT: setne %dl
; AVX2-NEXT: testq %rcx, %rcx
; AVX2-NEXT: setne %r13b
; AVX2-NEXT: andb %dl, %r13b
; AVX2-NEXT: mulq %r15
; AVX2-NEXT: movq %rax, %rdi
; AVX2-NEXT: seto %bpl
; AVX2-NEXT: movq %r10, %rax
; AVX2-NEXT: mulq %r12
; AVX2-NEXT: movq %rax, %rbx
; AVX2-NEXT: seto %cl
; AVX2-NEXT: orb %bpl, %cl
; AVX2-NEXT: addq %rdi, %rbx
; AVX2-NEXT: movq %r12, %rax
; AVX2-NEXT: mulq %r15
; AVX2-NEXT: movq %rax, %r10
; AVX2-NEXT: movq %rdx, %r15
; AVX2-NEXT: addq %rbx, %r15
; AVX2-NEXT: setb %al
; AVX2-NEXT: orb %cl, %al
; AVX2-NEXT: orb %r13b, %al
; AVX2-NEXT: movzbl %al, %ebp
; AVX2-NEXT: testq %r9, %r9
; AVX2-NEXT: setne %al
; AVX2-NEXT: testq %rsi, %rsi
; AVX2-NEXT: setne %r13b
; AVX2-NEXT: andb %al, %r13b
; AVX2-NEXT: movq %rsi, %rax
; AVX2-NEXT: mulq %r8
; AVX2-NEXT: movq %rax, %rsi
; AVX2-NEXT: seto %r12b
; AVX2-NEXT: movq %r9, %rax
; AVX2-NEXT: mulq %r11
; AVX2-NEXT: movq %rax, %rdi
; AVX2-NEXT: seto %cl
; AVX2-NEXT: orb %r12b, %cl
; AVX2-NEXT: addq %rsi, %rdi
; AVX2-NEXT: movq %r11, %rax
; AVX2-NEXT: mulq %r8
; AVX2-NEXT: addq %rdi, %rdx
; AVX2-NEXT: setb %bl
; AVX2-NEXT: orb %cl, %bl
; AVX2-NEXT: orb %r13b, %bl
; AVX2-NEXT: movzbl %bl, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm0
; AVX2-NEXT: vpinsrb $8, %ebp, %xmm0, %xmm0
; AVX2-NEXT: movq %r10, 16(%r14)
; AVX2-NEXT: movq %rax, (%r14)
; AVX2-NEXT: movq %r15, 24(%r14)
; AVX2-NEXT: movq %rdx, 8(%r14)
; AVX2-NEXT: vpsllq $63, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r12
; AVX2-NEXT: popq %r13
; AVX2-NEXT: popq %r14
; AVX2-NEXT: popq %r15
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v2i128:
; AVX512: # %bb.0:
; AVX512-NEXT: pushq %rbp
; AVX512-NEXT: pushq %r15
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: pushq %r13
; AVX512-NEXT: pushq %r12
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: movq %rcx, %rax
; AVX512-NEXT: movq %rdx, %r12
; AVX512-NEXT: movq %rdi, %r11
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r14
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r15
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10
; AVX512-NEXT: testq %r10, %r10
; AVX512-NEXT: setne %dl
; AVX512-NEXT: testq %rcx, %rcx
; AVX512-NEXT: setne %r13b
; AVX512-NEXT: andb %dl, %r13b
; AVX512-NEXT: mulq %r15
; AVX512-NEXT: movq %rax, %rdi
; AVX512-NEXT: seto %bpl
; AVX512-NEXT: movq %r10, %rax
; AVX512-NEXT: mulq %r12
; AVX512-NEXT: movq %rax, %rbx
; AVX512-NEXT: seto %cl
; AVX512-NEXT: orb %bpl, %cl
; AVX512-NEXT: addq %rdi, %rbx
; AVX512-NEXT: movq %r12, %rax
; AVX512-NEXT: mulq %r15
; AVX512-NEXT: movq %rax, %r10
; AVX512-NEXT: movq %rdx, %r15
; AVX512-NEXT: addq %rbx, %r15
; AVX512-NEXT: setb %al
; AVX512-NEXT: orb %cl, %al
; AVX512-NEXT: orb %r13b, %al
; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp)
; AVX512-NEXT: testq %r9, %r9
; AVX512-NEXT: setne %al
; AVX512-NEXT: testq %rsi, %rsi
; AVX512-NEXT: setne %cl
; AVX512-NEXT: andb %al, %cl
; AVX512-NEXT: movq %rsi, %rax
; AVX512-NEXT: mulq %r8
; AVX512-NEXT: movq %rax, %rsi
; AVX512-NEXT: seto %bpl
; AVX512-NEXT: movq %r9, %rax
; AVX512-NEXT: mulq %r11
; AVX512-NEXT: movq %rax, %rdi
; AVX512-NEXT: seto %bl
; AVX512-NEXT: orb %bpl, %bl
; AVX512-NEXT: addq %rsi, %rdi
; AVX512-NEXT: movq %r11, %rax
; AVX512-NEXT: mulq %r8
; AVX512-NEXT: addq %rdi, %rdx
; AVX512-NEXT: setb %sil
; AVX512-NEXT: orb %bl, %sil
; AVX512-NEXT: orb %cl, %sil
; AVX512-NEXT: movb %sil, -{{[0-9]+}}(%rsp)
; AVX512-NEXT: kmovw -{{[0-9]+}}(%rsp), %k1
; AVX512-NEXT: movq %r10, 16(%r14)
; AVX512-NEXT: movq %rax, (%r14)
; AVX512-NEXT: movq %r15, 24(%r14)
; AVX512-NEXT: movq %rdx, 8(%r14)
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: popq %r12
; AVX512-NEXT: popq %r13
; AVX512-NEXT: popq %r14
; AVX512-NEXT: popq %r15
; AVX512-NEXT: popq %rbp
; AVX512-NEXT: retq
%t = call {<2 x i128>, <2 x i1>} @llvm.umul.with.overflow.v2i128(<2 x i128> %a0, <2 x i128> %a1)
%val = extractvalue {<2 x i128>, <2 x i1>} %t, 0
%obit = extractvalue {<2 x i128>, <2 x i1>} %t, 1
%res = sext <2 x i1> %obit to <2 x i32>
store <2 x i128> %val, <2 x i128>* %p2
ret <2 x i32> %res
}