mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-26 04:32:44 +01:00), commit 18451cc4a4
Commit message: The motivation is that the update script has at least two deviations (`<...>@GOT`/`<...>@PLT`/ and not hiding pointer arithmetics) from what pretty much all the checklines were generated with, and most of the tests are still not updated, so each time one of the non-up-to-date tests is updated to see the effect of the code change, there is a lot of noise. Instead of having to deal with that each time, let's just deal with everything at once. This has been done via: ``` cd llvm-project/llvm/test/CodeGen/X86 grep -rl "; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py" | xargs -L1 <...>/llvm-project/llvm/utils/update_llc_test_checks.py --llc-binary <...>/llvm-project/build/bin/llc ``` Not all tests were regenerated, however.
2173 lines · 109 KiB · LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=SSE,X86-SSE
; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=AVX,AVX1,X86-AVX1
; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=AVX,AVX512,X86-AVX512
; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=SSE,X64-SSE
; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=AVX,X64-AVX,AVX1,X64-AVX1
; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=AVX,X64-AVX,AVX512,X64-AVX512
@g16 = external global i16
|
|
|
|
; Insert a GPR i32 into lane 1 of a <4 x i32>; should select (v)pinsrd.
define <4 x i32> @pinsrd_1(i32 %s, <4 x i32> %tmp) nounwind {
; X86-SSE-LABEL: pinsrd_1:
; X86-SSE: ## %bb.0:
; X86-SSE-NEXT: pinsrd $1, {{[0-9]+}}(%esp), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x22,0x44,0x24,0x04,0x01]
; X86-SSE-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX1-LABEL: pinsrd_1:
; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x04,0x01]
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX512-LABEL: pinsrd_1:
; X86-AVX512: ## %bb.0:
; X86-AVX512-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x04,0x01]
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: pinsrd_1:
; X64-SSE: ## %bb.0:
; X64-SSE-NEXT: pinsrd $1, %edi, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x22,0xc7,0x01]
; X64-SSE-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX1-LABEL: pinsrd_1:
; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x22,0xc7,0x01]
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX512-LABEL: pinsrd_1:
; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc7,0x01]
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
%tmp1 = insertelement <4 x i32> %tmp, i32 %s, i32 1
ret <4 x i32> %tmp1
}
|
|
|
|
; Insert a GPR i8 into lane 1 of a <16 x i8>; should select (v)pinsrb.
define <16 x i8> @pinsrb_1(i8 %s, <16 x i8> %tmp) nounwind {
; X86-SSE-LABEL: pinsrb_1:
; X86-SSE: ## %bb.0:
; X86-SSE-NEXT: pinsrb $1, {{[0-9]+}}(%esp), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x20,0x44,0x24,0x04,0x01]
; X86-SSE-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX1-LABEL: pinsrb_1:
; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0x44,0x24,0x04,0x01]
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX512-LABEL: pinsrb_1:
; X86-AVX512: ## %bb.0:
; X86-AVX512-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0x44,0x24,0x04,0x01]
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: pinsrb_1:
; X64-SSE: ## %bb.0:
; X64-SSE-NEXT: pinsrb $1, %edi, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x20,0xc7,0x01]
; X64-SSE-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX1-LABEL: pinsrb_1:
; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vpinsrb $1, %edi, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc7,0x01]
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX512-LABEL: pinsrb_1:
; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vpinsrb $1, %edi, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc7,0x01]
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
%tmp1 = insertelement <16 x i8> %tmp, i8 %s, i32 1
ret <16 x i8> %tmp1
}
|
|
|
|
; Load @g16 and zero-extend the low bytes; should fold the load into (v)pmovzxbq.
define <2 x i64> @pmovzxbq_1() nounwind {
; X86-SSE-LABEL: pmovzxbq_1:
; X86-SSE: ## %bb.0: ## %entry
; X86-SSE-NEXT: movl L_g16$non_lazy_ptr, %eax ## encoding: [0xa1,A,A,A,A]
; X86-SSE-NEXT: ## fixup A - offset: 1, value: L_g16$non_lazy_ptr, kind: FK_Data_4
; X86-SSE-NEXT: pmovzxbq (%eax), %xmm0 ## encoding: [0x66,0x0f,0x38,0x32,0x00]
; X86-SSE-NEXT: ## xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; X86-SSE-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX1-LABEL: pmovzxbq_1:
; X86-AVX1: ## %bb.0: ## %entry
; X86-AVX1-NEXT: movl L_g16$non_lazy_ptr, %eax ## encoding: [0xa1,A,A,A,A]
; X86-AVX1-NEXT: ## fixup A - offset: 1, value: L_g16$non_lazy_ptr, kind: FK_Data_4
; X86-AVX1-NEXT: vpmovzxbq (%eax), %xmm0 ## encoding: [0xc4,0xe2,0x79,0x32,0x00]
; X86-AVX1-NEXT: ## xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX512-LABEL: pmovzxbq_1:
; X86-AVX512: ## %bb.0: ## %entry
; X86-AVX512-NEXT: movl L_g16$non_lazy_ptr, %eax ## encoding: [0xa1,A,A,A,A]
; X86-AVX512-NEXT: ## fixup A - offset: 1, value: L_g16$non_lazy_ptr, kind: FK_Data_4
; X86-AVX512-NEXT: vpmovzxbq (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x32,0x00]
; X86-AVX512-NEXT: ## xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: pmovzxbq_1:
; X64-SSE: ## %bb.0: ## %entry
; X64-SSE-NEXT: movq _g16@GOTPCREL(%rip), %rax ## encoding: [0x48,0x8b,0x05,A,A,A,A]
; X64-SSE-NEXT: ## fixup A - offset: 3, value: _g16@GOTPCREL-4, kind: reloc_riprel_4byte_movq_load
; X64-SSE-NEXT: pmovzxbq (%rax), %xmm0 ## encoding: [0x66,0x0f,0x38,0x32,0x00]
; X64-SSE-NEXT: ## xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; X64-SSE-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX1-LABEL: pmovzxbq_1:
; X64-AVX1: ## %bb.0: ## %entry
; X64-AVX1-NEXT: movq _g16@GOTPCREL(%rip), %rax ## encoding: [0x48,0x8b,0x05,A,A,A,A]
; X64-AVX1-NEXT: ## fixup A - offset: 3, value: _g16@GOTPCREL-4, kind: reloc_riprel_4byte_movq_load
; X64-AVX1-NEXT: vpmovzxbq (%rax), %xmm0 ## encoding: [0xc4,0xe2,0x79,0x32,0x00]
; X64-AVX1-NEXT: ## xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX512-LABEL: pmovzxbq_1:
; X64-AVX512: ## %bb.0: ## %entry
; X64-AVX512-NEXT: movq _g16@GOTPCREL(%rip), %rax ## encoding: [0x48,0x8b,0x05,A,A,A,A]
; X64-AVX512-NEXT: ## fixup A - offset: 3, value: _g16@GOTPCREL-4, kind: reloc_riprel_4byte_movq_load
; X64-AVX512-NEXT: vpmovzxbq (%rax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x32,0x00]
; X64-AVX512-NEXT: ## xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
entry:
%0 = load i16, i16* @g16, align 2 ; <i16> [#uses=1]
%1 = insertelement <8 x i16> undef, i16 %0, i32 0 ; <<8 x i16>> [#uses=1]
%2 = bitcast <8 x i16> %1 to <16 x i8> ; <<16 x i8>> [#uses=1]
%3 = tail call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %2) nounwind readnone ; <<2 x i64>> [#uses=1]
ret <2 x i64> %3
}
|
|
|
|
declare <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8>) nounwind readnone
|
|
|
|
; Extract lane 3 of a <4 x float> as an i32 bit pattern; should select (v)extractps.
define i32 @extractps_1(<4 x float> %v) nounwind {
; SSE-LABEL: extractps_1:
; SSE: ## %bb.0:
; SSE-NEXT: extractps $3, %xmm0, %eax ## encoding: [0x66,0x0f,0x3a,0x17,0xc0,0x03]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: extractps_1:
; AVX1: ## %bb.0:
; AVX1-NEXT: vextractps $3, %xmm0, %eax ## encoding: [0xc4,0xe3,0x79,0x17,0xc0,0x03]
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: extractps_1:
; AVX512: ## %bb.0:
; AVX512-NEXT: vextractps $3, %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x17,0xc0,0x03]
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%s = extractelement <4 x float> %v, i32 3
%i = bitcast float %s to i32
ret i32 %i
}
|
|
; Same as extractps_1 but with the bitcast done on the vector first.
define i32 @extractps_2(<4 x float> %v) nounwind {
; SSE-LABEL: extractps_2:
; SSE: ## %bb.0:
; SSE-NEXT: extractps $3, %xmm0, %eax ## encoding: [0x66,0x0f,0x3a,0x17,0xc0,0x03]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: extractps_2:
; AVX1: ## %bb.0:
; AVX1-NEXT: vextractps $3, %xmm0, %eax ## encoding: [0xc4,0xe3,0x79,0x17,0xc0,0x03]
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: extractps_2:
; AVX512: ## %bb.0:
; AVX512-NEXT: vextractps $3, %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x17,0xc0,0x03]
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%t = bitcast <4 x float> %v to <4 x i32>
%s = extractelement <4 x i32> %t, i32 3
ret i32 %s
}
|
|
|
|
|
|
; The non-store form of extractps puts its result into a GPR.
|
|
; This makes it suitable for an extract from a <4 x float> that
|
|
; is bitcasted to i32, but unsuitable for much of anything else.
|
|
|
|
; Extract lane 3 as a float and add a constant; kept in XMM (shufps/vpermilps), not extractps.
define float @ext_1(<4 x float> %v) nounwind {
; X86-SSE-LABEL: ext_1:
; X86-SSE: ## %bb.0:
; X86-SSE-NEXT: pushl %eax ## encoding: [0x50]
; X86-SSE-NEXT: shufps $255, %xmm0, %xmm0 ## encoding: [0x0f,0xc6,0xc0,0xff]
; X86-SSE-NEXT: ## xmm0 = xmm0[3,3,3,3]
; X86-SSE-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ## encoding: [0xf3,0x0f,0x58,0x05,A,A,A,A]
; X86-SSE-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-SSE-NEXT: movss %xmm0, (%esp) ## encoding: [0xf3,0x0f,0x11,0x04,0x24]
; X86-SSE-NEXT: flds (%esp) ## encoding: [0xd9,0x04,0x24]
; X86-SSE-NEXT: popl %eax ## encoding: [0x58]
; X86-SSE-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX1-LABEL: ext_1:
; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: pushl %eax ## encoding: [0x50]
; X86-AVX1-NEXT: vpermilps $255, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xff]
; X86-AVX1-NEXT: ## xmm0 = xmm0[3,3,3,3]
; X86-AVX1-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x58,0x05,A,A,A,A]
; X86-AVX1-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX1-NEXT: vmovss %xmm0, (%esp) ## encoding: [0xc5,0xfa,0x11,0x04,0x24]
; X86-AVX1-NEXT: flds (%esp) ## encoding: [0xd9,0x04,0x24]
; X86-AVX1-NEXT: popl %eax ## encoding: [0x58]
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX512-LABEL: ext_1:
; X86-AVX512: ## %bb.0:
; X86-AVX512-NEXT: pushl %eax ## encoding: [0x50]
; X86-AVX512-NEXT: vpermilps $255, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xff]
; X86-AVX512-NEXT: ## xmm0 = xmm0[3,3,3,3]
; X86-AVX512-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x58,0x05,A,A,A,A]
; X86-AVX512-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512-NEXT: vmovss %xmm0, (%esp) ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x04,0x24]
; X86-AVX512-NEXT: flds (%esp) ## encoding: [0xd9,0x04,0x24]
; X86-AVX512-NEXT: popl %eax ## encoding: [0x58]
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: ext_1:
; X64-SSE: ## %bb.0:
; X64-SSE-NEXT: shufps $255, %xmm0, %xmm0 ## encoding: [0x0f,0xc6,0xc0,0xff]
; X64-SSE-NEXT: ## xmm0 = xmm0[3,3,3,3]
; X64-SSE-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ## encoding: [0xf3,0x0f,0x58,0x05,A,A,A,A]
; X64-SSE-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-SSE-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX1-LABEL: ext_1:
; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vpermilps $255, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xff]
; X64-AVX1-NEXT: ## xmm0 = xmm0[3,3,3,3]
; X64-AVX1-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x58,0x05,A,A,A,A]
; X64-AVX1-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX512-LABEL: ext_1:
; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vpermilps $255, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xff]
; X64-AVX512-NEXT: ## xmm0 = xmm0[3,3,3,3]
; X64-AVX512-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x58,0x05,A,A,A,A]
; X64-AVX512-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
%s = extractelement <4 x float> %v, i32 3
%t = fadd float %s, 1.0
ret float %t
}
|
|
|
|
; Extract lane 3 as a float with no arithmetic; a single shuffle suffices.
define float @ext_2(<4 x float> %v) nounwind {
; X86-SSE-LABEL: ext_2:
; X86-SSE: ## %bb.0:
; X86-SSE-NEXT: pushl %eax ## encoding: [0x50]
; X86-SSE-NEXT: shufps $255, %xmm0, %xmm0 ## encoding: [0x0f,0xc6,0xc0,0xff]
; X86-SSE-NEXT: ## xmm0 = xmm0[3,3,3,3]
; X86-SSE-NEXT: movss %xmm0, (%esp) ## encoding: [0xf3,0x0f,0x11,0x04,0x24]
; X86-SSE-NEXT: flds (%esp) ## encoding: [0xd9,0x04,0x24]
; X86-SSE-NEXT: popl %eax ## encoding: [0x58]
; X86-SSE-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX1-LABEL: ext_2:
; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: pushl %eax ## encoding: [0x50]
; X86-AVX1-NEXT: vpermilps $255, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xff]
; X86-AVX1-NEXT: ## xmm0 = xmm0[3,3,3,3]
; X86-AVX1-NEXT: vmovss %xmm0, (%esp) ## encoding: [0xc5,0xfa,0x11,0x04,0x24]
; X86-AVX1-NEXT: flds (%esp) ## encoding: [0xd9,0x04,0x24]
; X86-AVX1-NEXT: popl %eax ## encoding: [0x58]
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX512-LABEL: ext_2:
; X86-AVX512: ## %bb.0:
; X86-AVX512-NEXT: pushl %eax ## encoding: [0x50]
; X86-AVX512-NEXT: vpermilps $255, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xff]
; X86-AVX512-NEXT: ## xmm0 = xmm0[3,3,3,3]
; X86-AVX512-NEXT: vmovss %xmm0, (%esp) ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x04,0x24]
; X86-AVX512-NEXT: flds (%esp) ## encoding: [0xd9,0x04,0x24]
; X86-AVX512-NEXT: popl %eax ## encoding: [0x58]
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: ext_2:
; X64-SSE: ## %bb.0:
; X64-SSE-NEXT: shufps $255, %xmm0, %xmm0 ## encoding: [0x0f,0xc6,0xc0,0xff]
; X64-SSE-NEXT: ## xmm0 = xmm0[3,3,3,3]
; X64-SSE-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX1-LABEL: ext_2:
; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vpermilps $255, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xff]
; X64-AVX1-NEXT: ## xmm0 = xmm0[3,3,3,3]
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX512-LABEL: ext_2:
; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vpermilps $255, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xff]
; X64-AVX512-NEXT: ## xmm0 = xmm0[3,3,3,3]
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
%s = extractelement <4 x float> %v, i32 3
ret float %s
}
|
|
|
|
; Extract an i32 lane into a GPR; should select (v)extractps.
define i32 @ext_3(<4 x i32> %v) nounwind {
; SSE-LABEL: ext_3:
; SSE: ## %bb.0:
; SSE-NEXT: extractps $3, %xmm0, %eax ## encoding: [0x66,0x0f,0x3a,0x17,0xc0,0x03]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: ext_3:
; AVX1: ## %bb.0:
; AVX1-NEXT: vextractps $3, %xmm0, %eax ## encoding: [0xc4,0xe3,0x79,0x17,0xc0,0x03]
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: ext_3:
; AVX512: ## %bb.0:
; AVX512-NEXT: vextractps $3, %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x17,0xc0,0x03]
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%i = extractelement <4 x i32> %v, i32 3
ret i32 %i
}
|
|
|
|
; Direct use of the insertps intrinsic with immediate 21.
define <4 x float> @insertps_1(<4 x float> %t1, <4 x float> %t2) nounwind {
; SSE-LABEL: insertps_1:
; SSE: ## %bb.0:
; SSE-NEXT: insertps $21, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x15]
; SSE-NEXT: ## xmm0 = zero,xmm1[0],zero,xmm0[3]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: insertps_1:
; AVX1: ## %bb.0:
; AVX1-NEXT: vinsertps $21, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x15]
; AVX1-NEXT: ## xmm0 = zero,xmm1[0],zero,xmm0[3]
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: insertps_1:
; AVX512: ## %bb.0:
; AVX512-NEXT: vinsertps $21, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x15]
; AVX512-NEXT: ## xmm0 = zero,xmm1[0],zero,xmm0[3]
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%tmp1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %t1, <4 x float> %t2, i32 21) nounwind readnone
ret <4 x float> %tmp1
}
|
|
|
|
declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32) nounwind readnone
|
|
|
|
; When optimizing for speed, prefer blendps over insertps even if it means we have to
; generate a separate movss to load the scalar operand.
|
; Scalar insert into lane 0: expect (v)blendps rather than insertps at -O2.
define <4 x float> @blendps_not_insertps_1(<4 x float> %t1, float %t2) nounwind {
; X86-SSE-LABEL: blendps_not_insertps_1:
; X86-SSE: ## %bb.0:
; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x04]
; X86-SSE-NEXT: ## xmm1 = mem[0],zero,zero,zero
; X86-SSE-NEXT: blendps $1, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc1,0x01]
; X86-SSE-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; X86-SSE-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX1-LABEL: blendps_not_insertps_1:
; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x04]
; X86-AVX1-NEXT: ## xmm1 = mem[0],zero,zero,zero
; X86-AVX1-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
; X86-AVX1-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX512-LABEL: blendps_not_insertps_1:
; X86-AVX512: ## %bb.0:
; X86-AVX512-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x04]
; X86-AVX512-NEXT: ## xmm1 = mem[0],zero,zero,zero
; X86-AVX512-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
; X86-AVX512-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: blendps_not_insertps_1:
; X64-SSE: ## %bb.0:
; X64-SSE-NEXT: blendps $1, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc1,0x01]
; X64-SSE-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; X64-SSE-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX-LABEL: blendps_not_insertps_1:
; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
; X64-AVX-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; X64-AVX-NEXT: retq ## encoding: [0xc3]
%tmp1 = insertelement <4 x float> %t1, float %t2, i32 0
ret <4 x float> %tmp1
}
|
|
|
|
; When optimizing for size, generate an insertps if there's a load fold opportunity.
; The difference between i386 and x86-64 ABIs for the float operand means we should
; generate an insertps for X86 but not for X64!
|
; Same insert as blendps_not_insertps_1 but compiled minsize; here movss is chosen.
define <4 x float> @insertps_or_blendps(<4 x float> %t1, float %t2) minsize nounwind {
; X86-SSE-LABEL: insertps_or_blendps:
; X86-SSE: ## %bb.0:
; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x04]
; X86-SSE-NEXT: ## xmm1 = mem[0],zero,zero,zero
; X86-SSE-NEXT: movss %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x10,0xc1]
; X86-SSE-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; X86-SSE-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX1-LABEL: insertps_or_blendps:
; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x04]
; X86-AVX1-NEXT: ## xmm1 = mem[0],zero,zero,zero
; X86-AVX1-NEXT: vmovss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x10,0xc1]
; X86-AVX1-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX512-LABEL: insertps_or_blendps:
; X86-AVX512: ## %bb.0:
; X86-AVX512-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x04]
; X86-AVX512-NEXT: ## xmm1 = mem[0],zero,zero,zero
; X86-AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0xc1]
; X86-AVX512-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: insertps_or_blendps:
; X64-SSE: ## %bb.0:
; X64-SSE-NEXT: movss %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x10,0xc1]
; X64-SSE-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; X64-SSE-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX1-LABEL: insertps_or_blendps:
; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vmovss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x10,0xc1]
; X64-AVX1-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX512-LABEL: insertps_or_blendps:
; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0xc1]
; X64-AVX512-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
%tmp1 = insertelement <4 x float> %t1, float %t2, i32 0
ret <4 x float> %tmp1
}
|
|
|
|
; An insert into the low 32-bits of a vector from the low 32-bits of another vector
; is always just a blendps because blendps is never more expensive than insertps.
|
; Lane-0-to-lane-0 vector insert: expect a single (v)blendps.
define <4 x float> @blendps_not_insertps_2(<4 x float> %t1, <4 x float> %t2) nounwind {
; SSE-LABEL: blendps_not_insertps_2:
; SSE: ## %bb.0:
; SSE-NEXT: blendps $1, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc1,0x01]
; SSE-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: blendps_not_insertps_2:
; AVX: ## %bb.0:
; AVX-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
; AVX-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%tmp2 = extractelement <4 x float> %t2, i32 0
%tmp1 = insertelement <4 x float> %t1, float %tmp2, i32 0
ret <4 x float> %tmp1
}
|
|
|
|
; ptestz intrinsic: (v)ptest + sete materializes the ZF result.
define i32 @ptestz_1(<2 x i64> %t1, <2 x i64> %t2) nounwind {
; SSE-LABEL: ptestz_1:
; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
; SSE-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: ptestz_1:
; AVX: ## %bb.0:
; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX-NEXT: vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
; AVX-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%tmp1 = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %t1, <2 x i64> %t2) nounwind readnone
ret i32 %tmp1
}
|
|
|
|
; ptestc intrinsic: (v)ptest + setb materializes the CF result.
define i32 @ptestz_2(<2 x i64> %t1, <2 x i64> %t2) nounwind {
; SSE-LABEL: ptestz_2:
; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
; SSE-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: ptestz_2:
; AVX: ## %bb.0:
; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX-NEXT: vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
; AVX-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%tmp1 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %t1, <2 x i64> %t2) nounwind readnone
ret i32 %tmp1
}
|
|
|
|
; ptestnzc intrinsic: (v)ptest + seta materializes the "not ZF and not CF" result.
define i32 @ptestz_3(<2 x i64> %t1, <2 x i64> %t2) nounwind {
; SSE-LABEL: ptestz_3:
; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
; SSE-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: ptestz_3:
; AVX: ## %bb.0:
; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX-NEXT: vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
; AVX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%tmp1 = call i32 @llvm.x86.sse41.ptestnzc(<2 x i64> %t1, <2 x i64> %t2) nounwind readnone
ret i32 %tmp1
}
|
|
|
|
declare i32 @llvm.x86.sse41.ptestz(<2 x i64>, <2 x i64>) nounwind readnone
|
|
declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>) nounwind readnone
|
|
declare i32 @llvm.x86.sse41.ptestnzc(<2 x i64>, <2 x i64>) nounwind readnone
|
|
|
|
; This used to compile to insertps $0 + insertps $16. insertps $0 is always
; pointless.
|
|
; Lanewise complex-style add built via extract/insert; only one insertps should survive.
define <2 x float> @buildvector(<2 x float> %A, <2 x float> %B) nounwind {
; SSE-LABEL: buildvector:
; SSE: ## %bb.0: ## %entry
; SSE-NEXT: movshdup %xmm0, %xmm2 ## encoding: [0xf3,0x0f,0x16,0xd0]
; SSE-NEXT: ## xmm2 = xmm0[1,1,3,3]
; SSE-NEXT: movshdup %xmm1, %xmm3 ## encoding: [0xf3,0x0f,0x16,0xd9]
; SSE-NEXT: ## xmm3 = xmm1[1,1,3,3]
; SSE-NEXT: addss %xmm2, %xmm3 ## encoding: [0xf3,0x0f,0x58,0xda]
; SSE-NEXT: addss %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x58,0xc1]
; SSE-NEXT: insertps $16, %xmm3, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc3,0x10]
; SSE-NEXT: ## xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: buildvector:
; AVX1: ## %bb.0: ## %entry
; AVX1-NEXT: vmovshdup %xmm0, %xmm2 ## encoding: [0xc5,0xfa,0x16,0xd0]
; AVX1-NEXT: ## xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vmovshdup %xmm1, %xmm3 ## encoding: [0xc5,0xfa,0x16,0xd9]
; AVX1-NEXT: ## xmm3 = xmm1[1,1,3,3]
; AVX1-NEXT: vaddss %xmm3, %xmm2, %xmm2 ## encoding: [0xc5,0xea,0x58,0xd3]
; AVX1-NEXT: vaddss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x58,0xc1]
; AVX1-NEXT: vinsertps $16, %xmm2, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc2,0x10]
; AVX1-NEXT: ## xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: buildvector:
; AVX512: ## %bb.0: ## %entry
; AVX512-NEXT: vmovshdup %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x16,0xd0]
; AVX512-NEXT: ## xmm2 = xmm0[1,1,3,3]
; AVX512-NEXT: vmovshdup %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x16,0xd9]
; AVX512-NEXT: ## xmm3 = xmm1[1,1,3,3]
; AVX512-NEXT: vaddss %xmm3, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xea,0x58,0xd3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x58,0xc1]
; AVX512-NEXT: vinsertps $16, %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc2,0x10]
; AVX512-NEXT: ## xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
entry:
%tmp7 = extractelement <2 x float> %A, i32 0
%tmp5 = extractelement <2 x float> %A, i32 1
%tmp3 = extractelement <2 x float> %B, i32 0
%tmp1 = extractelement <2 x float> %B, i32 1
%add.r = fadd float %tmp7, %tmp3
%add.i = fadd float %tmp5, %tmp1
%tmp11 = insertelement <2 x float> undef, float %add.r, i32 0
%tmp9 = insertelement <2 x float> %tmp11, float %add.i, i32 1
ret <2 x float> %tmp9
}
|
|
|
|
; Shufflevector taking lane 0 of a loaded vector into lane 3; selects (v)insertps $48.
define <4 x float> @insertps_from_shufflevector_1(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
; X86-SSE-LABEL: insertps_from_shufflevector_1:
; X86-SSE: ## %bb.0: ## %entry
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT: movaps (%eax), %xmm1 ## encoding: [0x0f,0x28,0x08]
; X86-SSE-NEXT: insertps $48, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x30]
; X86-SSE-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[0]
; X86-SSE-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX1-LABEL: insertps_from_shufflevector_1:
; X86-AVX1: ## %bb.0: ## %entry
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT: vmovaps (%eax), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x08]
; X86-AVX1-NEXT: vinsertps $48, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
; X86-AVX1-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[0]
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX512-LABEL: insertps_from_shufflevector_1:
; X86-AVX512: ## %bb.0: ## %entry
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT: vmovaps (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x08]
; X86-AVX512-NEXT: vinsertps $48, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
; X86-AVX512-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[0]
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: insertps_from_shufflevector_1:
; X64-SSE: ## %bb.0: ## %entry
; X64-SSE-NEXT: movaps (%rdi), %xmm1 ## encoding: [0x0f,0x28,0x0f]
; X64-SSE-NEXT: insertps $48, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x30]
; X64-SSE-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[0]
; X64-SSE-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX1-LABEL: insertps_from_shufflevector_1:
; X64-AVX1: ## %bb.0: ## %entry
; X64-AVX1-NEXT: vmovaps (%rdi), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0f]
; X64-AVX1-NEXT: vinsertps $48, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
; X64-AVX1-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[0]
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX512-LABEL: insertps_from_shufflevector_1:
; X64-AVX512: ## %bb.0: ## %entry
; X64-AVX512-NEXT: vmovaps (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x0f]
; X64-AVX512-NEXT: vinsertps $48, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
; X64-AVX512-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[0]
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
entry:
%0 = load <4 x float>, <4 x float>* %pb, align 16
%vecinit6 = shufflevector <4 x float> %a, <4 x float> %0, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
ret <4 x float> %vecinit6
}
|
|
|
|
; Register-to-register shuffle taking lane 1 of %b into lane 2; selects (v)insertps $96.
define <4 x float> @insertps_from_shufflevector_2(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: insertps_from_shufflevector_2:
; SSE: ## %bb.0: ## %entry
; SSE-NEXT: insertps $96, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x60]
; SSE-NEXT: ## xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: insertps_from_shufflevector_2:
; AVX1: ## %bb.0: ## %entry
; AVX1-NEXT: vinsertps $96, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x60]
; AVX1-NEXT: ## xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: insertps_from_shufflevector_2:
; AVX512: ## %bb.0: ## %entry
; AVX512-NEXT: vinsertps $96, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x60]
; AVX512-NEXT: ## xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
entry:
%vecinit6 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 5, i32 3>
ret <4 x float> %vecinit6
}
|
|
|
|
; For loading an i32 from memory into an xmm register we use pinsrd
|
|
; instead of insertps
|
|
; Test: inserting a loaded i32 broadcast into lane 3; current lowering is a
; splat (pshufd/vpermilps/vbroadcastss) followed by a blend, not a pinsrd.
define <4 x i32> @pinsrd_from_shufflevector_i32(<4 x i32> %a, <4 x i32>* nocapture readonly %pb) {
|
|
; X86-SSE-LABEL: pinsrd_from_shufflevector_i32:
|
|
; X86-SSE: ## %bb.0: ## %entry
|
|
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-SSE-NEXT: pshufd $0, (%eax), %xmm1 ## encoding: [0x66,0x0f,0x70,0x08,0x00]
|
|
; X86-SSE-NEXT: ## xmm1 = mem[0,0,0,0]
|
|
; X86-SSE-NEXT: pblendw $192, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc1,0xc0]
|
|
; X86-SSE-NEXT: ## xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
|
|
; X86-SSE-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX1-LABEL: pinsrd_from_shufflevector_i32:
|
|
; X86-AVX1: ## %bb.0: ## %entry
|
|
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-AVX1-NEXT: vpermilps $0, (%eax), %xmm1 ## encoding: [0xc4,0xe3,0x79,0x04,0x08,0x00]
|
|
; X86-AVX1-NEXT: ## xmm1 = mem[0,0,0,0]
|
|
; X86-AVX1-NEXT: vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
|
|
; X86-AVX1-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[3]
|
|
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX512-LABEL: pinsrd_from_shufflevector_i32:
|
|
; X86-AVX512: ## %bb.0: ## %entry
|
|
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-AVX512-NEXT: vbroadcastss (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0x08]
|
|
; X86-AVX512-NEXT: vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
|
|
; X86-AVX512-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[3]
|
|
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X64-SSE-LABEL: pinsrd_from_shufflevector_i32:
|
|
; X64-SSE: ## %bb.0: ## %entry
|
|
; X64-SSE-NEXT: pshufd $0, (%rdi), %xmm1 ## encoding: [0x66,0x0f,0x70,0x0f,0x00]
|
|
; X64-SSE-NEXT: ## xmm1 = mem[0,0,0,0]
|
|
; X64-SSE-NEXT: pblendw $192, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc1,0xc0]
|
|
; X64-SSE-NEXT: ## xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
|
|
; X64-SSE-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX1-LABEL: pinsrd_from_shufflevector_i32:
|
|
; X64-AVX1: ## %bb.0: ## %entry
|
|
; X64-AVX1-NEXT: vpermilps $0, (%rdi), %xmm1 ## encoding: [0xc4,0xe3,0x79,0x04,0x0f,0x00]
|
|
; X64-AVX1-NEXT: ## xmm1 = mem[0,0,0,0]
|
|
; X64-AVX1-NEXT: vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
|
|
; X64-AVX1-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[3]
|
|
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX512-LABEL: pinsrd_from_shufflevector_i32:
|
|
; X64-AVX512: ## %bb.0: ## %entry
|
|
; X64-AVX512-NEXT: vbroadcastss (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0x0f]
|
|
; X64-AVX512-NEXT: vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
|
|
; X64-AVX512-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[3]
|
|
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
|
|
entry:
|
|
  %0 = load <4 x i32>, <4 x i32>* %pb, align 16
|
|
  %vecinit6 = shufflevector <4 x i32> %a, <4 x i32> %0, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
|
|
  ret <4 x i32> %vecinit6
|
|
}
|
|
|
|
; Test: integer shuffle taking lane 3 of %b into lane 1 of %a lowers to a
; pshufd/vpermilps ($238) followed by a word/dword blend.
define <4 x i32> @insertps_from_shufflevector_i32_2(<4 x i32> %a, <4 x i32> %b) {
|
|
; SSE-LABEL: insertps_from_shufflevector_i32_2:
|
|
; SSE: ## %bb.0: ## %entry
|
|
; SSE-NEXT: pshufd $238, %xmm1, %xmm1 ## encoding: [0x66,0x0f,0x70,0xc9,0xee]
|
|
; SSE-NEXT: ## xmm1 = xmm1[2,3,2,3]
|
|
; SSE-NEXT: pblendw $12, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc1,0x0c]
|
|
; SSE-NEXT: ## xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: insertps_from_shufflevector_i32_2:
|
|
; AVX1: ## %bb.0: ## %entry
|
|
; AVX1-NEXT: vpermilps $238, %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x79,0x04,0xc9,0xee]
|
|
; AVX1-NEXT: ## xmm1 = xmm1[2,3,2,3]
|
|
; AVX1-NEXT: vblendps $2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x02]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: insertps_from_shufflevector_i32_2:
|
|
; AVX512: ## %bb.0: ## %entry
|
|
; AVX512-NEXT: vpermilps $238, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xc9,0xee]
|
|
; AVX512-NEXT: ## xmm1 = xmm1[2,3,2,3]
|
|
; AVX512-NEXT: vblendps $2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x02]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
entry:
|
|
  %vecinit6 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 7, i32 2, i32 3>
|
|
  ret <4 x i32> %vecinit6
|
|
}
|
|
|
|
; Test: load+insertelement+shuffle folds into a single memory-operand
; (v)insertps $16 that inserts the scalar load into lane 1.
define <4 x float> @insertps_from_load_ins_elt_undef(<4 x float> %a, float* %b) {
|
|
; X86-SSE-LABEL: insertps_from_load_ins_elt_undef:
|
|
; X86-SSE: ## %bb.0:
|
|
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-SSE-NEXT: insertps $16, (%eax), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0x00,0x10]
|
|
; X86-SSE-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[2,3]
|
|
; X86-SSE-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX1-LABEL: insertps_from_load_ins_elt_undef:
|
|
; X86-AVX1: ## %bb.0:
|
|
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-AVX1-NEXT: vinsertps $16, (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x00,0x10]
|
|
; X86-AVX1-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[2,3]
|
|
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX512-LABEL: insertps_from_load_ins_elt_undef:
|
|
; X86-AVX512: ## %bb.0:
|
|
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-AVX512-NEXT: vinsertps $16, (%eax), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0x00,0x10]
|
|
; X86-AVX512-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[2,3]
|
|
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X64-SSE-LABEL: insertps_from_load_ins_elt_undef:
|
|
; X64-SSE: ## %bb.0:
|
|
; X64-SSE-NEXT: insertps $16, (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0x07,0x10]
|
|
; X64-SSE-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[2,3]
|
|
; X64-SSE-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX1-LABEL: insertps_from_load_ins_elt_undef:
|
|
; X64-AVX1: ## %bb.0:
|
|
; X64-AVX1-NEXT: vinsertps $16, (%rdi), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x07,0x10]
|
|
; X64-AVX1-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[2,3]
|
|
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX512-LABEL: insertps_from_load_ins_elt_undef:
|
|
; X64-AVX512: ## %bb.0:
|
|
; X64-AVX512-NEXT: vinsertps $16, (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0x07,0x10]
|
|
; X64-AVX512-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[2,3]
|
|
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
|
|
  %1 = load float, float* %b, align 4
|
|
  %2 = insertelement <4 x float> undef, float %1, i32 0
|
|
  %result = shufflevector <4 x float> %a, <4 x float> %2, <4 x i32> <i32 0, i32 4, i32 2, i32 3>
|
|
  ret <4 x float> %result
|
|
}
|
|
|
|
; TODO: Like on pinsrd_from_shufflevector_i32, remove this mov instr
|
|
; Test: the integer analogue of the previous case folds to a memory-operand
; (v)pinsrd $2; the TODO above notes the x86 address mov could also be removed.
define <4 x i32> @insertps_from_load_ins_elt_undef_i32(<4 x i32> %a, i32* %b) {
|
|
; X86-SSE-LABEL: insertps_from_load_ins_elt_undef_i32:
|
|
; X86-SSE: ## %bb.0:
|
|
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-SSE-NEXT: pinsrd $2, (%eax), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x22,0x00,0x02]
|
|
; X86-SSE-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX1-LABEL: insertps_from_load_ins_elt_undef_i32:
|
|
; X86-AVX1: ## %bb.0:
|
|
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-AVX1-NEXT: vpinsrd $2, (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x22,0x00,0x02]
|
|
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX512-LABEL: insertps_from_load_ins_elt_undef_i32:
|
|
; X86-AVX512: ## %bb.0:
|
|
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-AVX512-NEXT: vpinsrd $2, (%eax), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x00,0x02]
|
|
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X64-SSE-LABEL: insertps_from_load_ins_elt_undef_i32:
|
|
; X64-SSE: ## %bb.0:
|
|
; X64-SSE-NEXT: pinsrd $2, (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x22,0x07,0x02]
|
|
; X64-SSE-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX1-LABEL: insertps_from_load_ins_elt_undef_i32:
|
|
; X64-AVX1: ## %bb.0:
|
|
; X64-AVX1-NEXT: vpinsrd $2, (%rdi), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x22,0x07,0x02]
|
|
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX512-LABEL: insertps_from_load_ins_elt_undef_i32:
|
|
; X64-AVX512: ## %bb.0:
|
|
; X64-AVX512-NEXT: vpinsrd $2, (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x07,0x02]
|
|
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
|
|
  %1 = load i32, i32* %b, align 4
|
|
  %2 = insertelement <4 x i32> undef, i32 %1, i32 0
|
|
  %result = shufflevector <4 x i32> %a, <4 x i32> %2, <4 x i32> <i32 0, i32 1, i32 4, i32 3>
|
|
  ret <4 x i32> %result
|
|
}
|
|
|
|
;;;;;; Shuffles optimizable with a single insertps or blend instruction
|
|
; Test: building [x0,x1,x2,0.0] elementwise lowers to zeroing a register and a
; single blendps $8 (replace lane 3 with zero).
define <4 x float> @shuf_XYZ0(<4 x float> %x, <4 x float> %a) {
|
|
; SSE-LABEL: shuf_XYZ0:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: xorps %xmm1, %xmm1 ## encoding: [0x0f,0x57,0xc9]
|
|
; SSE-NEXT: blendps $8, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc1,0x08]
|
|
; SSE-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[3]
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: shuf_XYZ0:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
|
|
; AVX1-NEXT: vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[3]
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: shuf_XYZ0:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x57,0xc9]
|
|
; AVX512-NEXT: vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[3]
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
  %vecext = extractelement <4 x float> %x, i32 0
|
|
  %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
|
|
  %vecext1 = extractelement <4 x float> %x, i32 1
|
|
  %vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 1
|
|
  %vecext3 = extractelement <4 x float> %x, i32 2
|
|
  %vecinit4 = insertelement <4 x float> %vecinit2, float %vecext3, i32 2
|
|
  %vecinit5 = insertelement <4 x float> %vecinit4, float 0.0, i32 3
|
|
  ret <4 x float> %vecinit5
|
|
}
|
|
|
|
; Test: building [x0,x1,0,0] lowers to a single (v)movq, which zeroes the
; upper 64 bits implicitly.
define <4 x float> @shuf_XY00(<4 x float> %x, <4 x float> %a) {
|
|
; SSE-LABEL: shuf_XY00:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: movq %xmm0, %xmm0 ## encoding: [0xf3,0x0f,0x7e,0xc0]
|
|
; SSE-NEXT: ## xmm0 = xmm0[0],zero
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: shuf_XY00:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vmovq %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x7e,0xc0]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0],zero
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: shuf_XY00:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vmovq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc0]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0],zero
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
  %vecext = extractelement <4 x float> %x, i32 0
|
|
  %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
|
|
  %vecext1 = extractelement <4 x float> %x, i32 1
|
|
  %vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 1
|
|
  %vecinit3 = insertelement <4 x float> %vecinit2, float 0.0, i32 2
|
|
  %vecinit4 = insertelement <4 x float> %vecinit3, float 0.0, i32 3
|
|
  ret <4 x float> %vecinit4
|
|
}
|
|
|
|
; Test: building [x0,x1,x1,0] lowers to one self-insertps $104
; (source lane select + zero mask in the immediate).
define <4 x float> @shuf_XYY0(<4 x float> %x, <4 x float> %a) {
|
|
; SSE-LABEL: shuf_XYY0:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: insertps $104, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc0,0x68]
|
|
; SSE-NEXT: ## xmm0 = xmm0[0,1,1],zero
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: shuf_XYY0:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vinsertps $104, %xmm0, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc0,0x68]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0,1,1],zero
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: shuf_XYY0:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vinsertps $104, %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc0,0x68]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0,1,1],zero
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
  %vecext = extractelement <4 x float> %x, i32 0
|
|
  %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
|
|
  %vecext1 = extractelement <4 x float> %x, i32 1
|
|
  %vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 1
|
|
  %vecinit4 = insertelement <4 x float> %vecinit2, float %vecext1, i32 2
|
|
  %vecinit5 = insertelement <4 x float> %vecinit4, float 0.0, i32 3
|
|
  ret <4 x float> %vecinit5
|
|
}
|
|
|
|
; Test: building [x0,x1,x3,0] lowers to one self-insertps $232.
define <4 x float> @shuf_XYW0(<4 x float> %x, <4 x float> %a) {
|
|
; SSE-LABEL: shuf_XYW0:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: insertps $232, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc0,0xe8]
|
|
; SSE-NEXT: ## xmm0 = xmm0[0,1,3],zero
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: shuf_XYW0:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vinsertps $232, %xmm0, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc0,0xe8]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0,1,3],zero
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: shuf_XYW0:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vinsertps $232, %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc0,0xe8]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0,1,3],zero
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
  %vecext = extractelement <4 x float> %x, i32 0
|
|
  %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
|
|
  %vecext1 = extractelement <4 x float> %x, i32 1
|
|
  %vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 1
|
|
  %vecext2 = extractelement <4 x float> %x, i32 3
|
|
  %vecinit3 = insertelement <4 x float> %vecinit2, float %vecext2, i32 2
|
|
  %vecinit4 = insertelement <4 x float> %vecinit3, float 0.0, i32 3
|
|
  ret <4 x float> %vecinit4
|
|
}
|
|
|
|
; Test: building [x3,0,0,x3] lowers to one self-insertps $198.
define <4 x float> @shuf_W00W(<4 x float> %x, <4 x float> %a) {
|
|
; SSE-LABEL: shuf_W00W:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: insertps $198, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc0,0xc6]
|
|
; SSE-NEXT: ## xmm0 = xmm0[3],zero,zero,xmm0[3]
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: shuf_W00W:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vinsertps $198, %xmm0, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc0,0xc6]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[3],zero,zero,xmm0[3]
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: shuf_W00W:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vinsertps $198, %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc0,0xc6]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[3],zero,zero,xmm0[3]
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
  %vecext = extractelement <4 x float> %x, i32 3
|
|
  %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
|
|
  %vecinit2 = insertelement <4 x float> %vecinit, float 0.0, i32 1
|
|
  %vecinit3 = insertelement <4 x float> %vecinit2, float 0.0, i32 2
|
|
  %vecinit4 = insertelement <4 x float> %vecinit3, float %vecext, i32 3
|
|
  ret <4 x float> %vecinit4
|
|
}
|
|
|
|
; Test: building [x0,0,0,a0] lowers to one insertps $54 combining the
; second-source lane insert with the zero mask.
define <4 x float> @shuf_X00A(<4 x float> %x, <4 x float> %a) {
|
|
; SSE-LABEL: shuf_X00A:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: insertps $54, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x36]
|
|
; SSE-NEXT: ## xmm0 = xmm0[0],zero,zero,xmm1[0]
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: shuf_X00A:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vinsertps $54, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x36]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0],zero,zero,xmm1[0]
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: shuf_X00A:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vinsertps $54, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x36]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0],zero,zero,xmm1[0]
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
  %vecext = extractelement <4 x float> %x, i32 0
|
|
  %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
|
|
  %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 1
|
|
  %vecinit2 = insertelement <4 x float> %vecinit1, float 0.0, i32 2
|
|
  %vecinit4 = shufflevector <4 x float> %vecinit2, <4 x float> %a, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
|
|
  ret <4 x float> %vecinit4
|
|
}
|
|
|
|
; Test: same pattern as shuf_X00A but both lanes come from %x, so the
; lowering is a single self-insertps $54.
define <4 x float> @shuf_X00X(<4 x float> %x, <4 x float> %a) {
|
|
; SSE-LABEL: shuf_X00X:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: insertps $54, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc0,0x36]
|
|
; SSE-NEXT: ## xmm0 = xmm0[0],zero,zero,xmm0[0]
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: shuf_X00X:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vinsertps $54, %xmm0, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc0,0x36]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0],zero,zero,xmm0[0]
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: shuf_X00X:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vinsertps $54, %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc0,0x36]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0],zero,zero,xmm0[0]
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
  %vecext = extractelement <4 x float> %x, i32 0
|
|
  %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
|
|
  %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 1
|
|
  %vecinit2 = insertelement <4 x float> %vecinit1, float 0.0, i32 2
|
|
  %vecinit4 = shufflevector <4 x float> %vecinit2, <4 x float> %x, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
|
|
  ret <4 x float> %vecinit4
|
|
}
|
|
|
|
; Test: building [x0,0,x1,a2] needs two instructions today:
; unpcklps against a zero register, then insertps $176 for lane 3.
define <4 x float> @shuf_X0YC(<4 x float> %x, <4 x float> %a) {
|
|
; SSE-LABEL: shuf_X0YC:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: xorps %xmm2, %xmm2 ## encoding: [0x0f,0x57,0xd2]
|
|
; SSE-NEXT: unpcklps %xmm2, %xmm0 ## encoding: [0x0f,0x14,0xc2]
|
|
; SSE-NEXT: ## xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
|
|
; SSE-NEXT: insertps $176, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0xb0]
|
|
; SSE-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[2]
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: shuf_X0YC:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vxorps %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x57,0xd2]
|
|
; AVX1-NEXT: vunpcklps %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x14,0xc2]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
|
|
; AVX1-NEXT: vinsertps $176, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xb0]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[2]
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: shuf_X0YC:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe8,0x57,0xd2]
|
|
; AVX512-NEXT: vunpcklps %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x14,0xc2]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
|
|
; AVX512-NEXT: vinsertps $176, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xb0]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[2]
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
  %vecext = extractelement <4 x float> %x, i32 0
|
|
  %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
|
|
  %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 1
|
|
  %vecinit3 = shufflevector <4 x float> %vecinit1, <4 x float> %x, <4 x i32> <i32 0, i32 1, i32 5, i32 undef>
|
|
  %vecinit5 = shufflevector <4 x float> %vecinit3, <4 x float> %a, <4 x i32> <i32 0, i32 1, i32 2, i32 6>
|
|
  ret <4 x float> %vecinit5
|
|
}
|
|
|
|
; Test: integer version of shuf_XYZ0 — [x0,x1,x2,0] is still a zero register
; plus one blendps $8 on all targets.
define <4 x i32> @i32_shuf_XYZ0(<4 x i32> %x, <4 x i32> %a) {
|
|
; SSE-LABEL: i32_shuf_XYZ0:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: xorps %xmm1, %xmm1 ## encoding: [0x0f,0x57,0xc9]
|
|
; SSE-NEXT: blendps $8, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc1,0x08]
|
|
; SSE-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[3]
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: i32_shuf_XYZ0:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
|
|
; AVX1-NEXT: vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[3]
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: i32_shuf_XYZ0:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x57,0xc9]
|
|
; AVX512-NEXT: vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[3]
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
  %vecext = extractelement <4 x i32> %x, i32 0
|
|
  %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
|
|
  %vecext1 = extractelement <4 x i32> %x, i32 1
|
|
  %vecinit2 = insertelement <4 x i32> %vecinit, i32 %vecext1, i32 1
|
|
  %vecext3 = extractelement <4 x i32> %x, i32 2
|
|
  %vecinit4 = insertelement <4 x i32> %vecinit2, i32 %vecext3, i32 2
|
|
  %vecinit5 = insertelement <4 x i32> %vecinit4, i32 0, i32 3
|
|
  ret <4 x i32> %vecinit5
|
|
}
|
|
|
|
; Test: integer version of shuf_XY00 — [x0,x1,0,0] is a single (v)movq.
define <4 x i32> @i32_shuf_XY00(<4 x i32> %x, <4 x i32> %a) {
|
|
; SSE-LABEL: i32_shuf_XY00:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: movq %xmm0, %xmm0 ## encoding: [0xf3,0x0f,0x7e,0xc0]
|
|
; SSE-NEXT: ## xmm0 = xmm0[0],zero
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: i32_shuf_XY00:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vmovq %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x7e,0xc0]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0],zero
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: i32_shuf_XY00:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vmovq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc0]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0],zero
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
  %vecext = extractelement <4 x i32> %x, i32 0
|
|
  %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
|
|
  %vecext1 = extractelement <4 x i32> %x, i32 1
|
|
  %vecinit2 = insertelement <4 x i32> %vecinit, i32 %vecext1, i32 1
|
|
  %vecinit3 = insertelement <4 x i32> %vecinit2, i32 0, i32 2
|
|
  %vecinit4 = insertelement <4 x i32> %vecinit3, i32 0, i32 3
|
|
  ret <4 x i32> %vecinit4
|
|
}
|
|
|
|
; Test: integer [x0,x1,x1,0] has no insertps equivalent, so the lowering is a
; shuffle ($212) followed by a blend against a zeroed register.
define <4 x i32> @i32_shuf_XYY0(<4 x i32> %x, <4 x i32> %a) {
|
|
; SSE-LABEL: i32_shuf_XYY0:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: pshufd $212, %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x70,0xc8,0xd4]
|
|
; SSE-NEXT: ## xmm1 = xmm0[0,1,1,3]
|
|
; SSE-NEXT: pxor %xmm0, %xmm0 ## encoding: [0x66,0x0f,0xef,0xc0]
|
|
; SSE-NEXT: pblendw $63, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc1,0x3f]
|
|
; SSE-NEXT: ## xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: i32_shuf_XYY0:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vpermilps $212, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xd4]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0,1,1,3]
|
|
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
|
|
; AVX1-NEXT: vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[3]
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: i32_shuf_XYY0:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vpermilps $212, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xd4]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0,1,1,3]
|
|
; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x57,0xc9]
|
|
; AVX512-NEXT: vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[3]
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
  %vecext = extractelement <4 x i32> %x, i32 0
|
|
  %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
|
|
  %vecext1 = extractelement <4 x i32> %x, i32 1
|
|
  %vecinit2 = insertelement <4 x i32> %vecinit, i32 %vecext1, i32 1
|
|
  %vecinit4 = insertelement <4 x i32> %vecinit2, i32 %vecext1, i32 2
|
|
  %vecinit5 = insertelement <4 x i32> %vecinit4, i32 0, i32 3
|
|
  ret <4 x i32> %vecinit5
|
|
}
|
|
|
|
; Test: integer [x0,x1,x3,0] lowers to shuffle $244 plus blend-with-zero.
define <4 x i32> @i32_shuf_XYW0(<4 x i32> %x, <4 x i32> %a) {
|
|
; SSE-LABEL: i32_shuf_XYW0:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: pshufd $244, %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x70,0xc8,0xf4]
|
|
; SSE-NEXT: ## xmm1 = xmm0[0,1,3,3]
|
|
; SSE-NEXT: pxor %xmm0, %xmm0 ## encoding: [0x66,0x0f,0xef,0xc0]
|
|
; SSE-NEXT: pblendw $63, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc1,0x3f]
|
|
; SSE-NEXT: ## xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: i32_shuf_XYW0:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vpermilps $244, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xf4]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0,1,3,3]
|
|
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
|
|
; AVX1-NEXT: vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[3]
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: i32_shuf_XYW0:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vpermilps $244, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xf4]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0,1,3,3]
|
|
; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x57,0xc9]
|
|
; AVX512-NEXT: vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[3]
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
  %vecext = extractelement <4 x i32> %x, i32 0
|
|
  %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
|
|
  %vecext1 = extractelement <4 x i32> %x, i32 1
|
|
  %vecinit2 = insertelement <4 x i32> %vecinit, i32 %vecext1, i32 1
|
|
  %vecext2 = extractelement <4 x i32> %x, i32 3
|
|
  %vecinit3 = insertelement <4 x i32> %vecinit2, i32 %vecext2, i32 2
|
|
  %vecinit4 = insertelement <4 x i32> %vecinit3, i32 0, i32 3
|
|
  ret <4 x i32> %vecinit4
|
|
}
|
|
|
|
; Test: integer [x3,0,0,x3] lowers to a lane-3 splat ($255) plus
; blend-with-zero on the middle lanes.
define <4 x i32> @i32_shuf_W00W(<4 x i32> %x, <4 x i32> %a) {
|
|
; SSE-LABEL: i32_shuf_W00W:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: pshufd $255, %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x70,0xc8,0xff]
|
|
; SSE-NEXT: ## xmm1 = xmm0[3,3,3,3]
|
|
; SSE-NEXT: pxor %xmm0, %xmm0 ## encoding: [0x66,0x0f,0xef,0xc0]
|
|
; SSE-NEXT: pblendw $195, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc1,0xc3]
|
|
; SSE-NEXT: ## xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: i32_shuf_W00W:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vpermilps $255, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xff]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[3,3,3,3]
|
|
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
|
|
; AVX1-NEXT: vblendps $6, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x06]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: i32_shuf_W00W:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vpermilps $255, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xc0,0xff]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[3,3,3,3]
|
|
; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x57,0xc9]
|
|
; AVX512-NEXT: vblendps $6, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x06]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
  %vecext = extractelement <4 x i32> %x, i32 3
|
|
  %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
|
|
  %vecinit2 = insertelement <4 x i32> %vecinit, i32 0, i32 1
|
|
  %vecinit3 = insertelement <4 x i32> %vecinit2, i32 0, i32 2
|
|
  %vecinit4 = insertelement <4 x i32> %vecinit3, i32 %vecext, i32 3
|
|
  ret <4 x i32> %vecinit4
|
|
}
|
|
|
|
; Test: integer [x0,0,0,a0] needs three instructions: blend-with-zero of %x,
; splat of %a (pshufd/vpermilps/vbroadcastss), and a final blend into lane 3.
define <4 x i32> @i32_shuf_X00A(<4 x i32> %x, <4 x i32> %a) {
|
|
; SSE-LABEL: i32_shuf_X00A:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: pxor %xmm2, %xmm2 ## encoding: [0x66,0x0f,0xef,0xd2]
|
|
; SSE-NEXT: pblendw $252, %xmm2, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc2,0xfc]
|
|
; SSE-NEXT: ## xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
|
|
; SSE-NEXT: pshufd $0, %xmm1, %xmm1 ## encoding: [0x66,0x0f,0x70,0xc9,0x00]
|
|
; SSE-NEXT: ## xmm1 = xmm1[0,0,0,0]
|
|
; SSE-NEXT: pblendw $192, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc1,0xc0]
|
|
; SSE-NEXT: ## xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: i32_shuf_X00A:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vxorps %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x57,0xd2]
|
|
; AVX1-NEXT: vblendps $1, %xmm0, %xmm2, %xmm0 ## encoding: [0xc4,0xe3,0x69,0x0c,0xc0,0x01]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0],xmm2[1,2,3]
|
|
; AVX1-NEXT: vpermilps $0, %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x79,0x04,0xc9,0x00]
|
|
; AVX1-NEXT: ## xmm1 = xmm1[0,0,0,0]
|
|
; AVX1-NEXT: vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[3]
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: i32_shuf_X00A:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x57,0xd2]
|
|
; AVX512-NEXT: vblendps $1, %xmm0, %xmm2, %xmm0 ## encoding: [0xc4,0xe3,0x69,0x0c,0xc0,0x01]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0],xmm2[1,2,3]
|
|
; AVX512-NEXT: vbroadcastss %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0xc9]
|
|
; AVX512-NEXT: vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[3]
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
  %vecext = extractelement <4 x i32> %x, i32 0
|
|
  %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
|
|
  %vecinit1 = insertelement <4 x i32> %vecinit, i32 0, i32 1
|
|
  %vecinit2 = insertelement <4 x i32> %vecinit1, i32 0, i32 2
|
|
  %vecinit4 = shufflevector <4 x i32> %vecinit2, <4 x i32> %a, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
|
|
  ret <4 x i32> %vecinit4
|
|
}
|
|
|
|
; Test: integer [x0,0,0,x0] — both live lanes come from %x, so the lowering is
; a lane-0 splat plus one blend against a zeroed register.
define <4 x i32> @i32_shuf_X00X(<4 x i32> %x, <4 x i32> %a) {
|
|
; SSE-LABEL: i32_shuf_X00X:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: pxor %xmm1, %xmm1 ## encoding: [0x66,0x0f,0xef,0xc9]
|
|
; SSE-NEXT: pshufd $0, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x70,0xc0,0x00]
|
|
; SSE-NEXT: ## xmm0 = xmm0[0,0,0,0]
|
|
; SSE-NEXT: pblendw $60, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc1,0x3c]
|
|
; SSE-NEXT: ## xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: i32_shuf_X00X:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
|
|
; AVX1-NEXT: vpermilps $0, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x04,0xc0,0x00]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0,0,0,0]
|
|
; AVX1-NEXT: vblendps $6, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x06]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: i32_shuf_X00X:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x57,0xc9]
|
|
; AVX512-NEXT: vbroadcastss %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0xc0]
|
|
; AVX512-NEXT: vblendps $6, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x06]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
  %vecext = extractelement <4 x i32> %x, i32 0
|
|
  %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
|
|
  %vecinit1 = insertelement <4 x i32> %vecinit, i32 0, i32 1
|
|
  %vecinit2 = insertelement <4 x i32> %vecinit1, i32 0, i32 2
|
|
  %vecinit4 = shufflevector <4 x i32> %vecinit2, <4 x i32> %x, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
|
|
  ret <4 x i32> %vecinit4
|
|
}
|
|
|
|
; Builds <x0, 0, x1, a2>: <x0, 0> from inserts, x1 via the first shuffle
; (mask index 5 selects element 1 of %x), and element 2 of %a via the second
; shuffle (mask index 6). Exercises pmovzxdq + pshufd + blend lowering.
define <4 x i32> @i32_shuf_X0YC(<4 x i32> %x, <4 x i32> %a) {
|
|
; SSE-LABEL: i32_shuf_X0YC:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: pmovzxdq %xmm0, %xmm2 ## encoding: [0x66,0x0f,0x38,0x35,0xd0]
|
|
; SSE-NEXT: ## xmm2 = xmm0[0],zero,xmm0[1],zero
|
|
; SSE-NEXT: pshufd $170, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x70,0xc1,0xaa]
|
|
; SSE-NEXT: ## xmm0 = xmm1[2,2,2,2]
|
|
; SSE-NEXT: pblendw $63, %xmm2, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc2,0x3f]
|
|
; SSE-NEXT: ## xmm0 = xmm2[0,1,2,3,4,5],xmm0[6,7]
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: i32_shuf_X0YC:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vpmovzxdq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x35,0xc0]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0],zero,xmm0[1],zero
|
|
; AVX1-NEXT: vpshufd $170, %xmm1, %xmm1 ## encoding: [0xc5,0xf9,0x70,0xc9,0xaa]
|
|
; AVX1-NEXT: ## xmm1 = xmm1[2,2,2,2]
|
|
; AVX1-NEXT: vpblendw $192, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0e,0xc1,0xc0]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: i32_shuf_X0YC:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vpmovzxdq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x35,0xc0]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0],zero,xmm0[1],zero
|
|
; AVX512-NEXT: vpshufd $170, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x70,0xc9,0xaa]
|
|
; AVX512-NEXT: ## xmm1 = xmm1[2,2,2,2]
|
|
; AVX512-NEXT: vpblendd $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x02,0xc1,0x08]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[3]
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
%vecext = extractelement <4 x i32> %x, i32 0
|
|
%vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
|
|
%vecinit1 = insertelement <4 x i32> %vecinit, i32 0, i32 1
|
|
%vecinit3 = shufflevector <4 x i32> %vecinit1, <4 x i32> %x, <4 x i32> <i32 0, i32 1, i32 5, i32 undef>
|
|
%vecinit5 = shufflevector <4 x i32> %vecinit3, <4 x i32> %a, <4 x i32> <i32 0, i32 1, i32 2, i32 6>
|
|
ret <4 x i32> %vecinit5
|
|
}
|
|
|
|
;; Test for a bug in the first implementation of LowerBuildVectorv4X86
|
|
; Builds <x0, x1, x2, 0.0> with no undef lanes, then computes
; select(vecinit5 < x, x, vecinit5) — i.e. a lane-wise max — which lowers
; to blendps + maxps. Regression test for the LowerBuildVectorv4X86 bug
; mentioned in the preceding file comment.
define < 4 x float> @test_insertps_no_undef(<4 x float> %x) {
|
|
; SSE-LABEL: test_insertps_no_undef:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: xorps %xmm1, %xmm1 ## encoding: [0x0f,0x57,0xc9]
|
|
; SSE-NEXT: blendps $7, %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc8,0x07]
|
|
; SSE-NEXT: ## xmm1 = xmm0[0,1,2],xmm1[3]
|
|
; SSE-NEXT: maxps %xmm1, %xmm0 ## encoding: [0x0f,0x5f,0xc1]
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: test_insertps_no_undef:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
|
|
; AVX1-NEXT: vblendps $8, %xmm1, %xmm0, %xmm1 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc9,0x08]
|
|
; AVX1-NEXT: ## xmm1 = xmm0[0,1,2],xmm1[3]
|
|
; AVX1-NEXT: vmaxps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x5f,0xc1]
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: test_insertps_no_undef:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x57,0xc9]
|
|
; AVX512-NEXT: vblendps $8, %xmm1, %xmm0, %xmm1 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc9,0x08]
|
|
; AVX512-NEXT: ## xmm1 = xmm0[0,1,2],xmm1[3]
|
|
; AVX512-NEXT: vmaxps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5f,0xc1]
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
%vecext = extractelement <4 x float> %x, i32 0
|
|
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0
|
|
%vecext1 = extractelement <4 x float> %x, i32 1
|
|
%vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 1
|
|
%vecext3 = extractelement <4 x float> %x, i32 2
|
|
%vecinit4 = insertelement <4 x float> %vecinit2, float %vecext3, i32 2
|
|
%vecinit5 = insertelement <4 x float> %vecinit4, float 0.0, i32 3
|
|
%mask = fcmp olt <4 x float> %vecinit5, %x
|
|
%res = select <4 x i1> %mask, <4 x float> %x, <4 x float>%vecinit5
|
|
ret <4 x float> %res
|
|
}
|
|
|
|
; A vector select with an <8 x i1> mask: sign-extends the mask in-register
; (psllw/psraw by 15) and uses pblendvb on SSE/AVX1; AVX512BW/VL converts
; the mask to %k1 and uses the masked vpblendmw form instead.
define <8 x i16> @blendvb_fallback(<8 x i1> %mask, <8 x i16> %x, <8 x i16> %y) {
|
|
; SSE-LABEL: blendvb_fallback:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: psllw $15, %xmm0 ## encoding: [0x66,0x0f,0x71,0xf0,0x0f]
|
|
; SSE-NEXT: psraw $15, %xmm0 ## encoding: [0x66,0x0f,0x71,0xe0,0x0f]
|
|
; SSE-NEXT: pblendvb %xmm0, %xmm1, %xmm2 ## encoding: [0x66,0x0f,0x38,0x10,0xd1]
|
|
; SSE-NEXT: movdqa %xmm2, %xmm0 ## encoding: [0x66,0x0f,0x6f,0xc2]
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: blendvb_fallback:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vpsllw $15, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xf0,0x0f]
|
|
; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xe0,0x0f]
|
|
; AVX1-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm0 ## encoding: [0xc4,0xe3,0x69,0x4c,0xc1,0x00]
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: blendvb_fallback:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vpsllw $15, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xf0,0x0f]
|
|
; AVX512-NEXT: vpmovw2m %xmm0, %k1 ## encoding: [0x62,0xf2,0xfe,0x08,0x29,0xc8]
|
|
; AVX512-NEXT: vpblendmw %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x66,0xc1]
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
%ret = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %y
|
|
ret <8 x i16> %ret
|
|
}
|
|
|
|
; On X86, account for the argument's move to registers
|
|
; insertps intrinsic with imm8 = 48 (0x30: source count 0, dest lane 3) where
; the second operand comes from an aligned vector load. Checks that the load
; stays a movaps + register insertps. Per the file comment above, the X86
; prefixes additionally verify the argument's move from the stack to a register.
define <4 x float> @insertps_from_vector_load(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
|
|
; X86-SSE-LABEL: insertps_from_vector_load:
|
|
; X86-SSE: ## %bb.0:
|
|
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-SSE-NEXT: movaps (%eax), %xmm1 ## encoding: [0x0f,0x28,0x08]
|
|
; X86-SSE-NEXT: insertps $48, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x30]
|
|
; X86-SSE-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[0]
|
|
; X86-SSE-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX1-LABEL: insertps_from_vector_load:
|
|
; X86-AVX1: ## %bb.0:
|
|
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-AVX1-NEXT: vmovaps (%eax), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x08]
|
|
; X86-AVX1-NEXT: vinsertps $48, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
|
|
; X86-AVX1-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[0]
|
|
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX512-LABEL: insertps_from_vector_load:
|
|
; X86-AVX512: ## %bb.0:
|
|
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-AVX512-NEXT: vmovaps (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x08]
|
|
; X86-AVX512-NEXT: vinsertps $48, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
|
|
; X86-AVX512-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[0]
|
|
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X64-SSE-LABEL: insertps_from_vector_load:
|
|
; X64-SSE: ## %bb.0:
|
|
; X64-SSE-NEXT: movaps (%rdi), %xmm1 ## encoding: [0x0f,0x28,0x0f]
|
|
; X64-SSE-NEXT: insertps $48, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x30]
|
|
; X64-SSE-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[0]
|
|
; X64-SSE-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX1-LABEL: insertps_from_vector_load:
|
|
; X64-AVX1: ## %bb.0:
|
|
; X64-AVX1-NEXT: vmovaps (%rdi), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0f]
|
|
; X64-AVX1-NEXT: vinsertps $48, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
|
|
; X64-AVX1-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[0]
|
|
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX512-LABEL: insertps_from_vector_load:
|
|
; X64-AVX512: ## %bb.0:
|
|
; X64-AVX512-NEXT: vmovaps (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x0f]
|
|
; X64-AVX512-NEXT: vinsertps $48, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
|
|
; X64-AVX512-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[0]
|
|
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
|
|
%1 = load <4 x float>, <4 x float>* %pb, align 16
|
|
%2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 48)
|
|
ret <4 x float> %2
|
|
}
|
|
|
|
;; Use a non-zero CountS for insertps
|
|
;; Try to match a bit more of the instr, since we need the load's offset.
|
|
; Same as insertps_from_vector_load but with imm8 = 96 (0x60: source
; element 1 -> dest lane 1), i.e. a non-zero CountS, as announced by the
; ";; Use a non-zero CountS" comment above. The load is not folded; the
; insertps keeps the register form with the original immediate.
define <4 x float> @insertps_from_vector_load_offset(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
|
|
; X86-SSE-LABEL: insertps_from_vector_load_offset:
|
|
; X86-SSE: ## %bb.0:
|
|
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-SSE-NEXT: movaps (%eax), %xmm1 ## encoding: [0x0f,0x28,0x08]
|
|
; X86-SSE-NEXT: insertps $96, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x60]
|
|
; X86-SSE-NEXT: ## xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
|
|
; X86-SSE-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX1-LABEL: insertps_from_vector_load_offset:
|
|
; X86-AVX1: ## %bb.0:
|
|
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-AVX1-NEXT: vmovaps (%eax), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x08]
|
|
; X86-AVX1-NEXT: vinsertps $96, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x60]
|
|
; X86-AVX1-NEXT: ## xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
|
|
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX512-LABEL: insertps_from_vector_load_offset:
|
|
; X86-AVX512: ## %bb.0:
|
|
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-AVX512-NEXT: vmovaps (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x08]
|
|
; X86-AVX512-NEXT: vinsertps $96, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x60]
|
|
; X86-AVX512-NEXT: ## xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
|
|
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X64-SSE-LABEL: insertps_from_vector_load_offset:
|
|
; X64-SSE: ## %bb.0:
|
|
; X64-SSE-NEXT: movaps (%rdi), %xmm1 ## encoding: [0x0f,0x28,0x0f]
|
|
; X64-SSE-NEXT: insertps $96, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x60]
|
|
; X64-SSE-NEXT: ## xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
|
|
; X64-SSE-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX1-LABEL: insertps_from_vector_load_offset:
|
|
; X64-AVX1: ## %bb.0:
|
|
; X64-AVX1-NEXT: vmovaps (%rdi), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0f]
|
|
; X64-AVX1-NEXT: vinsertps $96, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x60]
|
|
; X64-AVX1-NEXT: ## xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
|
|
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX512-LABEL: insertps_from_vector_load_offset:
|
|
; X64-AVX512: ## %bb.0:
|
|
; X64-AVX512-NEXT: vmovaps (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x0f]
|
|
; X64-AVX512-NEXT: vinsertps $96, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x60]
|
|
; X64-AVX512-NEXT: ## xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
|
|
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
|
|
%1 = load <4 x float>, <4 x float>* %pb, align 16
|
|
%2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 96)
|
|
ret <4 x float> %2
|
|
}
|
|
|
|
;; Try to match a bit more of the instr, since we need the load's offset.
|
|
; insertps (imm8 = 192 = 0xc0: source element 3 -> dest lane 0) where the
; vector operand is loaded through a GEP scaled by %index. Checks that the
; index is materialized as a shift-by-4 and a scaled/indexed movaps, with
; the insertps staying in register form.
define <4 x float> @insertps_from_vector_load_offset_2(<4 x float> %a, <4 x float>* nocapture readonly %pb, i64 %index) {
|
|
; X86-SSE-LABEL: insertps_from_vector_load_offset_2:
|
|
; X86-SSE: ## %bb.0:
|
|
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
|
|
; X86-SSE-NEXT: shll $4, %ecx ## encoding: [0xc1,0xe1,0x04]
|
|
; X86-SSE-NEXT: movaps (%eax,%ecx), %xmm1 ## encoding: [0x0f,0x28,0x0c,0x08]
|
|
; X86-SSE-NEXT: insertps $192, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0xc0]
|
|
; X86-SSE-NEXT: ## xmm0 = xmm1[3],xmm0[1,2,3]
|
|
; X86-SSE-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX1-LABEL: insertps_from_vector_load_offset_2:
|
|
; X86-AVX1: ## %bb.0:
|
|
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
|
|
; X86-AVX1-NEXT: shll $4, %ecx ## encoding: [0xc1,0xe1,0x04]
|
|
; X86-AVX1-NEXT: vmovaps (%eax,%ecx), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0c,0x08]
|
|
; X86-AVX1-NEXT: vinsertps $192, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xc0]
|
|
; X86-AVX1-NEXT: ## xmm0 = xmm1[3],xmm0[1,2,3]
|
|
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX512-LABEL: insertps_from_vector_load_offset_2:
|
|
; X86-AVX512: ## %bb.0:
|
|
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
|
|
; X86-AVX512-NEXT: shll $4, %ecx ## encoding: [0xc1,0xe1,0x04]
|
|
; X86-AVX512-NEXT: vmovaps (%eax,%ecx), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x0c,0x08]
|
|
; X86-AVX512-NEXT: vinsertps $192, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xc0]
|
|
; X86-AVX512-NEXT: ## xmm0 = xmm1[3],xmm0[1,2,3]
|
|
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X64-SSE-LABEL: insertps_from_vector_load_offset_2:
|
|
; X64-SSE: ## %bb.0:
|
|
; X64-SSE-NEXT: shlq $4, %rsi ## encoding: [0x48,0xc1,0xe6,0x04]
|
|
; X64-SSE-NEXT: movaps (%rdi,%rsi), %xmm1 ## encoding: [0x0f,0x28,0x0c,0x37]
|
|
; X64-SSE-NEXT: insertps $192, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0xc0]
|
|
; X64-SSE-NEXT: ## xmm0 = xmm1[3],xmm0[1,2,3]
|
|
; X64-SSE-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX1-LABEL: insertps_from_vector_load_offset_2:
|
|
; X64-AVX1: ## %bb.0:
|
|
; X64-AVX1-NEXT: shlq $4, %rsi ## encoding: [0x48,0xc1,0xe6,0x04]
|
|
; X64-AVX1-NEXT: vmovaps (%rdi,%rsi), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0c,0x37]
|
|
; X64-AVX1-NEXT: vinsertps $192, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xc0]
|
|
; X64-AVX1-NEXT: ## xmm0 = xmm1[3],xmm0[1,2,3]
|
|
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX512-LABEL: insertps_from_vector_load_offset_2:
|
|
; X64-AVX512: ## %bb.0:
|
|
; X64-AVX512-NEXT: shlq $4, %rsi ## encoding: [0x48,0xc1,0xe6,0x04]
|
|
; X64-AVX512-NEXT: vmovaps (%rdi,%rsi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x0c,0x37]
|
|
; X64-AVX512-NEXT: vinsertps $192, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xc0]
|
|
; X64-AVX512-NEXT: ## xmm0 = xmm1[3],xmm0[1,2,3]
|
|
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
|
|
%1 = getelementptr inbounds <4 x float>, <4 x float>* %pb, i64 %index
|
|
%2 = load <4 x float>, <4 x float>* %1, align 16
|
|
%3 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %2, i32 192)
|
|
ret <4 x float> %3
|
|
}
|
|
|
|
; A scalar float load splatted into all four lanes and fed to insertps
; (imm8 = 48). Since insertps only reads source element 0, the splat is
; dead and the whole thing folds to a single memory-operand (v)insertps
; with a scaled index — no explicit load or broadcast remains.
define <4 x float> @insertps_from_broadcast_loadf32(<4 x float> %a, float* nocapture readonly %fb, i64 %index) {
|
|
; X86-SSE-LABEL: insertps_from_broadcast_loadf32:
|
|
; X86-SSE: ## %bb.0:
|
|
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
|
|
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
|
|
; X86-SSE-NEXT: insertps $48, (%ecx,%eax,4), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0x04,0x81,0x30]
|
|
; X86-SSE-NEXT: ## xmm0 = xmm0[0,1,2],mem[0]
|
|
; X86-SSE-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX1-LABEL: insertps_from_broadcast_loadf32:
|
|
; X86-AVX1: ## %bb.0:
|
|
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
|
|
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
|
|
; X86-AVX1-NEXT: vinsertps $48, (%ecx,%eax,4), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x04,0x81,0x30]
|
|
; X86-AVX1-NEXT: ## xmm0 = xmm0[0,1,2],mem[0]
|
|
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX512-LABEL: insertps_from_broadcast_loadf32:
|
|
; X86-AVX512: ## %bb.0:
|
|
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
|
|
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
|
|
; X86-AVX512-NEXT: vinsertps $48, (%ecx,%eax,4), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0x04,0x81,0x30]
|
|
; X86-AVX512-NEXT: ## xmm0 = xmm0[0,1,2],mem[0]
|
|
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X64-SSE-LABEL: insertps_from_broadcast_loadf32:
|
|
; X64-SSE: ## %bb.0:
|
|
; X64-SSE-NEXT: insertps $48, (%rdi,%rsi,4), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0x04,0xb7,0x30]
|
|
; X64-SSE-NEXT: ## xmm0 = xmm0[0,1,2],mem[0]
|
|
; X64-SSE-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX1-LABEL: insertps_from_broadcast_loadf32:
|
|
; X64-AVX1: ## %bb.0:
|
|
; X64-AVX1-NEXT: vinsertps $48, (%rdi,%rsi,4), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x04,0xb7,0x30]
|
|
; X64-AVX1-NEXT: ## xmm0 = xmm0[0,1,2],mem[0]
|
|
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX512-LABEL: insertps_from_broadcast_loadf32:
|
|
; X64-AVX512: ## %bb.0:
|
|
; X64-AVX512-NEXT: vinsertps $48, (%rdi,%rsi,4), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0x04,0xb7,0x30]
|
|
; X64-AVX512-NEXT: ## xmm0 = xmm0[0,1,2],mem[0]
|
|
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
|
|
%1 = getelementptr inbounds float, float* %fb, i64 %index
|
|
%2 = load float, float* %1, align 4
|
|
%3 = insertelement <4 x float> undef, float %2, i32 0
|
|
%4 = insertelement <4 x float> %3, float %2, i32 1
|
|
%5 = insertelement <4 x float> %4, float %2, i32 2
|
|
%6 = insertelement <4 x float> %5, float %2, i32 3
|
|
%7 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %6, i32 48)
|
|
ret <4 x float> %7
|
|
}
|
|
|
|
; Splat of element 0 of an (only 4-byte-aligned) vector load, fed to
; insertps imm8 = 48. On AVX the load folds directly into vinsertps with a
; memory operand; plain SSE keeps a separate unaligned movups because
; insertps' memory form reads a single float, and the load here is <4 x float>.
define <4 x float> @insertps_from_broadcast_loadv4f32(<4 x float> %a, <4 x float>* nocapture readonly %b) {
|
|
; X86-SSE-LABEL: insertps_from_broadcast_loadv4f32:
|
|
; X86-SSE: ## %bb.0:
|
|
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-SSE-NEXT: movups (%eax), %xmm1 ## encoding: [0x0f,0x10,0x08]
|
|
; X86-SSE-NEXT: insertps $48, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x30]
|
|
; X86-SSE-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[0]
|
|
; X86-SSE-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX1-LABEL: insertps_from_broadcast_loadv4f32:
|
|
; X86-AVX1: ## %bb.0:
|
|
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-AVX1-NEXT: vinsertps $48, (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x00,0x30]
|
|
; X86-AVX1-NEXT: ## xmm0 = xmm0[0,1,2],mem[0]
|
|
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX512-LABEL: insertps_from_broadcast_loadv4f32:
|
|
; X86-AVX512: ## %bb.0:
|
|
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-AVX512-NEXT: vinsertps $48, (%eax), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0x00,0x30]
|
|
; X86-AVX512-NEXT: ## xmm0 = xmm0[0,1,2],mem[0]
|
|
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X64-SSE-LABEL: insertps_from_broadcast_loadv4f32:
|
|
; X64-SSE: ## %bb.0:
|
|
; X64-SSE-NEXT: movups (%rdi), %xmm1 ## encoding: [0x0f,0x10,0x0f]
|
|
; X64-SSE-NEXT: insertps $48, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x30]
|
|
; X64-SSE-NEXT: ## xmm0 = xmm0[0,1,2],xmm1[0]
|
|
; X64-SSE-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX1-LABEL: insertps_from_broadcast_loadv4f32:
|
|
; X64-AVX1: ## %bb.0:
|
|
; X64-AVX1-NEXT: vinsertps $48, (%rdi), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x07,0x30]
|
|
; X64-AVX1-NEXT: ## xmm0 = xmm0[0,1,2],mem[0]
|
|
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX512-LABEL: insertps_from_broadcast_loadv4f32:
|
|
; X64-AVX512: ## %bb.0:
|
|
; X64-AVX512-NEXT: vinsertps $48, (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0x07,0x30]
|
|
; X64-AVX512-NEXT: ## xmm0 = xmm0[0,1,2],mem[0]
|
|
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
|
|
%1 = load <4 x float>, <4 x float>* %b, align 4
|
|
%2 = extractelement <4 x float> %1, i32 0
|
|
%3 = insertelement <4 x float> undef, float %2, i32 0
|
|
%4 = insertelement <4 x float> %3, float %2, i32 1
|
|
%5 = insertelement <4 x float> %4, float %2, i32 2
|
|
%6 = insertelement <4 x float> %5, float %2, i32 3
|
|
%7 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %6, i32 48)
|
|
ret <4 x float> %7
|
|
}
|
|
|
|
; Same splatted scalar load as insertps_from_broadcast_loadf32, but the
; splat feeds FOUR insertps calls (into %a, %b, %c, %d) whose results are
; summed. With multiple uses the load is materialized once (movss on SSE,
; vbroadcastss on AVX) in %xmm4 and reused, instead of being folded into
; each insertps.
define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d, float* nocapture readonly %fb, i64 %index) {
|
|
; X86-SSE-LABEL: insertps_from_broadcast_multiple_use:
|
|
; X86-SSE: ## %bb.0:
|
|
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
|
|
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
|
|
; X86-SSE-NEXT: movss (%ecx,%eax,4), %xmm4 ## encoding: [0xf3,0x0f,0x10,0x24,0x81]
|
|
; X86-SSE-NEXT: ## xmm4 = mem[0],zero,zero,zero
|
|
; X86-SSE-NEXT: insertps $48, %xmm4, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc4,0x30]
|
|
; X86-SSE-NEXT: ## xmm0 = xmm0[0,1,2],xmm4[0]
|
|
; X86-SSE-NEXT: insertps $48, %xmm4, %xmm1 ## encoding: [0x66,0x0f,0x3a,0x21,0xcc,0x30]
|
|
; X86-SSE-NEXT: ## xmm1 = xmm1[0,1,2],xmm4[0]
|
|
; X86-SSE-NEXT: addps %xmm1, %xmm0 ## encoding: [0x0f,0x58,0xc1]
|
|
; X86-SSE-NEXT: insertps $48, %xmm4, %xmm2 ## encoding: [0x66,0x0f,0x3a,0x21,0xd4,0x30]
|
|
; X86-SSE-NEXT: ## xmm2 = xmm2[0,1,2],xmm4[0]
|
|
; X86-SSE-NEXT: insertps $48, %xmm4, %xmm3 ## encoding: [0x66,0x0f,0x3a,0x21,0xdc,0x30]
|
|
; X86-SSE-NEXT: ## xmm3 = xmm3[0,1,2],xmm4[0]
|
|
; X86-SSE-NEXT: addps %xmm2, %xmm3 ## encoding: [0x0f,0x58,0xda]
|
|
; X86-SSE-NEXT: addps %xmm3, %xmm0 ## encoding: [0x0f,0x58,0xc3]
|
|
; X86-SSE-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX1-LABEL: insertps_from_broadcast_multiple_use:
|
|
; X86-AVX1: ## %bb.0:
|
|
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
|
|
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
|
|
; X86-AVX1-NEXT: vbroadcastss (%ecx,%eax,4), %xmm4 ## encoding: [0xc4,0xe2,0x79,0x18,0x24,0x81]
|
|
; X86-AVX1-NEXT: vinsertps $48, %xmm4, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc4,0x30]
|
|
; X86-AVX1-NEXT: ## xmm0 = xmm0[0,1,2],xmm4[0]
|
|
; X86-AVX1-NEXT: vinsertps $48, %xmm4, %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x71,0x21,0xcc,0x30]
|
|
; X86-AVX1-NEXT: ## xmm1 = xmm1[0,1,2],xmm4[0]
|
|
; X86-AVX1-NEXT: vaddps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x58,0xc1]
|
|
; X86-AVX1-NEXT: vinsertps $48, %xmm4, %xmm2, %xmm1 ## encoding: [0xc4,0xe3,0x69,0x21,0xcc,0x30]
|
|
; X86-AVX1-NEXT: ## xmm1 = xmm2[0,1,2],xmm4[0]
|
|
; X86-AVX1-NEXT: vinsertps $48, %xmm4, %xmm3, %xmm2 ## encoding: [0xc4,0xe3,0x61,0x21,0xd4,0x30]
|
|
; X86-AVX1-NEXT: ## xmm2 = xmm3[0,1,2],xmm4[0]
|
|
; X86-AVX1-NEXT: vaddps %xmm2, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x58,0xca]
|
|
; X86-AVX1-NEXT: vaddps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x58,0xc1]
|
|
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX512-LABEL: insertps_from_broadcast_multiple_use:
|
|
; X86-AVX512: ## %bb.0:
|
|
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
|
|
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
|
|
; X86-AVX512-NEXT: vbroadcastss (%ecx,%eax,4), %xmm4 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0x24,0x81]
|
|
; X86-AVX512-NEXT: vinsertps $48, %xmm4, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc4,0x30]
|
|
; X86-AVX512-NEXT: ## xmm0 = xmm0[0,1,2],xmm4[0]
|
|
; X86-AVX512-NEXT: vinsertps $48, %xmm4, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x71,0x21,0xcc,0x30]
|
|
; X86-AVX512-NEXT: ## xmm1 = xmm1[0,1,2],xmm4[0]
|
|
; X86-AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc1]
|
|
; X86-AVX512-NEXT: vinsertps $48, %xmm4, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x69,0x21,0xcc,0x30]
|
|
; X86-AVX512-NEXT: ## xmm1 = xmm2[0,1,2],xmm4[0]
|
|
; X86-AVX512-NEXT: vinsertps $48, %xmm4, %xmm3, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x61,0x21,0xd4,0x30]
|
|
; X86-AVX512-NEXT: ## xmm2 = xmm3[0,1,2],xmm4[0]
|
|
; X86-AVX512-NEXT: vaddps %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xca]
|
|
; X86-AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc1]
|
|
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X64-SSE-LABEL: insertps_from_broadcast_multiple_use:
|
|
; X64-SSE: ## %bb.0:
|
|
; X64-SSE-NEXT: movss (%rdi,%rsi,4), %xmm4 ## encoding: [0xf3,0x0f,0x10,0x24,0xb7]
|
|
; X64-SSE-NEXT: ## xmm4 = mem[0],zero,zero,zero
|
|
; X64-SSE-NEXT: insertps $48, %xmm4, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc4,0x30]
|
|
; X64-SSE-NEXT: ## xmm0 = xmm0[0,1,2],xmm4[0]
|
|
; X64-SSE-NEXT: insertps $48, %xmm4, %xmm1 ## encoding: [0x66,0x0f,0x3a,0x21,0xcc,0x30]
|
|
; X64-SSE-NEXT: ## xmm1 = xmm1[0,1,2],xmm4[0]
|
|
; X64-SSE-NEXT: addps %xmm1, %xmm0 ## encoding: [0x0f,0x58,0xc1]
|
|
; X64-SSE-NEXT: insertps $48, %xmm4, %xmm2 ## encoding: [0x66,0x0f,0x3a,0x21,0xd4,0x30]
|
|
; X64-SSE-NEXT: ## xmm2 = xmm2[0,1,2],xmm4[0]
|
|
; X64-SSE-NEXT: insertps $48, %xmm4, %xmm3 ## encoding: [0x66,0x0f,0x3a,0x21,0xdc,0x30]
|
|
; X64-SSE-NEXT: ## xmm3 = xmm3[0,1,2],xmm4[0]
|
|
; X64-SSE-NEXT: addps %xmm2, %xmm3 ## encoding: [0x0f,0x58,0xda]
|
|
; X64-SSE-NEXT: addps %xmm3, %xmm0 ## encoding: [0x0f,0x58,0xc3]
|
|
; X64-SSE-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX1-LABEL: insertps_from_broadcast_multiple_use:
|
|
; X64-AVX1: ## %bb.0:
|
|
; X64-AVX1-NEXT: vbroadcastss (%rdi,%rsi,4), %xmm4 ## encoding: [0xc4,0xe2,0x79,0x18,0x24,0xb7]
|
|
; X64-AVX1-NEXT: vinsertps $48, %xmm4, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc4,0x30]
|
|
; X64-AVX1-NEXT: ## xmm0 = xmm0[0,1,2],xmm4[0]
|
|
; X64-AVX1-NEXT: vinsertps $48, %xmm4, %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x71,0x21,0xcc,0x30]
|
|
; X64-AVX1-NEXT: ## xmm1 = xmm1[0,1,2],xmm4[0]
|
|
; X64-AVX1-NEXT: vaddps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x58,0xc1]
|
|
; X64-AVX1-NEXT: vinsertps $48, %xmm4, %xmm2, %xmm1 ## encoding: [0xc4,0xe3,0x69,0x21,0xcc,0x30]
|
|
; X64-AVX1-NEXT: ## xmm1 = xmm2[0,1,2],xmm4[0]
|
|
; X64-AVX1-NEXT: vinsertps $48, %xmm4, %xmm3, %xmm2 ## encoding: [0xc4,0xe3,0x61,0x21,0xd4,0x30]
|
|
; X64-AVX1-NEXT: ## xmm2 = xmm3[0,1,2],xmm4[0]
|
|
; X64-AVX1-NEXT: vaddps %xmm2, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x58,0xca]
|
|
; X64-AVX1-NEXT: vaddps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x58,0xc1]
|
|
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX512-LABEL: insertps_from_broadcast_multiple_use:
|
|
; X64-AVX512: ## %bb.0:
|
|
; X64-AVX512-NEXT: vbroadcastss (%rdi,%rsi,4), %xmm4 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0x24,0xb7]
|
|
; X64-AVX512-NEXT: vinsertps $48, %xmm4, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc4,0x30]
|
|
; X64-AVX512-NEXT: ## xmm0 = xmm0[0,1,2],xmm4[0]
|
|
; X64-AVX512-NEXT: vinsertps $48, %xmm4, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x71,0x21,0xcc,0x30]
|
|
; X64-AVX512-NEXT: ## xmm1 = xmm1[0,1,2],xmm4[0]
|
|
; X64-AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc1]
|
|
; X64-AVX512-NEXT: vinsertps $48, %xmm4, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x69,0x21,0xcc,0x30]
|
|
; X64-AVX512-NEXT: ## xmm1 = xmm2[0,1,2],xmm4[0]
|
|
; X64-AVX512-NEXT: vinsertps $48, %xmm4, %xmm3, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x61,0x21,0xd4,0x30]
|
|
; X64-AVX512-NEXT: ## xmm2 = xmm3[0,1,2],xmm4[0]
|
|
; X64-AVX512-NEXT: vaddps %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xca]
|
|
; X64-AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc1]
|
|
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
|
|
%1 = getelementptr inbounds float, float* %fb, i64 %index
|
|
%2 = load float, float* %1, align 4
|
|
%3 = insertelement <4 x float> undef, float %2, i32 0
|
|
%4 = insertelement <4 x float> %3, float %2, i32 1
|
|
%5 = insertelement <4 x float> %4, float %2, i32 2
|
|
%6 = insertelement <4 x float> %5, float %2, i32 3
|
|
%7 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %6, i32 48)
|
|
%8 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %b, <4 x float> %6, i32 48)
|
|
%9 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %c, <4 x float> %6, i32 48)
|
|
%10 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %d, <4 x float> %6, i32 48)
|
|
%11 = fadd <4 x float> %7, %8
|
|
%12 = fadd <4 x float> %9, %10
|
|
%13 = fadd <4 x float> %11, %12
|
|
ret <4 x float> %13
|
|
}
|
|
|
|
; Shuffle <b0, undef, a0, undef-high>: mask <4, undef, 0, 7> takes the
; loaded scalar for lane 0 and %a's lane 0 for lane 2. Because of the
; undef lanes this does not become an insertps at all; it lowers to a
; scalar movss load followed by movlhps.
define <4 x float> @insertps_with_undefs(<4 x float> %a, float* %b) {
|
|
; X86-SSE-LABEL: insertps_with_undefs:
|
|
; X86-SSE: ## %bb.0:
|
|
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-SSE-NEXT: movss (%eax), %xmm1 ## encoding: [0xf3,0x0f,0x10,0x08]
|
|
; X86-SSE-NEXT: ## xmm1 = mem[0],zero,zero,zero
|
|
; X86-SSE-NEXT: movlhps %xmm0, %xmm1 ## encoding: [0x0f,0x16,0xc8]
|
|
; X86-SSE-NEXT: ## xmm1 = xmm1[0],xmm0[0]
|
|
; X86-SSE-NEXT: movaps %xmm1, %xmm0 ## encoding: [0x0f,0x28,0xc1]
|
|
; X86-SSE-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX1-LABEL: insertps_with_undefs:
|
|
; X86-AVX1: ## %bb.0:
|
|
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-AVX1-NEXT: vmovss (%eax), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x08]
|
|
; X86-AVX1-NEXT: ## xmm1 = mem[0],zero,zero,zero
|
|
; X86-AVX1-NEXT: vmovlhps %xmm0, %xmm1, %xmm0 ## encoding: [0xc5,0xf0,0x16,0xc0]
|
|
; X86-AVX1-NEXT: ## xmm0 = xmm1[0],xmm0[0]
|
|
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX512-LABEL: insertps_with_undefs:
|
|
; X86-AVX512: ## %bb.0:
|
|
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-AVX512-NEXT: vmovss (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x08]
|
|
; X86-AVX512-NEXT: ## xmm1 = mem[0],zero,zero,zero
|
|
; X86-AVX512-NEXT: vmovlhps %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x16,0xc0]
|
|
; X86-AVX512-NEXT: ## xmm0 = xmm1[0],xmm0[0]
|
|
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X64-SSE-LABEL: insertps_with_undefs:
|
|
; X64-SSE: ## %bb.0:
|
|
; X64-SSE-NEXT: movss (%rdi), %xmm1 ## encoding: [0xf3,0x0f,0x10,0x0f]
|
|
; X64-SSE-NEXT: ## xmm1 = mem[0],zero,zero,zero
|
|
; X64-SSE-NEXT: movlhps %xmm0, %xmm1 ## encoding: [0x0f,0x16,0xc8]
|
|
; X64-SSE-NEXT: ## xmm1 = xmm1[0],xmm0[0]
|
|
; X64-SSE-NEXT: movaps %xmm1, %xmm0 ## encoding: [0x0f,0x28,0xc1]
|
|
; X64-SSE-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX1-LABEL: insertps_with_undefs:
|
|
; X64-AVX1: ## %bb.0:
|
|
; X64-AVX1-NEXT: vmovss (%rdi), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x0f]
|
|
; X64-AVX1-NEXT: ## xmm1 = mem[0],zero,zero,zero
|
|
; X64-AVX1-NEXT: vmovlhps %xmm0, %xmm1, %xmm0 ## encoding: [0xc5,0xf0,0x16,0xc0]
|
|
; X64-AVX1-NEXT: ## xmm0 = xmm1[0],xmm0[0]
|
|
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX512-LABEL: insertps_with_undefs:
|
|
; X64-AVX512: ## %bb.0:
|
|
; X64-AVX512-NEXT: vmovss (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x0f]
|
|
; X64-AVX512-NEXT: ## xmm1 = mem[0],zero,zero,zero
|
|
; X64-AVX512-NEXT: vmovlhps %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x16,0xc0]
|
|
; X64-AVX512-NEXT: ## xmm0 = xmm1[0],xmm0[0]
|
|
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
|
|
%1 = load float, float* %b, align 4
|
|
%2 = insertelement <4 x float> undef, float %1, i32 0
|
|
%result = shufflevector <4 x float> %a, <4 x float> %2, <4 x i32> <i32 4, i32 undef, i32 0, i32 7>
|
|
ret <4 x float> %result
|
|
}
|
|
|
|
; Test for a bug in X86ISelLowering.cpp:getINSERTPS where we were using
|
|
; the destination index to change the load, instead of the source index.
|
|
define <4 x float> @pr20087(<4 x float> %a, <4 x float> *%ptr) {
|
|
; X86-SSE-LABEL: pr20087:
|
|
; X86-SSE: ## %bb.0:
|
|
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-SSE-NEXT: movaps (%eax), %xmm1 ## encoding: [0x0f,0x28,0x08]
|
|
; X86-SSE-NEXT: insertps $178, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0xb2]
|
|
; X86-SSE-NEXT: ## xmm0 = xmm0[0],zero,xmm0[2],xmm1[2]
|
|
; X86-SSE-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX1-LABEL: pr20087:
|
|
; X86-AVX1: ## %bb.0:
|
|
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-AVX1-NEXT: vmovaps (%eax), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x08]
|
|
; X86-AVX1-NEXT: vinsertps $178, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xb2]
|
|
; X86-AVX1-NEXT: ## xmm0 = xmm0[0],zero,xmm0[2],xmm1[2]
|
|
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX512-LABEL: pr20087:
|
|
; X86-AVX512: ## %bb.0:
|
|
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-AVX512-NEXT: vmovaps (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x08]
|
|
; X86-AVX512-NEXT: vinsertps $178, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xb2]
|
|
; X86-AVX512-NEXT: ## xmm0 = xmm0[0],zero,xmm0[2],xmm1[2]
|
|
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X64-SSE-LABEL: pr20087:
|
|
; X64-SSE: ## %bb.0:
|
|
; X64-SSE-NEXT: movaps (%rdi), %xmm1 ## encoding: [0x0f,0x28,0x0f]
|
|
; X64-SSE-NEXT: insertps $178, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0xb2]
|
|
; X64-SSE-NEXT: ## xmm0 = xmm0[0],zero,xmm0[2],xmm1[2]
|
|
; X64-SSE-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX1-LABEL: pr20087:
|
|
; X64-AVX1: ## %bb.0:
|
|
; X64-AVX1-NEXT: vmovaps (%rdi), %xmm1 ## encoding: [0xc5,0xf8,0x28,0x0f]
|
|
; X64-AVX1-NEXT: vinsertps $178, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xb2]
|
|
; X64-AVX1-NEXT: ## xmm0 = xmm0[0],zero,xmm0[2],xmm1[2]
|
|
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX512-LABEL: pr20087:
|
|
; X64-AVX512: ## %bb.0:
|
|
; X64-AVX512-NEXT: vmovaps (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x0f]
|
|
; X64-AVX512-NEXT: vinsertps $178, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xb2]
|
|
; X64-AVX512-NEXT: ## xmm0 = xmm0[0],zero,xmm0[2],xmm1[2]
|
|
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
|
|
%load = load <4 x float> , <4 x float> *%ptr
|
|
%ret = shufflevector <4 x float> %load, <4 x float> %a, <4 x i32> <i32 4, i32 undef, i32 6, i32 2>
|
|
ret <4 x float> %ret
|
|
}
|
|
|
|
; Edge case for insertps where we end up with a shuffle with mask=<0, 7, -1, -1>
|
|
define void @insertps_pr20411(<4 x i32> %shuffle109, <4 x i32> %shuffle116, i32* noalias nocapture %RET) #1 {
|
|
; X86-SSE-LABEL: insertps_pr20411:
|
|
; X86-SSE: ## %bb.0:
|
|
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-SSE-NEXT: pshufd $238, %xmm1, %xmm1 ## encoding: [0x66,0x0f,0x70,0xc9,0xee]
|
|
; X86-SSE-NEXT: ## xmm1 = xmm1[2,3,2,3]
|
|
; X86-SSE-NEXT: pblendw $243, %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc8,0xf3]
|
|
; X86-SSE-NEXT: ## xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
|
|
; X86-SSE-NEXT: movdqu %xmm1, (%eax) ## encoding: [0xf3,0x0f,0x7f,0x08]
|
|
; X86-SSE-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX1-LABEL: insertps_pr20411:
|
|
; X86-AVX1: ## %bb.0:
|
|
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-AVX1-NEXT: vpermilps $238, %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x79,0x04,0xc9,0xee]
|
|
; X86-AVX1-NEXT: ## xmm1 = xmm1[2,3,2,3]
|
|
; X86-AVX1-NEXT: vblendps $2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x02]
|
|
; X86-AVX1-NEXT: ## xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
|
|
; X86-AVX1-NEXT: vmovups %xmm0, (%eax) ## encoding: [0xc5,0xf8,0x11,0x00]
|
|
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X86-AVX512-LABEL: insertps_pr20411:
|
|
; X86-AVX512: ## %bb.0:
|
|
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
|
|
; X86-AVX512-NEXT: vpermilps $238, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xc9,0xee]
|
|
; X86-AVX512-NEXT: ## xmm1 = xmm1[2,3,2,3]
|
|
; X86-AVX512-NEXT: vblendps $2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x02]
|
|
; X86-AVX512-NEXT: ## xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
|
|
; X86-AVX512-NEXT: vmovups %xmm0, (%eax) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x00]
|
|
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
|
|
;
|
|
; X64-SSE-LABEL: insertps_pr20411:
|
|
; X64-SSE: ## %bb.0:
|
|
; X64-SSE-NEXT: pshufd $238, %xmm1, %xmm1 ## encoding: [0x66,0x0f,0x70,0xc9,0xee]
|
|
; X64-SSE-NEXT: ## xmm1 = xmm1[2,3,2,3]
|
|
; X64-SSE-NEXT: pblendw $243, %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc8,0xf3]
|
|
; X64-SSE-NEXT: ## xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
|
|
; X64-SSE-NEXT: movdqu %xmm1, (%rdi) ## encoding: [0xf3,0x0f,0x7f,0x0f]
|
|
; X64-SSE-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX1-LABEL: insertps_pr20411:
|
|
; X64-AVX1: ## %bb.0:
|
|
; X64-AVX1-NEXT: vpermilps $238, %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x79,0x04,0xc9,0xee]
|
|
; X64-AVX1-NEXT: ## xmm1 = xmm1[2,3,2,3]
|
|
; X64-AVX1-NEXT: vblendps $2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x02]
|
|
; X64-AVX1-NEXT: ## xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
|
|
; X64-AVX1-NEXT: vmovups %xmm0, (%rdi) ## encoding: [0xc5,0xf8,0x11,0x07]
|
|
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
|
|
;
|
|
; X64-AVX512-LABEL: insertps_pr20411:
|
|
; X64-AVX512: ## %bb.0:
|
|
; X64-AVX512-NEXT: vpermilps $238, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xc9,0xee]
|
|
; X64-AVX512-NEXT: ## xmm1 = xmm1[2,3,2,3]
|
|
; X64-AVX512-NEXT: vblendps $2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x02]
|
|
; X64-AVX512-NEXT: ## xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
|
|
; X64-AVX512-NEXT: vmovups %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x07]
|
|
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
|
|
%shuffle117 = shufflevector <4 x i32> %shuffle109, <4 x i32> %shuffle116, <4 x i32> <i32 0, i32 7, i32 undef, i32 undef>
|
|
%ptrcast = bitcast i32* %RET to <4 x i32>*
|
|
store <4 x i32> %shuffle117, <4 x i32>* %ptrcast, align 4
|
|
ret void
|
|
}
|
|
|
|
define <4 x float> @insertps_4(<4 x float> %A, <4 x float> %B) {
|
|
; SSE-LABEL: insertps_4:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: insertps $170, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0xaa]
|
|
; SSE-NEXT: ## xmm0 = xmm0[0],zero,xmm1[2],zero
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: insertps_4:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vinsertps $170, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xaa]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0],zero,xmm1[2],zero
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: insertps_4:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vinsertps $170, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xaa]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0],zero,xmm1[2],zero
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
%vecext = extractelement <4 x float> %A, i32 0
|
|
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0
|
|
%vecinit1 = insertelement <4 x float> %vecinit, float 0.000000e+00, i32 1
|
|
%vecext2 = extractelement <4 x float> %B, i32 2
|
|
%vecinit3 = insertelement <4 x float> %vecinit1, float %vecext2, i32 2
|
|
%vecinit4 = insertelement <4 x float> %vecinit3, float 0.000000e+00, i32 3
|
|
ret <4 x float> %vecinit4
|
|
}
|
|
|
|
define <4 x float> @insertps_5(<4 x float> %A, <4 x float> %B) {
|
|
; SSE-LABEL: insertps_5:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: insertps $92, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x5c]
|
|
; SSE-NEXT: ## xmm0 = xmm0[0],xmm1[1],zero,zero
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: insertps_5:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vinsertps $92, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x5c]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0],xmm1[1],zero,zero
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: insertps_5:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vinsertps $92, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x5c]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0],xmm1[1],zero,zero
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
%vecext = extractelement <4 x float> %A, i32 0
|
|
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0
|
|
%vecext1 = extractelement <4 x float> %B, i32 1
|
|
%vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 1
|
|
%vecinit3 = insertelement <4 x float> %vecinit2, float 0.000000e+00, i32 2
|
|
%vecinit4 = insertelement <4 x float> %vecinit3, float 0.000000e+00, i32 3
|
|
ret <4 x float> %vecinit4
|
|
}
|
|
|
|
define <4 x float> @insertps_6(<4 x float> %A, <4 x float> %B) {
|
|
; SSE-LABEL: insertps_6:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: insertps $169, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0xa9]
|
|
; SSE-NEXT: ## xmm0 = zero,xmm0[1],xmm1[2],zero
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: insertps_6:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vinsertps $169, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xa9]
|
|
; AVX1-NEXT: ## xmm0 = zero,xmm0[1],xmm1[2],zero
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: insertps_6:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vinsertps $169, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0xa9]
|
|
; AVX512-NEXT: ## xmm0 = zero,xmm0[1],xmm1[2],zero
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
%vecext = extractelement <4 x float> %A, i32 1
|
|
%vecinit = insertelement <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, float %vecext, i32 1
|
|
%vecext1 = extractelement <4 x float> %B, i32 2
|
|
%vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 2
|
|
%vecinit3 = insertelement <4 x float> %vecinit2, float 0.000000e+00, i32 3
|
|
ret <4 x float> %vecinit3
|
|
}
|
|
|
|
define <4 x float> @insertps_7(<4 x float> %A, <4 x float> %B) {
|
|
; SSE-LABEL: insertps_7:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: insertps $106, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x6a]
|
|
; SSE-NEXT: ## xmm0 = xmm0[0],zero,xmm1[1],zero
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: insertps_7:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vinsertps $106, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x6a]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0],zero,xmm1[1],zero
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: insertps_7:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vinsertps $106, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x6a]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0],zero,xmm1[1],zero
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
%vecext = extractelement <4 x float> %A, i32 0
|
|
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0
|
|
%vecinit1 = insertelement <4 x float> %vecinit, float 0.000000e+00, i32 1
|
|
%vecext2 = extractelement <4 x float> %B, i32 1
|
|
%vecinit3 = insertelement <4 x float> %vecinit1, float %vecext2, i32 2
|
|
%vecinit4 = insertelement <4 x float> %vecinit3, float 0.000000e+00, i32 3
|
|
ret <4 x float> %vecinit4
|
|
}
|
|
|
|
define <4 x float> @insertps_8(<4 x float> %A, <4 x float> %B) {
|
|
; SSE-LABEL: insertps_8:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: insertps $28, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x1c]
|
|
; SSE-NEXT: ## xmm0 = xmm0[0],xmm1[0],zero,zero
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: insertps_8:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vinsertps $28, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x1c]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0],xmm1[0],zero,zero
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: insertps_8:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vinsertps $28, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x1c]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0],xmm1[0],zero,zero
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
%vecext = extractelement <4 x float> %A, i32 0
|
|
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0
|
|
%vecext1 = extractelement <4 x float> %B, i32 0
|
|
%vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 1
|
|
%vecinit3 = insertelement <4 x float> %vecinit2, float 0.000000e+00, i32 2
|
|
%vecinit4 = insertelement <4 x float> %vecinit3, float 0.000000e+00, i32 3
|
|
ret <4 x float> %vecinit4
|
|
}
|
|
|
|
define <4 x float> @insertps_9(<4 x float> %A, <4 x float> %B) {
|
|
; SSE-LABEL: insertps_9:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: insertps $25, %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x3a,0x21,0xc8,0x19]
|
|
; SSE-NEXT: ## xmm1 = zero,xmm0[0],xmm1[2],zero
|
|
; SSE-NEXT: movaps %xmm1, %xmm0 ## encoding: [0x0f,0x28,0xc1]
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: insertps_9:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vinsertps $25, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x21,0xc0,0x19]
|
|
; AVX1-NEXT: ## xmm0 = zero,xmm0[0],xmm1[2],zero
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: insertps_9:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vinsertps $25, %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x71,0x21,0xc0,0x19]
|
|
; AVX512-NEXT: ## xmm0 = zero,xmm0[0],xmm1[2],zero
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
%vecext = extractelement <4 x float> %A, i32 0
|
|
%vecinit = insertelement <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, float %vecext, i32 1
|
|
%vecext1 = extractelement <4 x float> %B, i32 2
|
|
%vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 2
|
|
%vecinit3 = insertelement <4 x float> %vecinit2, float 0.000000e+00, i32 3
|
|
ret <4 x float> %vecinit3
|
|
}
|
|
|
|
define <4 x float> @insertps_10(<4 x float> %A) {
|
|
; SSE-LABEL: insertps_10:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: insertps $42, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc0,0x2a]
|
|
; SSE-NEXT: ## xmm0 = xmm0[0],zero,xmm0[0],zero
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: insertps_10:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vinsertps $42, %xmm0, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc0,0x2a]
|
|
; AVX1-NEXT: ## xmm0 = xmm0[0],zero,xmm0[0],zero
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: insertps_10:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vinsertps $42, %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc0,0x2a]
|
|
; AVX512-NEXT: ## xmm0 = xmm0[0],zero,xmm0[0],zero
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
%vecext = extractelement <4 x float> %A, i32 0
|
|
%vecbuild1 = insertelement <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, float %vecext, i32 0
|
|
%vecbuild2 = insertelement <4 x float> %vecbuild1, float %vecext, i32 2
|
|
ret <4 x float> %vecbuild2
|
|
}
|
|
|
|
define <4 x float> @build_vector_to_shuffle_1(<4 x float> %A) {
|
|
; SSE-LABEL: build_vector_to_shuffle_1:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: xorps %xmm1, %xmm1 ## encoding: [0x0f,0x57,0xc9]
|
|
; SSE-NEXT: blendps $5, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc1,0x05]
|
|
; SSE-NEXT: ## xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: build_vector_to_shuffle_1:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
|
|
; AVX1-NEXT: vblendps $10, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x0a]
|
|
; AVX1-NEXT: ## xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: build_vector_to_shuffle_1:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x57,0xc9]
|
|
; AVX512-NEXT: vblendps $10, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x0a]
|
|
; AVX512-NEXT: ## xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
%vecext = extractelement <4 x float> %A, i32 1
|
|
%vecinit = insertelement <4 x float> zeroinitializer, float %vecext, i32 1
|
|
%vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 2
|
|
%vecinit3 = shufflevector <4 x float> %vecinit1, <4 x float> %A, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
|
|
ret <4 x float> %vecinit3
|
|
}
|
|
|
|
define <4 x float> @build_vector_to_shuffle_2(<4 x float> %A) {
|
|
; SSE-LABEL: build_vector_to_shuffle_2:
|
|
; SSE: ## %bb.0:
|
|
; SSE-NEXT: xorps %xmm1, %xmm1 ## encoding: [0x0f,0x57,0xc9]
|
|
; SSE-NEXT: blendps $13, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc1,0x0d]
|
|
; SSE-NEXT: ## xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
|
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX1-LABEL: build_vector_to_shuffle_2:
|
|
; AVX1: ## %bb.0:
|
|
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
|
|
; AVX1-NEXT: vblendps $2, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x02]
|
|
; AVX1-NEXT: ## xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
|
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
;
|
|
; AVX512-LABEL: build_vector_to_shuffle_2:
|
|
; AVX512: ## %bb.0:
|
|
; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x57,0xc9]
|
|
; AVX512-NEXT: vblendps $2, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x02]
|
|
; AVX512-NEXT: ## xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
|
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
|
%vecext = extractelement <4 x float> %A, i32 1
|
|
%vecinit = insertelement <4 x float> zeroinitializer, float %vecext, i32 1
|
|
%vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 2
|
|
ret <4 x float> %vecinit1
|
|
}
|