1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2025-01-31 20:51:52 +01:00

[X86][update_llc_test_checks] Use a less greedy regular expression for replacing constant pool labels in tests.

While working on D97208 I noticed that these greedy regular
expressions prevented tests from failing when (%rip) appeared after
a constant pool label where it previously had not.

Reviewed By: RKSimon, pengfei

Differential Revision: https://reviews.llvm.org/D99460
This commit is contained in:
Craig Topper 2021-03-28 11:30:49 -07:00
parent 4a9e2678d2
commit 6e75027132
144 changed files with 1680 additions and 1680 deletions

View File

@ -11,7 +11,7 @@ define <8 x i32> @test(<8 x float> %a, <8 x float> %b) {
; X86-NEXT: vcmpltps %ymm1, %ymm0, %ymm0 ; X86-NEXT: vcmpltps %ymm1, %ymm0, %ymm0
; X86-NEXT: vcmpltps %ymm3, %ymm2, %ymm1 ; X86-NEXT: vcmpltps %ymm3, %ymm2, %ymm1
; X86-NEXT: vandps %ymm1, %ymm0, %ymm0 ; X86-NEXT: vandps %ymm1, %ymm0, %ymm0
; X86-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 ; X86-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: test: ; X64-LABEL: test:

View File

@ -64,7 +64,7 @@ define i32 @add_const_add_const_extrause(i32 %arg) {
define <4 x i32> @vec_add_const_add_const(<4 x i32> %arg) { define <4 x i32> @vec_add_const_add_const(<4 x i32> %arg) {
; X86-LABEL: vec_add_const_add_const: ; X86-LABEL: vec_add_const_add_const:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 ; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: vec_add_const_add_const: ; X64-LABEL: vec_add_const_add_const:
@ -87,7 +87,7 @@ define <4 x i32> @vec_add_const_add_const_extrause(<4 x i32> %arg) {
; X86-NEXT: paddd %xmm1, %xmm0 ; X86-NEXT: paddd %xmm1, %xmm0
; X86-NEXT: calll vec_use@PLT ; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload ; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload
; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 ; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: addl $28, %esp ; X86-NEXT: addl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl ; X86-NEXT: retl
@ -115,7 +115,7 @@ define <4 x i32> @vec_add_const_add_const_extrause(<4 x i32> %arg) {
define <4 x i32> @vec_add_const_add_const_nonsplat(<4 x i32> %arg) { define <4 x i32> @vec_add_const_add_const_nonsplat(<4 x i32> %arg) {
; X86-LABEL: vec_add_const_add_const_nonsplat: ; X86-LABEL: vec_add_const_add_const_nonsplat:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 ; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: vec_add_const_add_const_nonsplat: ; X64-LABEL: vec_add_const_add_const_nonsplat:
@ -186,7 +186,7 @@ define i32 @add_const_sub_const_extrause(i32 %arg) {
define <4 x i32> @vec_add_const_sub_const(<4 x i32> %arg) { define <4 x i32> @vec_add_const_sub_const(<4 x i32> %arg) {
; X86-LABEL: vec_add_const_sub_const: ; X86-LABEL: vec_add_const_sub_const:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 ; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: vec_add_const_sub_const: ; X64-LABEL: vec_add_const_sub_const:
@ -209,7 +209,7 @@ define <4 x i32> @vec_add_const_sub_const_extrause(<4 x i32> %arg) {
; X86-NEXT: paddd %xmm1, %xmm0 ; X86-NEXT: paddd %xmm1, %xmm0
; X86-NEXT: calll vec_use@PLT ; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload ; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload
; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 ; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: addl $28, %esp ; X86-NEXT: addl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl ; X86-NEXT: retl
@ -237,7 +237,7 @@ define <4 x i32> @vec_add_const_sub_const_extrause(<4 x i32> %arg) {
define <4 x i32> @vec_add_const_sub_const_nonsplat(<4 x i32> %arg) { define <4 x i32> @vec_add_const_sub_const_nonsplat(<4 x i32> %arg) {
; X86-LABEL: vec_add_const_sub_const_nonsplat: ; X86-LABEL: vec_add_const_sub_const_nonsplat:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 ; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: vec_add_const_sub_const_nonsplat: ; X64-LABEL: vec_add_const_sub_const_nonsplat:
@ -440,7 +440,7 @@ define i32 @sub_const_add_const_extrause(i32 %arg) {
define <4 x i32> @vec_sub_const_add_const(<4 x i32> %arg) { define <4 x i32> @vec_sub_const_add_const(<4 x i32> %arg) {
; X86-LABEL: vec_sub_const_add_const: ; X86-LABEL: vec_sub_const_add_const:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 ; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: vec_sub_const_add_const: ; X64-LABEL: vec_sub_const_add_const:
@ -458,10 +458,10 @@ define <4 x i32> @vec_sub_const_add_const_extrause(<4 x i32> %arg) {
; X86-NEXT: subl $28, %esp ; X86-NEXT: subl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 32 ; X86-NEXT: .cfi_def_cfa_offset 32
; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill ; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill
; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 ; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: calll vec_use@PLT ; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload ; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload
; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 ; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: addl $28, %esp ; X86-NEXT: addl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl ; X86-NEXT: retl
@ -487,7 +487,7 @@ define <4 x i32> @vec_sub_const_add_const_extrause(<4 x i32> %arg) {
define <4 x i32> @vec_sub_const_add_const_nonsplat(<4 x i32> %arg) { define <4 x i32> @vec_sub_const_add_const_nonsplat(<4 x i32> %arg) {
; X86-LABEL: vec_sub_const_add_const_nonsplat: ; X86-LABEL: vec_sub_const_add_const_nonsplat:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 ; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: vec_sub_const_add_const_nonsplat: ; X64-LABEL: vec_sub_const_add_const_nonsplat:
@ -558,7 +558,7 @@ define i32 @sub_const_sub_const_extrause(i32 %arg) {
define <4 x i32> @vec_sub_const_sub_const(<4 x i32> %arg) { define <4 x i32> @vec_sub_const_sub_const(<4 x i32> %arg) {
; X86-LABEL: vec_sub_const_sub_const: ; X86-LABEL: vec_sub_const_sub_const:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 ; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: vec_sub_const_sub_const: ; X64-LABEL: vec_sub_const_sub_const:
@ -576,10 +576,10 @@ define <4 x i32> @vec_sub_const_sub_const_extrause(<4 x i32> %arg) {
; X86-NEXT: subl $28, %esp ; X86-NEXT: subl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 32 ; X86-NEXT: .cfi_def_cfa_offset 32
; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill ; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill
; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 ; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: calll vec_use@PLT ; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload ; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload
; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 ; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: addl $28, %esp ; X86-NEXT: addl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl ; X86-NEXT: retl
@ -605,7 +605,7 @@ define <4 x i32> @vec_sub_const_sub_const_extrause(<4 x i32> %arg) {
define <4 x i32> @vec_sub_const_sub_const_nonsplat(<4 x i32> %arg) { define <4 x i32> @vec_sub_const_sub_const_nonsplat(<4 x i32> %arg) {
; X86-LABEL: vec_sub_const_sub_const_nonsplat: ; X86-LABEL: vec_sub_const_sub_const_nonsplat:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 ; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: vec_sub_const_sub_const_nonsplat: ; X64-LABEL: vec_sub_const_sub_const_nonsplat:
@ -698,7 +698,7 @@ define <4 x i32> @vec_sub_const_const_sub_extrause(<4 x i32> %arg) {
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: subl $28, %esp ; X86-NEXT: subl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 32 ; X86-NEXT: .cfi_def_cfa_offset 32
; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0 ; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill ; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill
; X86-NEXT: calll vec_use@PLT ; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqa {{.*#+}} xmm0 = [2,2,2,2] ; X86-NEXT: movdqa {{.*#+}} xmm0 = [2,2,2,2]
@ -1074,7 +1074,7 @@ define i32 @const_sub_const_sub_extrause(i32 %arg) {
define <4 x i32> @vec_const_sub_const_sub(<4 x i32> %arg) { define <4 x i32> @vec_const_sub_const_sub(<4 x i32> %arg) {
; X86-LABEL: vec_const_sub_const_sub: ; X86-LABEL: vec_const_sub_const_sub:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 ; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: vec_const_sub_const_sub: ; X64-LABEL: vec_const_sub_const_sub:
@ -1126,7 +1126,7 @@ define <4 x i32> @vec_const_sub_const_sub_extrause(<4 x i32> %arg) {
define <4 x i32> @vec_const_sub_const_sub_nonsplat(<4 x i32> %arg) { define <4 x i32> @vec_const_sub_const_sub_nonsplat(<4 x i32> %arg) {
; X86-LABEL: vec_const_sub_const_sub_nonsplat: ; X86-LABEL: vec_const_sub_const_sub_nonsplat:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0 ; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: vec_const_sub_const_sub_nonsplat: ; X64-LABEL: vec_const_sub_const_sub_nonsplat:

View File

@ -200,7 +200,7 @@ define dso_local void @fadd_32g() nounwind {
; X86-SSE1-NEXT: movl glob32, %eax ; X86-SSE1-NEXT: movl glob32, %eax
; X86-SSE1-NEXT: movl %eax, (%esp) ; X86-SSE1-NEXT: movl %eax, (%esp)
; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE1-NEXT: addss {{\.LCPI.*}}, %xmm0 ; X86-SSE1-NEXT: addss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp) ; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE1-NEXT: movl %eax, glob32 ; X86-SSE1-NEXT: movl %eax, glob32
@ -296,7 +296,7 @@ define dso_local void @fadd_64g() nounwind {
; X86-SSE2-NEXT: andl $-8, %esp ; X86-SSE2-NEXT: andl $-8, %esp
; X86-SSE2-NEXT: subl $8, %esp ; X86-SSE2-NEXT: subl $8, %esp
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: addsd {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: addsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: movsd %xmm0, (%esp) ; X86-SSE2-NEXT: movsd %xmm0, (%esp)
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: movlps %xmm0, glob64 ; X86-SSE2-NEXT: movlps %xmm0, glob64
@ -311,7 +311,7 @@ define dso_local void @fadd_64g() nounwind {
; X86-AVX-NEXT: andl $-8, %esp ; X86-AVX-NEXT: andl $-8, %esp
; X86-AVX-NEXT: subl $8, %esp ; X86-AVX-NEXT: subl $8, %esp
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT: vaddsd {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vaddsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovsd %xmm0, (%esp) ; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT: vmovlps %xmm0, glob64 ; X86-AVX-NEXT: vmovlps %xmm0, glob64
@ -361,7 +361,7 @@ define dso_local void @fadd_32imm() nounwind {
; X86-SSE1-NEXT: movl -559038737, %eax ; X86-SSE1-NEXT: movl -559038737, %eax
; X86-SSE1-NEXT: movl %eax, (%esp) ; X86-SSE1-NEXT: movl %eax, (%esp)
; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE1-NEXT: addss {{\.LCPI.*}}, %xmm0 ; X86-SSE1-NEXT: addss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp) ; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE1-NEXT: movl %eax, -559038737 ; X86-SSE1-NEXT: movl %eax, -559038737
@ -459,7 +459,7 @@ define dso_local void @fadd_64imm() nounwind {
; X86-SSE2-NEXT: andl $-8, %esp ; X86-SSE2-NEXT: andl $-8, %esp
; X86-SSE2-NEXT: subl $8, %esp ; X86-SSE2-NEXT: subl $8, %esp
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: addsd {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: addsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: movsd %xmm0, (%esp) ; X86-SSE2-NEXT: movsd %xmm0, (%esp)
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: movlps %xmm0, -559038737 ; X86-SSE2-NEXT: movlps %xmm0, -559038737
@ -474,7 +474,7 @@ define dso_local void @fadd_64imm() nounwind {
; X86-AVX-NEXT: andl $-8, %esp ; X86-AVX-NEXT: andl $-8, %esp
; X86-AVX-NEXT: subl $8, %esp ; X86-AVX-NEXT: subl $8, %esp
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT: vaddsd {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vaddsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovsd %xmm0, (%esp) ; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT: vmovlps %xmm0, -559038737 ; X86-AVX-NEXT: vmovlps %xmm0, -559038737
@ -526,7 +526,7 @@ define dso_local void @fadd_32stack() nounwind {
; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE1-NEXT: movl %eax, (%esp) ; X86-SSE1-NEXT: movl %eax, (%esp)
; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE1-NEXT: addss {{\.LCPI.*}}, %xmm0 ; X86-SSE1-NEXT: addss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp) ; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE1-NEXT: movl %eax, {{[0-9]+}}(%esp) ; X86-SSE1-NEXT: movl %eax, {{[0-9]+}}(%esp)
@ -628,7 +628,7 @@ define dso_local void @fadd_64stack() nounwind {
; X86-SSE2-NEXT: andl $-8, %esp ; X86-SSE2-NEXT: andl $-8, %esp
; X86-SSE2-NEXT: subl $16, %esp ; X86-SSE2-NEXT: subl $16, %esp
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: addsd {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: addsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: movsd %xmm0, (%esp) ; X86-SSE2-NEXT: movsd %xmm0, (%esp)
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: movlps %xmm0, {{[0-9]+}}(%esp) ; X86-SSE2-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
@ -643,7 +643,7 @@ define dso_local void @fadd_64stack() nounwind {
; X86-AVX-NEXT: andl $-8, %esp ; X86-AVX-NEXT: andl $-8, %esp
; X86-AVX-NEXT: subl $16, %esp ; X86-AVX-NEXT: subl $16, %esp
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT: vaddsd {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vaddsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovsd %xmm0, (%esp) ; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp) ; X86-AVX-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)

View File

@ -49,7 +49,7 @@ define void @render(double %a0) nounwind {
; CHECK-NEXT: # in Loop: Header=BB2_2 Depth=1 ; CHECK-NEXT: # in Loop: Header=BB2_2 Depth=1
; CHECK-NEXT: vmovsd (%rsp), %xmm0 # 8-byte Reload ; CHECK-NEXT: vmovsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero ; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vucomisd {{\.LCPI.*}}, %xmm0 ; CHECK-NEXT: vucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK-NEXT: jne .LBB2_5 ; CHECK-NEXT: jne .LBB2_5
; CHECK-NEXT: jnp .LBB2_2 ; CHECK-NEXT: jnp .LBB2_2
; CHECK-NEXT: .LBB2_5: # %if.then ; CHECK-NEXT: .LBB2_5: # %if.then

View File

@ -645,8 +645,8 @@ define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
; X86-AVX: # %bb.0: ; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x57,0xc9] ; X86-AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x57,0xc9]
; X86-AVX-NEXT: vmovhpd {{\.LCPI.*}}, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A] ; X86-AVX-NEXT: vmovhpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: # xmm1 = xmm1[0],mem[0] ; X86-AVX-NEXT: # xmm1 = xmm1[0],mem[0]
; X86-AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x58,0xc1] ; X86-AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x58,0xc1]
; X86-AVX-NEXT: vmovupd %xmm0, (%eax) # encoding: [0xc5,0xf9,0x11,0x00] ; X86-AVX-NEXT: vmovupd %xmm0, (%eax) # encoding: [0xc5,0xf9,0x11,0x00]
@ -656,8 +656,8 @@ define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
; X86-AVX512VL: # %bb.0: ; X86-AVX512VL: # %bb.0:
; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] ; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512VL-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x57,0xc9] ; X86-AVX512VL-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x57,0xc9]
; X86-AVX512VL-NEXT: vmovhpd {{\.LCPI.*}}, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A] ; X86-AVX512VL-NEXT: vmovhpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: # xmm1 = xmm1[0],mem[0] ; X86-AVX512VL-NEXT: # xmm1 = xmm1[0],mem[0]
; X86-AVX512VL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1] ; X86-AVX512VL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1]
; X86-AVX512VL-NEXT: vmovupd %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x11,0x00] ; X86-AVX512VL-NEXT: vmovupd %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x11,0x00]
@ -667,7 +667,7 @@ define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
; X64-AVX: # %bb.0: ; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x57,0xc9] ; X64-AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x57,0xc9]
; X64-AVX-NEXT: vmovhpd {{.*}}(%rip), %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A] ; X64-AVX-NEXT: vmovhpd {{.*}}(%rip), %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: # xmm1 = xmm1[0],mem[0] ; X64-AVX-NEXT: # xmm1 = xmm1[0],mem[0]
; X64-AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x58,0xc1] ; X64-AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x58,0xc1]
; X64-AVX-NEXT: vmovupd %xmm0, (%rdi) # encoding: [0xc5,0xf9,0x11,0x07] ; X64-AVX-NEXT: vmovupd %xmm0, (%rdi) # encoding: [0xc5,0xf9,0x11,0x07]
@ -677,7 +677,7 @@ define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
; X64-AVX512VL: # %bb.0: ; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x57,0xc9] ; X64-AVX512VL-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x57,0xc9]
; X64-AVX512VL-NEXT: vmovhpd {{.*}}(%rip), %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A] ; X64-AVX512VL-NEXT: vmovhpd {{.*}}(%rip), %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: # xmm1 = xmm1[0],mem[0] ; X64-AVX512VL-NEXT: # xmm1 = xmm1[0],mem[0]
; X64-AVX512VL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1] ; X64-AVX512VL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1]
; X64-AVX512VL-NEXT: vmovupd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x11,0x07] ; X64-AVX512VL-NEXT: vmovupd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x11,0x07]

View File

@ -148,7 +148,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind readnone {
; X32-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero ; X32-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; X32-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero ; X32-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; X32-NEXT: vpmullw %ymm1, %ymm0, %ymm0 ; X32-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0 ; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: vextracti128 $1, %ymm0, %xmm1 ; X32-NEXT: vextracti128 $1, %ymm0, %xmm1
; X32-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 ; X32-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; X32-NEXT: vzeroupper ; X32-NEXT: vzeroupper
@ -303,7 +303,7 @@ define <8 x i32> @mul_const5(<8 x i32> %x) {
define <8 x i32> @mul_const6(<8 x i32> %x) { define <8 x i32> @mul_const6(<8 x i32> %x) {
; X32-LABEL: mul_const6: ; X32-LABEL: mul_const6:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: vpmulld {{\.LCPI.*}}, %ymm0, %ymm0 ; X32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: mul_const6: ; X64-LABEL: mul_const6:

View File

@ -159,7 +159,7 @@ define <16 x i16> @sext_16i8_16i16(<16 x i8> %z) {
define <16 x i8> @trunc_16i16_16i8(<16 x i16> %z) { define <16 x i8> @trunc_16i16_16i8(<16 x i16> %z) {
; X32-LABEL: trunc_16i16_16i8: ; X32-LABEL: trunc_16i16_16i8:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0 ; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: vextracti128 $1, %ymm0, %xmm1 ; X32-NEXT: vextracti128 $1, %ymm0, %xmm1
; X32-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 ; X32-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; X32-NEXT: vzeroupper ; X32-NEXT: vzeroupper

View File

@ -25,28 +25,28 @@ define <16 x i16> @test_x86_avx2_packssdw_fold() {
; X86-AVX: # %bb.0: ; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280] ; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
; X86-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A] ; X86-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: retl # encoding: [0xc3] ; X86-AVX-NEXT: retl # encoding: [0xc3]
; ;
; X86-AVX512VL-LABEL: test_x86_avx2_packssdw_fold: ; X86-AVX512VL-LABEL: test_x86_avx2_packssdw_fold:
; X86-AVX512VL: # %bb.0: ; X86-AVX512VL: # %bb.0:
; X86-AVX512VL-NEXT: vmovaps {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280] ; X86-AVX512VL-NEXT: vmovaps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl # encoding: [0xc3] ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
; ;
; X64-AVX-LABEL: test_x86_avx2_packssdw_fold: ; X64-AVX-LABEL: test_x86_avx2_packssdw_fold:
; X64-AVX: # %bb.0: ; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280] ; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
; X64-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A] ; X64-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3] ; X64-AVX-NEXT: retq # encoding: [0xc3]
; ;
; X64-AVX512VL-LABEL: test_x86_avx2_packssdw_fold: ; X64-AVX512VL-LABEL: test_x86_avx2_packssdw_fold:
; X64-AVX512VL: # %bb.0: ; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280] ; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq # encoding: [0xc3] ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> zeroinitializer, <8 x i32> <i32 255, i32 32767, i32 65535, i32 -1, i32 -32767, i32 -65535, i32 0, i32 -256>) %res = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> zeroinitializer, <8 x i32> <i32 255, i32 32767, i32 65535, i32 -1, i32 -32767, i32 -65535, i32 0, i32 -256>)
ret <16 x i16> %res ret <16 x i16> %res
@ -74,28 +74,28 @@ define <32 x i8> @test_x86_avx2_packsswb_fold() {
; X86-AVX: # %bb.0: ; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0] ; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; X86-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A] ; X86-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: retl # encoding: [0xc3] ; X86-AVX-NEXT: retl # encoding: [0xc3]
; ;
; X86-AVX512VL-LABEL: test_x86_avx2_packsswb_fold: ; X86-AVX512VL-LABEL: test_x86_avx2_packsswb_fold:
; X86-AVX512VL: # %bb.0: ; X86-AVX512VL: # %bb.0:
; X86-AVX512VL-NEXT: vmovaps {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0] ; X86-AVX512VL-NEXT: vmovaps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl # encoding: [0xc3] ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
; ;
; X64-AVX-LABEL: test_x86_avx2_packsswb_fold: ; X64-AVX-LABEL: test_x86_avx2_packsswb_fold:
; X64-AVX: # %bb.0: ; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0] ; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; X64-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A] ; X64-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3] ; X64-AVX-NEXT: retq # encoding: [0xc3]
; ;
; X64-AVX512VL-LABEL: test_x86_avx2_packsswb_fold: ; X64-AVX512VL-LABEL: test_x86_avx2_packsswb_fold:
; X64-AVX512VL: # %bb.0: ; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0] ; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq # encoding: [0xc3] ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> <i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678, i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678>, <16 x i16> zeroinitializer) %res = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> <i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678, i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678>, <16 x i16> zeroinitializer)
ret <32 x i8> %res ret <32 x i8> %res
@ -123,28 +123,28 @@ define <32 x i8> @test_x86_avx2_packuswb_fold() {
; X86-AVX: # %bb.0: ; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0] ; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; X86-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A] ; X86-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: retl # encoding: [0xc3] ; X86-AVX-NEXT: retl # encoding: [0xc3]
; ;
; X86-AVX512VL-LABEL: test_x86_avx2_packuswb_fold: ; X86-AVX512VL-LABEL: test_x86_avx2_packuswb_fold:
; X86-AVX512VL: # %bb.0: ; X86-AVX512VL: # %bb.0:
; X86-AVX512VL-NEXT: vmovaps {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0] ; X86-AVX512VL-NEXT: vmovaps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl # encoding: [0xc3] ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
; ;
; X64-AVX-LABEL: test_x86_avx2_packuswb_fold: ; X64-AVX-LABEL: test_x86_avx2_packuswb_fold:
; X64-AVX: # %bb.0: ; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0] ; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; X64-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A] ; X64-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3] ; X64-AVX-NEXT: retq # encoding: [0xc3]
; ;
; X64-AVX512VL-LABEL: test_x86_avx2_packuswb_fold: ; X64-AVX512VL-LABEL: test_x86_avx2_packuswb_fold:
; X64-AVX512VL: # %bb.0: ; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0] ; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq # encoding: [0xc3] ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> <i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678, i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678>, <16 x i16> zeroinitializer) %res = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> <i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678, i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678>, <16 x i16> zeroinitializer)
ret <32 x i8> %res ret <32 x i8> %res
@ -753,28 +753,28 @@ define <16 x i16> @test_x86_avx2_packusdw_fold() {
; X86-AVX: # %bb.0: ; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0] ; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
; X86-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A] ; X86-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: retl # encoding: [0xc3] ; X86-AVX-NEXT: retl # encoding: [0xc3]
; ;
; X86-AVX512VL-LABEL: test_x86_avx2_packusdw_fold: ; X86-AVX512VL-LABEL: test_x86_avx2_packusdw_fold:
; X86-AVX512VL: # %bb.0: ; X86-AVX512VL: # %bb.0:
; X86-AVX512VL-NEXT: vmovaps {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0] ; X86-AVX512VL-NEXT: vmovaps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl # encoding: [0xc3] ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
; ;
; X64-AVX-LABEL: test_x86_avx2_packusdw_fold: ; X64-AVX-LABEL: test_x86_avx2_packusdw_fold:
; X64-AVX: # %bb.0: ; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0] ; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
; X64-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A] ; X64-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3] ; X64-AVX-NEXT: retq # encoding: [0xc3]
; ;
; X64-AVX512VL-LABEL: test_x86_avx2_packusdw_fold: ; X64-AVX512VL-LABEL: test_x86_avx2_packusdw_fold:
; X64-AVX512VL: # %bb.0: ; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0] ; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq # encoding: [0xc3] ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> zeroinitializer, <8 x i32> <i32 255, i32 32767, i32 65535, i32 -1, i32 -32767, i32 -65535, i32 0, i32 -256>) %res = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> zeroinitializer, <8 x i32> <i32 255, i32 32767, i32 65535, i32 -1, i32 -32767, i32 -65535, i32 0, i32 -256>)
ret <16 x i16> %res ret <16 x i16> %res
@ -1025,26 +1025,26 @@ define <4 x i32> @test_x86_avx2_psllv_d_const() {
; X86-AVX: # %bb.0: ; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,0,4294967295] ; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,0,4294967295]
; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vpsllvd {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x47,0x05,A,A,A,A] ; X86-AVX-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x47,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [1,1,1,4294967295] ; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [1,1,1,4294967295]
; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A] ; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vpsllvd %xmm1, %xmm1, %xmm1 # encoding: [0xc4,0xe2,0x71,0x47,0xc9] ; X86-AVX-NEXT: vpsllvd %xmm1, %xmm1, %xmm1 # encoding: [0xc4,0xe2,0x71,0x47,0xc9]
; X86-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0xc1] ; X86-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0xc1]
; X86-AVX-NEXT: retl # encoding: [0xc3] ; X86-AVX-NEXT: retl # encoding: [0xc3]
; ;
; X86-AVX512VL-LABEL: test_x86_avx2_psllv_d_const: ; X86-AVX512VL-LABEL: test_x86_avx2_psllv_d_const:
; X86-AVX512VL: # %bb.0: ; X86-AVX512VL: # %bb.0:
; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,0,4294967295] ; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,0,4294967295]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpsllvd {{\.LCPI.*}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x47,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x47,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %xmm1 # EVEX TO VEX Compression xmm1 = [1,1,1,4294967295] ; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 # EVEX TO VEX Compression xmm1 = [1,1,1,4294967295]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A] ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpsllvd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x47,0xc9] ; X86-AVX512VL-NEXT: vpsllvd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x47,0xc9]
; X86-AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1] ; X86-AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; X86-AVX512VL-NEXT: retl # encoding: [0xc3] ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
@ -1053,12 +1053,12 @@ define <4 x i32> @test_x86_avx2_psllv_d_const() {
; X64-AVX: # %bb.0: ; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,0,4294967295] ; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,0,4294967295]
; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x47,0x05,A,A,A,A] ; X64-AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x47,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [1,1,1,4294967295] ; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [1,1,1,4294967295]
; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A] ; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsllvd %xmm1, %xmm1, %xmm1 # encoding: [0xc4,0xe2,0x71,0x47,0xc9] ; X64-AVX-NEXT: vpsllvd %xmm1, %xmm1, %xmm1 # encoding: [0xc4,0xe2,0x71,0x47,0xc9]
; X64-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0xc1] ; X64-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0xc1]
; X64-AVX-NEXT: retq # encoding: [0xc3] ; X64-AVX-NEXT: retq # encoding: [0xc3]
@ -1067,12 +1067,12 @@ define <4 x i32> @test_x86_avx2_psllv_d_const() {
; X64-AVX512VL: # %bb.0: ; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,0,4294967295] ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,0,4294967295]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x47,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x47,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm1 # EVEX TO VEX Compression xmm1 = [1,1,1,4294967295] ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm1 # EVEX TO VEX Compression xmm1 = [1,1,1,4294967295]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A] ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsllvd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x47,0xc9] ; X64-AVX512VL-NEXT: vpsllvd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x47,0xc9]
; X64-AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1] ; X64-AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3] ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
@ -1103,29 +1103,29 @@ define <8 x i32> @test_x86_avx2_psllv_d_256_const() {
; X86-AVX: # %bb.0: ; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,0,4294967295,3,7,4294967295,0] ; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vpsllvd {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x47,0x05,A,A,A,A] ; X86-AVX-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x47,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,4,4,4,4,4,4,4294967295] ; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,4,4,4,4,4,4,4294967295]
; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A] ; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vpsllvd {{\.LCPI.*}}, %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x75,0x47,0x0d,A,A,A,A] ; X86-AVX-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x75,0x47,0x0d,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xfe,0xc1] ; X86-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xfe,0xc1]
; X86-AVX-NEXT: retl # encoding: [0xc3] ; X86-AVX-NEXT: retl # encoding: [0xc3]
; ;
; X86-AVX512VL-LABEL: test_x86_avx2_psllv_d_256_const: ; X86-AVX512VL-LABEL: test_x86_avx2_psllv_d_256_const:
; X86-AVX512VL: # %bb.0: ; X86-AVX512VL: # %bb.0:
; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,0,4294967295,3,7,4294967295,0] ; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpsllvd {{\.LCPI.*}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x47,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x47,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %ymm1 # EVEX TO VEX Compression ymm1 = [4,4,4,4,4,4,4,4294967295] ; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm1 # EVEX TO VEX Compression ymm1 = [4,4,4,4,4,4,4,4294967295]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A] ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpsllvd {{\.LCPI.*}}, %ymm1, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x47,0x0d,A,A,A,A] ; X86-AVX512VL-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x47,0x0d,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1] ; X86-AVX512VL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
; X86-AVX512VL-NEXT: retl # encoding: [0xc3] ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
; ;
@ -1133,14 +1133,14 @@ define <8 x i32> @test_x86_avx2_psllv_d_256_const() {
; X64-AVX: # %bb.0: ; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,0,4294967295,3,7,4294967295,0] ; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x47,0x05,A,A,A,A] ; X64-AVX-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x47,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,4,4,4,4,4,4,4294967295] ; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,4,4,4,4,4,4,4294967295]
; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A] ; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsllvd {{.*}}(%rip), %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x75,0x47,0x0d,A,A,A,A] ; X64-AVX-NEXT: vpsllvd {{.*}}(%rip), %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x75,0x47,0x0d,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xfe,0xc1] ; X64-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xfe,0xc1]
; X64-AVX-NEXT: retq # encoding: [0xc3] ; X64-AVX-NEXT: retq # encoding: [0xc3]
; ;
@ -1148,14 +1148,14 @@ define <8 x i32> @test_x86_avx2_psllv_d_256_const() {
; X64-AVX512VL: # %bb.0: ; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,0,4294967295,3,7,4294967295,0] ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x47,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x47,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm1 # EVEX TO VEX Compression ymm1 = [4,4,4,4,4,4,4,4294967295] ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm1 # EVEX TO VEX Compression ymm1 = [4,4,4,4,4,4,4,4294967295]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A] ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsllvd {{.*}}(%rip), %ymm1, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x47,0x0d,A,A,A,A] ; X64-AVX512VL-NEXT: vpsllvd {{.*}}(%rip), %ymm1, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x47,0x0d,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1] ; X64-AVX512VL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3] ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res0 = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> <i32 2, i32 9, i32 0, i32 -1, i32 3, i32 7, i32 -1, i32 0>, <8 x i32> <i32 1, i32 0, i32 33, i32 -1,i32 2, i32 0, i32 34, i32 -2>) %res0 = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> <i32 2, i32 9, i32 0, i32 -1, i32 3, i32 7, i32 -1, i32 0>, <8 x i32> <i32 1, i32 0, i32 33, i32 -1,i32 2, i32 0, i32 34, i32 -2>)
@ -1184,36 +1184,36 @@ define <2 x i64> @test_x86_avx2_psllv_q_const() {
; X86-AVX: # %bb.0: ; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [4,0,4294967295,4294967295] ; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [4,0,4294967295,4294967295]
; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vpsllvq {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x47,0x05,A,A,A,A] ; X86-AVX-NEXT: vpsllvq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x47,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: retl # encoding: [0xc3] ; X86-AVX-NEXT: retl # encoding: [0xc3]
; ;
; X86-AVX512VL-LABEL: test_x86_avx2_psllv_q_const: ; X86-AVX512VL-LABEL: test_x86_avx2_psllv_q_const:
; X86-AVX512VL: # %bb.0: ; X86-AVX512VL: # %bb.0:
; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,0,4294967295,4294967295] ; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,0,4294967295,4294967295]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpsllvq {{\.LCPI.*}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x47,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: vpsllvq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x47,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl # encoding: [0xc3] ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
; ;
; X64-AVX-LABEL: test_x86_avx2_psllv_q_const: ; X64-AVX-LABEL: test_x86_avx2_psllv_q_const:
; X64-AVX: # %bb.0: ; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [4,18446744073709551615] ; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [4,18446744073709551615]
; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x47,0x05,A,A,A,A] ; X64-AVX-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x47,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3] ; X64-AVX-NEXT: retq # encoding: [0xc3]
; ;
; X64-AVX512VL-LABEL: test_x86_avx2_psllv_q_const: ; X64-AVX512VL-LABEL: test_x86_avx2_psllv_q_const:
; X64-AVX512VL: # %bb.0: ; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [4,18446744073709551615] ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [4,18446744073709551615]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x47,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x47,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq # encoding: [0xc3] ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> <i64 4, i64 -1>, <2 x i64> <i64 1, i64 -1>) %res = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> <i64 4, i64 -1>, <2 x i64> <i64 1, i64 -1>)
ret <2 x i64> %res ret <2 x i64> %res
@ -1240,36 +1240,36 @@ define <4 x i64> @test_x86_avx2_psllv_q_256_const() {
; X86-AVX: # %bb.0: ; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [4,0,4,0,4,0,4294967295,4294967295] ; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [4,0,4,0,4,0,4294967295,4294967295]
; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vpsllvq {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x47,0x05,A,A,A,A] ; X86-AVX-NEXT: vpsllvq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x47,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: retl # encoding: [0xc3] ; X86-AVX-NEXT: retl # encoding: [0xc3]
; ;
; X86-AVX512VL-LABEL: test_x86_avx2_psllv_q_256_const: ; X86-AVX512VL-LABEL: test_x86_avx2_psllv_q_256_const:
; X86-AVX512VL: # %bb.0: ; X86-AVX512VL: # %bb.0:
; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,0,4,0,4,0,4294967295,4294967295] ; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,0,4,0,4,0,4294967295,4294967295]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpsllvq {{\.LCPI.*}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x47,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: vpsllvq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x47,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl # encoding: [0xc3] ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
; ;
; X64-AVX-LABEL: test_x86_avx2_psllv_q_256_const: ; X64-AVX-LABEL: test_x86_avx2_psllv_q_256_const:
; X64-AVX: # %bb.0: ; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [4,4,4,18446744073709551615] ; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [4,4,4,18446744073709551615]
; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x47,0x05,A,A,A,A] ; X64-AVX-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x47,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3] ; X64-AVX-NEXT: retq # encoding: [0xc3]
; ;
; X64-AVX512VL-LABEL: test_x86_avx2_psllv_q_256_const: ; X64-AVX512VL-LABEL: test_x86_avx2_psllv_q_256_const:
; X64-AVX512VL: # %bb.0: ; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,18446744073709551615] ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,18446744073709551615]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x47,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x47,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq # encoding: [0xc3] ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> <i64 4, i64 4, i64 4, i64 -1>, <4 x i64> <i64 1, i64 1, i64 1, i64 -1>) %res = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> <i64 4, i64 4, i64 4, i64 -1>, <4 x i64> <i64 1, i64 1, i64 1, i64 -1>)
ret <4 x i64> %res ret <4 x i64> %res
@ -1296,29 +1296,29 @@ define <4 x i32> @test_x86_avx2_psrlv_d_const() {
; X86-AVX: # %bb.0: ; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,0,4294967295] ; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,0,4294967295]
; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vpsrlvd {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x45,0x05,A,A,A,A] ; X86-AVX-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x45,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [4,4,4,4294967295] ; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [4,4,4,4294967295]
; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A] ; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vpsrlvd {{\.LCPI.*}}, %xmm1, %xmm1 # encoding: [0xc4,0xe2,0x71,0x45,0x0d,A,A,A,A] ; X86-AVX-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 # encoding: [0xc4,0xe2,0x71,0x45,0x0d,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0xc1] ; X86-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0xc1]
; X86-AVX-NEXT: retl # encoding: [0xc3] ; X86-AVX-NEXT: retl # encoding: [0xc3]
; ;
; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_d_const: ; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_d_const:
; X86-AVX512VL: # %bb.0: ; X86-AVX512VL: # %bb.0:
; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,0,4294967295] ; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,0,4294967295]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI.*}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x45,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x45,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %xmm1 # EVEX TO VEX Compression xmm1 = [4,4,4,4294967295] ; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 # EVEX TO VEX Compression xmm1 = [4,4,4,4294967295]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A] ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI.*}}, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x45,0x0d,A,A,A,A] ; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x45,0x0d,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1] ; X86-AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; X86-AVX512VL-NEXT: retl # encoding: [0xc3] ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
; ;
@ -1326,14 +1326,14 @@ define <4 x i32> @test_x86_avx2_psrlv_d_const() {
; X64-AVX: # %bb.0: ; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,0,4294967295] ; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,0,4294967295]
; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x45,0x05,A,A,A,A] ; X64-AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x45,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [4,4,4,4294967295] ; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [4,4,4,4294967295]
; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A] ; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm1, %xmm1 # encoding: [0xc4,0xe2,0x71,0x45,0x0d,A,A,A,A] ; X64-AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm1, %xmm1 # encoding: [0xc4,0xe2,0x71,0x45,0x0d,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0xc1] ; X64-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0xc1]
; X64-AVX-NEXT: retq # encoding: [0xc3] ; X64-AVX-NEXT: retq # encoding: [0xc3]
; ;
@ -1341,14 +1341,14 @@ define <4 x i32> @test_x86_avx2_psrlv_d_const() {
; X64-AVX512VL: # %bb.0: ; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,0,4294967295] ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,0,4294967295]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x45,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x45,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm1 # EVEX TO VEX Compression xmm1 = [4,4,4,4294967295] ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm1 # EVEX TO VEX Compression xmm1 = [4,4,4,4294967295]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A] ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x45,0x0d,A,A,A,A] ; X64-AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x45,0x0d,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1] ; X64-AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3] ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res0 = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> <i32 2, i32 9, i32 0, i32 -1>, <4 x i32> <i32 1, i32 0, i32 33, i32 -1>) %res0 = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> <i32 2, i32 9, i32 0, i32 -1>, <4 x i32> <i32 1, i32 0, i32 33, i32 -1>)
@ -1378,29 +1378,29 @@ define <8 x i32> @test_x86_avx2_psrlv_d_256_const() {
; X86-AVX: # %bb.0: ; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,0,4294967295,3,7,4294967295,0] ; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vpsrlvd {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x45,0x05,A,A,A,A] ; X86-AVX-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x45,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,4,4,4,4,4,4,4294967295] ; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,4,4,4,4,4,4,4294967295]
; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A] ; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vpsrlvd {{\.LCPI.*}}, %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x75,0x45,0x0d,A,A,A,A] ; X86-AVX-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x75,0x45,0x0d,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xfe,0xc1] ; X86-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xfe,0xc1]
; X86-AVX-NEXT: retl # encoding: [0xc3] ; X86-AVX-NEXT: retl # encoding: [0xc3]
; ;
; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_d_256_const: ; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_d_256_const:
; X86-AVX512VL: # %bb.0: ; X86-AVX512VL: # %bb.0:
; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,0,4294967295,3,7,4294967295,0] ; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI.*}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x45,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x45,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %ymm1 # EVEX TO VEX Compression ymm1 = [4,4,4,4,4,4,4,4294967295] ; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm1 # EVEX TO VEX Compression ymm1 = [4,4,4,4,4,4,4,4294967295]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A] ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI.*}}, %ymm1, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x45,0x0d,A,A,A,A] ; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x45,0x0d,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1] ; X86-AVX512VL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
; X86-AVX512VL-NEXT: retl # encoding: [0xc3] ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
; ;
@ -1408,14 +1408,14 @@ define <8 x i32> @test_x86_avx2_psrlv_d_256_const() {
; X64-AVX: # %bb.0: ; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,0,4294967295,3,7,4294967295,0] ; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x45,0x05,A,A,A,A] ; X64-AVX-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x45,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,4,4,4,4,4,4,4294967295] ; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,4,4,4,4,4,4,4294967295]
; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A] ; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsrlvd {{.*}}(%rip), %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x75,0x45,0x0d,A,A,A,A] ; X64-AVX-NEXT: vpsrlvd {{.*}}(%rip), %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x75,0x45,0x0d,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xfe,0xc1] ; X64-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xfe,0xc1]
; X64-AVX-NEXT: retq # encoding: [0xc3] ; X64-AVX-NEXT: retq # encoding: [0xc3]
; ;
@ -1423,14 +1423,14 @@ define <8 x i32> @test_x86_avx2_psrlv_d_256_const() {
; X64-AVX512VL: # %bb.0: ; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,0,4294967295,3,7,4294967295,0] ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x45,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x45,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm1 # EVEX TO VEX Compression ymm1 = [4,4,4,4,4,4,4,4294967295] ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm1 # EVEX TO VEX Compression ymm1 = [4,4,4,4,4,4,4,4294967295]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A] ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %ymm1, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x45,0x0d,A,A,A,A] ; X64-AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %ymm1, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x45,0x0d,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1] ; X64-AVX512VL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3] ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res0 = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> <i32 2, i32 9, i32 0, i32 -1, i32 3, i32 7, i32 -1, i32 0>, <8 x i32> <i32 1, i32 0, i32 33, i32 -1,i32 2, i32 0, i32 34, i32 -2>) %res0 = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> <i32 2, i32 9, i32 0, i32 -1, i32 3, i32 7, i32 -1, i32 0>, <8 x i32> <i32 1, i32 0, i32 33, i32 -1,i32 2, i32 0, i32 34, i32 -2>)
@ -1460,36 +1460,36 @@ define <2 x i64> @test_x86_avx2_psrlv_q_const() {
; X86-AVX: # %bb.0: ; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [4,0,4,0] ; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [4,0,4,0]
; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vpsrlvq {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x45,0x05,A,A,A,A] ; X86-AVX-NEXT: vpsrlvq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x45,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: retl # encoding: [0xc3] ; X86-AVX-NEXT: retl # encoding: [0xc3]
; ;
; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_q_const: ; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_q_const:
; X86-AVX512VL: # %bb.0: ; X86-AVX512VL: # %bb.0:
; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,0,4,0] ; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,0,4,0]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpsrlvq {{\.LCPI.*}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x45,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: vpsrlvq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x45,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl # encoding: [0xc3] ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
; ;
; X64-AVX-LABEL: test_x86_avx2_psrlv_q_const: ; X64-AVX-LABEL: test_x86_avx2_psrlv_q_const:
; X64-AVX: # %bb.0: ; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [4,4] ; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [4,4]
; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x45,0x05,A,A,A,A] ; X64-AVX-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x45,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3] ; X64-AVX-NEXT: retq # encoding: [0xc3]
; ;
; X64-AVX512VL-LABEL: test_x86_avx2_psrlv_q_const: ; X64-AVX512VL-LABEL: test_x86_avx2_psrlv_q_const:
; X64-AVX512VL: # %bb.0: ; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [4,4] ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [4,4]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x45,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x45,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq # encoding: [0xc3] ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> <i64 4, i64 4>, <2 x i64> <i64 1, i64 -1>) %res = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> <i64 4, i64 4>, <2 x i64> <i64 1, i64 -1>)
ret <2 x i64> %res ret <2 x i64> %res
@ -1517,36 +1517,36 @@ define <4 x i64> @test_x86_avx2_psrlv_q_256_const() {
; X86-AVX: # %bb.0: ; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [4,0,4,0,4,0,4,0] ; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [4,0,4,0,4,0,4,0]
; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vpsrlvq {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x45,0x05,A,A,A,A] ; X86-AVX-NEXT: vpsrlvq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x45,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: retl # encoding: [0xc3] ; X86-AVX-NEXT: retl # encoding: [0xc3]
; ;
; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_q_256_const: ; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_q_256_const:
; X86-AVX512VL: # %bb.0: ; X86-AVX512VL: # %bb.0:
; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,0,4,0,4,0,4,0] ; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,0,4,0,4,0,4,0]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpsrlvq {{\.LCPI.*}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x45,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: vpsrlvq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x45,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl # encoding: [0xc3] ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
; ;
; X64-AVX-LABEL: test_x86_avx2_psrlv_q_256_const: ; X64-AVX-LABEL: test_x86_avx2_psrlv_q_256_const:
; X64-AVX: # %bb.0: ; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpbroadcastq {{.*#+}} ymm0 = [4,4,4,4] ; X64-AVX-NEXT: vpbroadcastq {{.*#+}} ymm0 = [4,4,4,4]
; X64-AVX-NEXT: # encoding: [0xc4,0xe2,0x7d,0x59,0x05,A,A,A,A] ; X64-AVX-NEXT: # encoding: [0xc4,0xe2,0x7d,0x59,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x45,0x05,A,A,A,A] ; X64-AVX-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x45,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3] ; X64-AVX-NEXT: retq # encoding: [0xc3]
; ;
; X64-AVX512VL-LABEL: test_x86_avx2_psrlv_q_256_const: ; X64-AVX512VL-LABEL: test_x86_avx2_psrlv_q_256_const:
; X64-AVX512VL: # %bb.0: ; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vpbroadcastq {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4] ; X64-AVX512VL-NEXT: vpbroadcastq {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4]
; X64-AVX512VL-NEXT: # encoding: [0xc4,0xe2,0x7d,0x59,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: # encoding: [0xc4,0xe2,0x7d,0x59,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x45,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x45,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq # encoding: [0xc3] ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> <i64 4, i64 4, i64 4, i64 4>, <4 x i64> <i64 1, i64 1, i64 1, i64 -1>) %res = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> <i64 4, i64 4, i64 4, i64 4>, <4 x i64> <i64 1, i64 1, i64 1, i64 -1>)
ret <4 x i64> %res ret <4 x i64> %res
@ -1573,36 +1573,36 @@ define <4 x i32> @test_x86_avx2_psrav_d_const() {
; X86-AVX: # %bb.0: ; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,4294967284,23] ; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,4294967284,23]
; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vpsravd {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A] ; X86-AVX-NEXT: vpsravd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: retl # encoding: [0xc3] ; X86-AVX-NEXT: retl # encoding: [0xc3]
; ;
; X86-AVX512VL-LABEL: test_x86_avx2_psrav_d_const: ; X86-AVX512VL-LABEL: test_x86_avx2_psrav_d_const:
; X86-AVX512VL: # %bb.0: ; X86-AVX512VL: # %bb.0:
; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,4294967284,23] ; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,4294967284,23]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpsravd {{\.LCPI.*}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: vpsravd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl # encoding: [0xc3] ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
; ;
; X64-AVX-LABEL: test_x86_avx2_psrav_d_const: ; X64-AVX-LABEL: test_x86_avx2_psrav_d_const:
; X64-AVX: # %bb.0: ; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,4294967284,23] ; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,4294967284,23]
; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A] ; X64-AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3] ; X64-AVX-NEXT: retq # encoding: [0xc3]
; ;
; X64-AVX512VL-LABEL: test_x86_avx2_psrav_d_const: ; X64-AVX512VL-LABEL: test_x86_avx2_psrav_d_const:
; X64-AVX512VL: # %bb.0: ; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,4294967284,23] ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,4294967284,23]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq # encoding: [0xc3] ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> <i32 2, i32 9, i32 -12, i32 23>, <4 x i32> <i32 1, i32 18, i32 35, i32 52>) %res = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> <i32 2, i32 9, i32 -12, i32 23>, <4 x i32> <i32 1, i32 18, i32 35, i32 52>)
ret <4 x i32> %res ret <4 x i32> %res
@ -1628,36 +1628,36 @@ define <8 x i32> @test_x86_avx2_psrav_d_256_const() {
; X86-AVX: # %bb.0: ; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51] ; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vpsravd {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A] ; X86-AVX-NEXT: vpsravd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: retl # encoding: [0xc3] ; X86-AVX-NEXT: retl # encoding: [0xc3]
; ;
; X86-AVX512VL-LABEL: test_x86_avx2_psrav_d_256_const: ; X86-AVX512VL-LABEL: test_x86_avx2_psrav_d_256_const:
; X86-AVX512VL: # %bb.0: ; X86-AVX512VL: # %bb.0:
; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51] ; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpsravd {{\.LCPI.*}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A] ; X86-AVX512VL-NEXT: vpsravd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl # encoding: [0xc3] ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
; ;
; X64-AVX-LABEL: test_x86_avx2_psrav_d_256_const: ; X64-AVX-LABEL: test_x86_avx2_psrav_d_256_const:
; X64-AVX: # %bb.0: ; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51] ; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A] ; X64-AVX-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3] ; X64-AVX-NEXT: retq # encoding: [0xc3]
; ;
; X64-AVX512VL-LABEL: test_x86_avx2_psrav_d_256_const: ; X64-AVX512VL-LABEL: test_x86_avx2_psrav_d_256_const:
; X64-AVX512VL: # %bb.0: ; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51] ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A] ; X64-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq # encoding: [0xc3] ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> <i32 2, i32 9, i32 -12, i32 23, i32 -26, i32 37, i32 -40, i32 51>, <8 x i32> <i32 1, i32 18, i32 35, i32 52, i32 69, i32 15, i32 32, i32 49>) %res = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> <i32 2, i32 9, i32 -12, i32 23, i32 -26, i32 37, i32 -40, i32 51>, <8 x i32> <i32 1, i32 18, i32 35, i32 52, i32 69, i32 15, i32 32, i32 49>)
ret <8 x i32> %res ret <8 x i32> %res

View File

@ -15,21 +15,21 @@ define i32 @f(<8 x float> %A, i8* %B, <4 x double> %C, <4 x i64> %E, <8 x i32> %
; X32-NEXT: movl 8(%ebp), %ecx ; X32-NEXT: movl 8(%ebp), %ecx
; X32-NEXT: movl 136(%ebp), %edx ; X32-NEXT: movl 136(%ebp), %edx
; X32-NEXT: movl (%edx), %eax ; X32-NEXT: movl (%edx), %eax
; X32-NEXT: vaddps {{\.LCPI.*}}, %ymm0, %ymm0 ; X32-NEXT: vaddps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: vmovntps %ymm0, (%ecx) ; X32-NEXT: vmovntps %ymm0, (%ecx)
; X32-NEXT: vpaddq {{\.LCPI.*}}, %ymm2, %ymm0 ; X32-NEXT: vpaddq {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm0
; X32-NEXT: addl (%edx), %eax ; X32-NEXT: addl (%edx), %eax
; X32-NEXT: vmovntdq %ymm0, (%ecx) ; X32-NEXT: vmovntdq %ymm0, (%ecx)
; X32-NEXT: vaddpd {{\.LCPI.*}}, %ymm1, %ymm0 ; X32-NEXT: vaddpd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
; X32-NEXT: addl (%edx), %eax ; X32-NEXT: addl (%edx), %eax
; X32-NEXT: vmovntpd %ymm0, (%ecx) ; X32-NEXT: vmovntpd %ymm0, (%ecx)
; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm5, %ymm0 ; X32-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}, %ymm5, %ymm0
; X32-NEXT: addl (%edx), %eax ; X32-NEXT: addl (%edx), %eax
; X32-NEXT: vmovntdq %ymm0, (%ecx) ; X32-NEXT: vmovntdq %ymm0, (%ecx)
; X32-NEXT: vpaddw {{\.LCPI.*}}, %ymm4, %ymm0 ; X32-NEXT: vpaddw {{\.LCPI[0-9]+_[0-9]+}}, %ymm4, %ymm0
; X32-NEXT: addl (%edx), %eax ; X32-NEXT: addl (%edx), %eax
; X32-NEXT: vmovntdq %ymm0, (%ecx) ; X32-NEXT: vmovntdq %ymm0, (%ecx)
; X32-NEXT: vpaddb {{\.LCPI.*}}, %ymm3, %ymm0 ; X32-NEXT: vpaddb {{\.LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm0
; X32-NEXT: addl (%edx), %eax ; X32-NEXT: addl (%edx), %eax
; X32-NEXT: vmovntdq %ymm0, (%ecx) ; X32-NEXT: vmovntdq %ymm0, (%ecx)
; X32-NEXT: movl %ebp, %esp ; X32-NEXT: movl %ebp, %esp

View File

@ -424,7 +424,7 @@ define <32 x i8> @shl9(<32 x i8> %A) nounwind {
; X32-LABEL: shl9: ; X32-LABEL: shl9:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: vpsllw $3, %ymm0, %ymm0 ; X32-NEXT: vpsllw $3, %ymm0, %ymm0
; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0 ; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: shl9: ; X64-LABEL: shl9:
@ -440,7 +440,7 @@ define <32 x i8> @shr9(<32 x i8> %A) nounwind {
; X32-LABEL: shr9: ; X32-LABEL: shr9:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: vpsrlw $3, %ymm0, %ymm0 ; X32-NEXT: vpsrlw $3, %ymm0, %ymm0
; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0 ; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: shr9: ; X64-LABEL: shr9:
@ -472,7 +472,7 @@ define <32 x i8> @sra_v32i8(<32 x i8> %A) nounwind {
; X32-LABEL: sra_v32i8: ; X32-LABEL: sra_v32i8:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: vpsrlw $3, %ymm0, %ymm0 ; X32-NEXT: vpsrlw $3, %ymm0, %ymm0
; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0 ; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] ; X32-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; X32-NEXT: vpxor %ymm1, %ymm0, %ymm0 ; X32-NEXT: vpxor %ymm1, %ymm0, %ymm0
; X32-NEXT: vpsubb %ymm1, %ymm0, %ymm0 ; X32-NEXT: vpsubb %ymm1, %ymm0, %ymm0

View File

@ -7,7 +7,7 @@ define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vaddpd {{\.LCPI.*}}, %ymm0, %ymm0 ; X32-NEXT: vaddpd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: test_broadcast_2f64_4f64: ; X64-LABEL: test_broadcast_2f64_4f64:
@ -26,7 +26,7 @@ define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vpaddq {{\.LCPI.*}}, %ymm0, %ymm0 ; X32-NEXT: vpaddq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: test_broadcast_2i64_4i64: ; X64-LABEL: test_broadcast_2i64_4i64:
@ -45,7 +45,7 @@ define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vaddps {{\.LCPI.*}}, %ymm0, %ymm0 ; X32-NEXT: vaddps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: test_broadcast_4f32_8f32: ; X64-LABEL: test_broadcast_4f32_8f32:
@ -64,7 +64,7 @@ define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm0, %ymm0 ; X32-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: test_broadcast_4i32_8i32: ; X64-LABEL: test_broadcast_4i32_8i32:
@ -83,7 +83,7 @@ define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vpaddw {{\.LCPI.*}}, %ymm0, %ymm0 ; X32-NEXT: vpaddw {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: test_broadcast_8i16_16i16: ; X64-LABEL: test_broadcast_8i16_16i16:
@ -102,7 +102,7 @@ define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vpaddb {{\.LCPI.*}}, %ymm0, %ymm0 ; X32-NEXT: vpaddb {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: test_broadcast_16i8_32i8: ; X64-LABEL: test_broadcast_16i8_32i8:
@ -122,7 +122,7 @@ define <4 x double> @test_broadcast_2f64_4f64_reuse(<2 x double>* %p0, <2 x doub
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1] ; X32-NEXT: vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
; X32-NEXT: vaddpd {{\.LCPI.*}}, %ymm1, %ymm0 ; X32-NEXT: vaddpd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
; X32-NEXT: vmovapd %xmm1, (%eax) ; X32-NEXT: vmovapd %xmm1, (%eax)
; X32-NEXT: retl ; X32-NEXT: retl
; ;
@ -145,7 +145,7 @@ define <4 x i64> @test_broadcast_2i64_4i64_reuse(<2 x i64>* %p0, <2 x i64>* %p1)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] ; X32-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
; X32-NEXT: vpaddq {{\.LCPI.*}}, %ymm1, %ymm0 ; X32-NEXT: vpaddq {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
; X32-NEXT: vmovdqa %xmm1, (%eax) ; X32-NEXT: vmovdqa %xmm1, (%eax)
; X32-NEXT: retl ; X32-NEXT: retl
; ;
@ -168,7 +168,7 @@ define <8 x float> @test_broadcast_4f32_8f32_reuse(<4 x float>* %p0, <4 x float>
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1] ; X32-NEXT: vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
; X32-NEXT: vaddps {{\.LCPI.*}}, %ymm1, %ymm0 ; X32-NEXT: vaddps {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
; X32-NEXT: vmovaps %xmm1, (%eax) ; X32-NEXT: vmovaps %xmm1, (%eax)
; X32-NEXT: retl ; X32-NEXT: retl
; ;
@ -191,7 +191,7 @@ define <8 x i32> @test_broadcast_4i32_8i32_reuse(<4 x i32>* %p0, <4 x i32>* %p1)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] ; X32-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm1, %ymm0 ; X32-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
; X32-NEXT: vmovdqa %xmm1, (%eax) ; X32-NEXT: vmovdqa %xmm1, (%eax)
; X32-NEXT: retl ; X32-NEXT: retl
; ;
@ -214,7 +214,7 @@ define <16 x i16> @test_broadcast_8i16_16i16_reuse(<8 x i16> *%p0, <8 x i16> *%p
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] ; X32-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
; X32-NEXT: vpaddw {{\.LCPI.*}}, %ymm1, %ymm0 ; X32-NEXT: vpaddw {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
; X32-NEXT: vmovdqa %xmm1, (%eax) ; X32-NEXT: vmovdqa %xmm1, (%eax)
; X32-NEXT: retl ; X32-NEXT: retl
; ;
@ -237,7 +237,7 @@ define <32 x i8> @test_broadcast_16i8_32i8_reuse(<16 x i8> *%p0, <16 x i8> *%p1)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] ; X32-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
; X32-NEXT: vpaddb {{\.LCPI.*}}, %ymm1, %ymm0 ; X32-NEXT: vpaddb {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
; X32-NEXT: vmovdqa %xmm1, (%eax) ; X32-NEXT: vmovdqa %xmm1, (%eax)
; X32-NEXT: retl ; X32-NEXT: retl
; ;

View File

@ -486,10 +486,10 @@ define <32 x i8> @shl_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: vpsllw $5, %ymm1, %ymm1 ; X32-NEXT: vpsllw $5, %ymm1, %ymm1
; X32-NEXT: vpsllw $4, %ymm0, %ymm2 ; X32-NEXT: vpsllw $4, %ymm0, %ymm2
; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2 ; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2
; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X32-NEXT: vpsllw $2, %ymm0, %ymm2 ; X32-NEXT: vpsllw $2, %ymm0, %ymm2
; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2 ; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2
; X32-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; X32-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X32-NEXT: vpaddb %ymm0, %ymm0, %ymm2 ; X32-NEXT: vpaddb %ymm0, %ymm0, %ymm2
@ -692,14 +692,14 @@ define <32 x i8> @lshr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: vpsllw $5, %ymm1, %ymm1 ; X32-NEXT: vpsllw $5, %ymm1, %ymm1
; X32-NEXT: vpsrlw $4, %ymm0, %ymm2 ; X32-NEXT: vpsrlw $4, %ymm0, %ymm2
; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2 ; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2
; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X32-NEXT: vpsrlw $2, %ymm0, %ymm2 ; X32-NEXT: vpsrlw $2, %ymm0, %ymm2
; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2 ; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2
; X32-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; X32-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X32-NEXT: vpsrlw $1, %ymm0, %ymm2 ; X32-NEXT: vpsrlw $1, %ymm0, %ymm2
; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2 ; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2
; X32-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; X32-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X32-NEXT: retl ; X32-NEXT: retl

View File

@ -1840,7 +1840,7 @@ define <2 x double> @test_mm_cvtu64_sd(<2 x double> %__A, i64 %__B) {
; X86-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero ; X86-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 ; X86-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
; X86-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] ; X86-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
; X86-NEXT: vsubpd {{\.LCPI.*}}, %xmm1, %xmm1 ; X86-NEXT: vsubpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
; X86-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0] ; X86-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; X86-NEXT: vaddsd %xmm1, %xmm2, %xmm1 ; X86-NEXT: vaddsd %xmm1, %xmm2, %xmm1
; X86-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; X86-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
@ -1888,7 +1888,7 @@ define <4 x float> @test_mm_cvtu64_ss(<4 x float> %__A, i64 %__B) {
; X86-NEXT: vmovq %xmm1, {{[0-9]+}}(%esp) ; X86-NEXT: vmovq %xmm1, {{[0-9]+}}(%esp)
; X86-NEXT: shrl $31, %eax ; X86-NEXT: shrl $31, %eax
; X86-NEXT: fildll {{[0-9]+}}(%esp) ; X86-NEXT: fildll {{[0-9]+}}(%esp)
; X86-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; X86-NEXT: fstps {{[0-9]+}}(%esp) ; X86-NEXT: fstps {{[0-9]+}}(%esp)
; X86-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero ; X86-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] ; X86-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
@ -3118,7 +3118,7 @@ entry:
define <8 x double> @test_mm512_fmsub_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { define <8 x double> @test_mm512_fmsub_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_fmsub_round_pd: ; X86-LABEL: test_mm512_fmsub_round_pd:
; X86: # %bb.0: # %entry ; X86: # %bb.0: # %entry
; X86-NEXT: vpxorq {{\.LCPI.*}}{1to8}, %zmm2, %zmm2 ; X86-NEXT: vpxorq {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %zmm2, %zmm2
; X86-NEXT: vfmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 ; X86-NEXT: vfmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
@ -3178,7 +3178,7 @@ entry:
define <8 x double> @test_mm512_fnmadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { define <8 x double> @test_mm512_fnmadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_fnmadd_round_pd: ; X86-LABEL: test_mm512_fnmadd_round_pd:
; X86: # %bb.0: # %entry ; X86: # %bb.0: # %entry
; X86-NEXT: vpxorq {{\.LCPI.*}}{1to8}, %zmm0, %zmm0 ; X86-NEXT: vpxorq {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %zmm0, %zmm0
; X86-NEXT: vfmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 ; X86-NEXT: vfmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
@ -3349,7 +3349,7 @@ entry:
define <8 x double> @test_mm512_fmsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { define <8 x double> @test_mm512_fmsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_fmsub_pd: ; X86-LABEL: test_mm512_fmsub_pd:
; X86: # %bb.0: # %entry ; X86: # %bb.0: # %entry
; X86-NEXT: vpxorq {{\.LCPI.*}}{1to8}, %zmm2, %zmm2 ; X86-NEXT: vpxorq {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %zmm2, %zmm2
; X86-NEXT: vfmadd213pd {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2 ; X86-NEXT: vfmadd213pd {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2
; X86-NEXT: retl ; X86-NEXT: retl
; ;
@ -3409,7 +3409,7 @@ entry:
define <8 x double> @test_mm512_fnmadd_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { define <8 x double> @test_mm512_fnmadd_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_fnmadd_pd: ; X86-LABEL: test_mm512_fnmadd_pd:
; X86: # %bb.0: # %entry ; X86: # %bb.0: # %entry
; X86-NEXT: vpxorq {{\.LCPI.*}}{1to8}, %zmm0, %zmm0 ; X86-NEXT: vpxorq {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %zmm0, %zmm0
; X86-NEXT: vfmadd213pd {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2 ; X86-NEXT: vfmadd213pd {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2
; X86-NEXT: retl ; X86-NEXT: retl
; ;
@ -3582,7 +3582,7 @@ entry:
define <16 x float> @test_mm512_fmsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { define <16 x float> @test_mm512_fmsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_fmsub_round_ps: ; X86-LABEL: test_mm512_fmsub_round_ps:
; X86: # %bb.0: # %entry ; X86: # %bb.0: # %entry
; X86-NEXT: vpxord {{\.LCPI.*}}{1to16}, %zmm2, %zmm2 ; X86-NEXT: vpxord {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm2, %zmm2
; X86-NEXT: vfmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 ; X86-NEXT: vfmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
@ -3642,7 +3642,7 @@ entry:
define <16 x float> @test_mm512_fnmadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { define <16 x float> @test_mm512_fnmadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_fnmadd_round_ps: ; X86-LABEL: test_mm512_fnmadd_round_ps:
; X86: # %bb.0: # %entry ; X86: # %bb.0: # %entry
; X86-NEXT: vpxord {{\.LCPI.*}}{1to16}, %zmm0, %zmm0 ; X86-NEXT: vpxord {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
; X86-NEXT: vfmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 ; X86-NEXT: vfmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
@ -3813,7 +3813,7 @@ entry:
define <16 x float> @test_mm512_fmsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { define <16 x float> @test_mm512_fmsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_fmsub_ps: ; X86-LABEL: test_mm512_fmsub_ps:
; X86: # %bb.0: # %entry ; X86: # %bb.0: # %entry
; X86-NEXT: vpxord {{\.LCPI.*}}{1to16}, %zmm2, %zmm2 ; X86-NEXT: vpxord {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm2, %zmm2
; X86-NEXT: vfmadd213ps {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2 ; X86-NEXT: vfmadd213ps {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2
; X86-NEXT: retl ; X86-NEXT: retl
; ;
@ -3873,7 +3873,7 @@ entry:
define <16 x float> @test_mm512_fnmadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { define <16 x float> @test_mm512_fnmadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_fnmadd_ps: ; X86-LABEL: test_mm512_fnmadd_ps:
; X86: # %bb.0: # %entry ; X86: # %bb.0: # %entry
; X86-NEXT: vpxord {{\.LCPI.*}}{1to16}, %zmm0, %zmm0 ; X86-NEXT: vpxord {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
; X86-NEXT: vfmadd213ps {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2 ; X86-NEXT: vfmadd213ps {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2
; X86-NEXT: retl ; X86-NEXT: retl
; ;
@ -4046,7 +4046,7 @@ entry:
define <8 x double> @test_mm512_fmsubadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { define <8 x double> @test_mm512_fmsubadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_fmsubadd_round_pd: ; X86-LABEL: test_mm512_fmsubadd_round_pd:
; X86: # %bb.0: # %entry ; X86: # %bb.0: # %entry
; X86-NEXT: vpxorq {{\.LCPI.*}}{1to8}, %zmm2, %zmm2 ; X86-NEXT: vpxorq {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %zmm2, %zmm2
; X86-NEXT: vfmaddsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0 ; X86-NEXT: vfmaddsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
@ -4323,7 +4323,7 @@ entry:
define <16 x float> @test_mm512_fmsubadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { define <16 x float> @test_mm512_fmsubadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_fmsubadd_round_ps: ; X86-LABEL: test_mm512_fmsubadd_round_ps:
; X86: # %bb.0: # %entry ; X86: # %bb.0: # %entry
; X86-NEXT: vpxord {{\.LCPI.*}}{1to16}, %zmm2, %zmm2 ; X86-NEXT: vpxord {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm2, %zmm2
; X86-NEXT: vfmaddsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0 ; X86-NEXT: vfmaddsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;

View File

@ -7118,9 +7118,9 @@ define <16 x i32> @test_x86_avx512_psllv_d_512_const() {
; X86-LABEL: test_x86_avx512_psllv_d_512_const: ; X86-LABEL: test_x86_avx512_psllv_d_512_const:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,9,0,4294967295,3,7,4294967295,0,4,5,4294967294,0,5,3,4294967293,0] ; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,9,0,4294967295,3,7,4294967295,0,4,5,4294967294,0,5,3,4294967293,0]
; X86-NEXT: vpsllvd {{\.LCPI.*}}, %zmm0, %zmm0 ; X86-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0
; X86-NEXT: vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4294967295] ; X86-NEXT: vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4294967295]
; X86-NEXT: vpsllvd {{\.LCPI.*}}, %zmm1, %zmm1 ; X86-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %zmm1, %zmm1
; X86-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; X86-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; X86-NEXT: retl ; X86-NEXT: retl
%res0 = call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> <i32 2, i32 9, i32 0, i32 -1, i32 3, i32 7, i32 -1, i32 0, i32 4, i32 5, i32 -2, i32 0, i32 5, i32 3, i32 -3, i32 0>, <16 x i32> <i32 1, i32 0, i32 33, i32 -1,i32 2, i32 0, i32 34, i32 -2, i32 3, i32 0, i32 35, i32 -1, i32 4, i32 0, i32 36, i32 -3>) %res0 = call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> <i32 2, i32 9, i32 0, i32 -1, i32 3, i32 7, i32 -1, i32 0, i32 4, i32 5, i32 -2, i32 0, i32 5, i32 3, i32 -3, i32 0>, <16 x i32> <i32 1, i32 0, i32 33, i32 -1,i32 2, i32 0, i32 34, i32 -2, i32 3, i32 0, i32 35, i32 -1, i32 4, i32 0, i32 36, i32 -3>)
@ -7191,9 +7191,9 @@ define <8 x i64> @test_x86_avx512_psllv_q_512_const() {
; X86-LABEL: test_x86_avx512_psllv_q_512_const: ; X86-LABEL: test_x86_avx512_psllv_q_512_const:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,0,9,0,0,0,4294967295,4294967295,3,0,7,0,4294967295,4294967295,0,0] ; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,0,9,0,0,0,4294967295,4294967295,3,0,7,0,4294967295,4294967295,0,0]
; X86-NEXT: vpsllvq {{\.LCPI.*}}, %zmm0, %zmm0 ; X86-NEXT: vpsllvq {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0
; X86-NEXT: vmovdqa64 {{.*#+}} zmm1 = [4,0,4,0,4,0,4,0,4,0,4,0,4,0,4294967295,4294967295] ; X86-NEXT: vmovdqa64 {{.*#+}} zmm1 = [4,0,4,0,4,0,4,0,4,0,4,0,4,0,4294967295,4294967295]
; X86-NEXT: vpsllvq {{\.LCPI.*}}, %zmm1, %zmm1 ; X86-NEXT: vpsllvq {{\.LCPI[0-9]+_[0-9]+}}, %zmm1, %zmm1
; X86-NEXT: vpaddq %zmm1, %zmm0, %zmm0 ; X86-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; X86-NEXT: retl ; X86-NEXT: retl
%res0 = call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> <i64 2, i64 9, i64 0, i64 -1, i64 3, i64 7, i64 -1, i64 0>, <8 x i64> <i64 1, i64 0, i64 33, i64 -1,i64 2, i64 0, i64 34, i64 -2>) %res0 = call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> <i64 2, i64 9, i64 0, i64 -1, i64 3, i64 7, i64 -1, i64 0>, <8 x i64> <i64 1, i64 0, i64 33, i64 -1,i64 2, i64 0, i64 34, i64 -2>)
@ -7366,9 +7366,9 @@ define <16 x i32> @test_x86_avx512_psrlv_d_512_const() {
; X86-LABEL: test_x86_avx512_psrlv_d_512_const: ; X86-LABEL: test_x86_avx512_psrlv_d_512_const:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,9,0,4294967295,3,7,4294967295,0,4,5,4294967294,0,5,3,4294967293,0] ; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,9,0,4294967295,3,7,4294967295,0,4,5,4294967294,0,5,3,4294967293,0]
; X86-NEXT: vpsrlvd {{\.LCPI.*}}, %zmm0, %zmm0 ; X86-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0
; X86-NEXT: vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4294967295] ; X86-NEXT: vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4294967295]
; X86-NEXT: vpsrlvd {{\.LCPI.*}}, %zmm1, %zmm1 ; X86-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %zmm1, %zmm1
; X86-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; X86-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; X86-NEXT: retl ; X86-NEXT: retl
%res0 = call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> <i32 2, i32 9, i32 0, i32 -1, i32 3, i32 7, i32 -1, i32 0, i32 4, i32 5, i32 -2, i32 0, i32 5, i32 3, i32 -3, i32 0>, <16 x i32> <i32 1, i32 0, i32 33, i32 -1,i32 2, i32 0, i32 34, i32 -2, i32 3, i32 0, i32 35, i32 -1, i32 4, i32 0, i32 36, i32 -3>) %res0 = call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> <i32 2, i32 9, i32 0, i32 -1, i32 3, i32 7, i32 -1, i32 0, i32 4, i32 5, i32 -2, i32 0, i32 5, i32 3, i32 -3, i32 0>, <16 x i32> <i32 1, i32 0, i32 33, i32 -1,i32 2, i32 0, i32 34, i32 -2, i32 3, i32 0, i32 35, i32 -1, i32 4, i32 0, i32 36, i32 -3>)
@ -7439,9 +7439,9 @@ define <8 x i64> @test_x86_avx512_psrlv_q_512_const() {
; X86-LABEL: test_x86_avx512_psrlv_q_512_const: ; X86-LABEL: test_x86_avx512_psrlv_q_512_const:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,0,9,0,0,0,4294967295,4294967295,3,0,7,0,4294967295,4294967295,0,0] ; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,0,9,0,0,0,4294967295,4294967295,3,0,7,0,4294967295,4294967295,0,0]
; X86-NEXT: vpsrlvq {{\.LCPI.*}}, %zmm0, %zmm0 ; X86-NEXT: vpsrlvq {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0
; X86-NEXT: vmovdqa64 {{.*#+}} zmm1 = [4,0,4,0,4,0,4,0,4,0,4,0,4,0,4294967295,4294967295] ; X86-NEXT: vmovdqa64 {{.*#+}} zmm1 = [4,0,4,0,4,0,4,0,4,0,4,0,4,0,4294967295,4294967295]
; X86-NEXT: vpsrlvq {{\.LCPI.*}}, %zmm1, %zmm1 ; X86-NEXT: vpsrlvq {{\.LCPI[0-9]+_[0-9]+}}, %zmm1, %zmm1
; X86-NEXT: vpaddq %zmm1, %zmm0, %zmm0 ; X86-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; X86-NEXT: retl ; X86-NEXT: retl
%res0 = call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> <i64 2, i64 9, i64 0, i64 -1, i64 3, i64 7, i64 -1, i64 0>, <8 x i64> <i64 1, i64 0, i64 33, i64 -1,i64 2, i64 0, i64 34, i64 -2>) %res0 = call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> <i64 2, i64 9, i64 0, i64 -1, i64 3, i64 7, i64 -1, i64 0>, <8 x i64> <i64 1, i64 0, i64 33, i64 -1,i64 2, i64 0, i64 34, i64 -2>)

View File

@ -1292,18 +1292,18 @@ define <32 x i16> @test_x86_avx512_psrlv_w_512_const() optsize {
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535] ; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
; X86-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A] ; X86-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: vpsrlvw {{\.LCPI.*}}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x10,0x05,A,A,A,A] ; X86-NEXT: vpsrlvw {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x10,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3] ; X86-NEXT: retl # encoding: [0xc3]
; ;
; X64-LABEL: test_x86_avx512_psrlv_w_512_const: ; X64-LABEL: test_x86_avx512_psrlv_w_512_const:
; X64: # %bb.0: ; X64: # %bb.0:
; X64-NEXT: vmovdqa64 {{.*#+}} zmm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535] ; X64-NEXT: vmovdqa64 {{.*#+}} zmm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
; X64-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A] ; X64-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x10,0x05,A,A,A,A] ; X64-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x10,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: retq # encoding: [0xc3] ; X64-NEXT: retq # encoding: [0xc3]
%res1 = call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>) %res1 = call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>)
ret <32 x i16> %res1 ret <32 x i16> %res1
@ -1410,18 +1410,18 @@ define <32 x i16>@test_int_x86_avx512_mask_psrav32_hi_const(<32 x i16> %x0, <32
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51] ; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51]
; X86-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A] ; X86-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: vpsravw {{\.LCPI.*}}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x11,0x05,A,A,A,A] ; X86-NEXT: vpsravw {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x11,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3] ; X86-NEXT: retl # encoding: [0xc3]
; ;
; X64-LABEL: test_int_x86_avx512_mask_psrav32_hi_const: ; X64-LABEL: test_int_x86_avx512_mask_psrav32_hi_const:
; X64: # %bb.0: ; X64: # %bb.0:
; X64-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51] ; X64-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51]
; X64-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A] ; X64-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsravw {{.*}}(%rip), %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x11,0x05,A,A,A,A] ; X64-NEXT: vpsravw {{.*}}(%rip), %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x11,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: retq # encoding: [0xc3] ; X64-NEXT: retq # encoding: [0xc3]
%1 = call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> <i16 2, i16 9, i16 -12, i16 23, i16 -26, i16 37, i16 -40, i16 51, i16 2, i16 9, i16 -12, i16 23, i16 -26, i16 37, i16 -40, i16 51, i16 2, i16 9, i16 -12, i16 23, i16 -26, i16 37, i16 -40, i16 51, i16 2, i16 9, i16 -12, i16 23, i16 -26, i16 37, i16 -40, i16 51>, <32 x i16> <i16 1, i16 10, i16 35, i16 52, i16 69, i16 9, i16 16, i16 49, i16 1, i16 10, i16 35, i16 52, i16 69, i16 9, i16 16, i16 49, i16 1, i16 10, i16 35, i16 52, i16 69, i16 9, i16 16, i16 49, i16 1, i16 10, i16 35, i16 52, i16 69, i16 9, i16 16, i16 49>) %1 = call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> <i16 2, i16 9, i16 -12, i16 23, i16 -26, i16 37, i16 -40, i16 51, i16 2, i16 9, i16 -12, i16 23, i16 -26, i16 37, i16 -40, i16 51, i16 2, i16 9, i16 -12, i16 23, i16 -26, i16 37, i16 -40, i16 51, i16 2, i16 9, i16 -12, i16 23, i16 -26, i16 37, i16 -40, i16 51>, <32 x i16> <i16 1, i16 10, i16 35, i16 52, i16 69, i16 9, i16 16, i16 49, i16 1, i16 10, i16 35, i16 52, i16 69, i16 9, i16 16, i16 49, i16 1, i16 10, i16 35, i16 52, i16 69, i16 9, i16 16, i16 49, i16 1, i16 10, i16 35, i16 52, i16 69, i16 9, i16 16, i16 49>)
ret <32 x i16> %1 ret <32 x i16> %1
@ -1575,18 +1575,18 @@ define <32 x i16> @test_x86_avx512_psllv_w_512_const() optsize {
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535] ; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
; X86-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A] ; X86-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: vpsllvw {{\.LCPI.*}}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x12,0x05,A,A,A,A] ; X86-NEXT: vpsllvw {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x12,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3] ; X86-NEXT: retl # encoding: [0xc3]
; ;
; X64-LABEL: test_x86_avx512_psllv_w_512_const: ; X64-LABEL: test_x86_avx512_psllv_w_512_const:
; X64: # %bb.0: ; X64: # %bb.0:
; X64-NEXT: vmovdqa64 {{.*#+}} zmm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535] ; X64-NEXT: vmovdqa64 {{.*#+}} zmm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
; X64-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A] ; X64-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x12,0x05,A,A,A,A] ; X64-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x12,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: retq # encoding: [0xc3] ; X64-NEXT: retq # encoding: [0xc3]
%res1 = call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>) %res1 = call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>)
ret <32 x i16> %res1 ret <32 x i16> %res1

View File

@ -2153,20 +2153,20 @@ define <8 x i16>@test_int_x86_avx512_maskz_psrlv8_hi(<8 x i16> %x0, <8 x i16> %x
define <8 x i16> @test_int_x86_avx512_psrlv_w_128_const() optsize { define <8 x i16> @test_int_x86_avx512_psrlv_w_128_const() optsize {
; X86-LABEL: test_int_x86_avx512_psrlv_w_128_const: ; X86-LABEL: test_int_x86_avx512_psrlv_w_128_const:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535] ; X86-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535]
; X86-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X86-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: vpsrlvw {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x10,0x05,A,A,A,A] ; X86-NEXT: vpsrlvw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x10,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3] ; X86-NEXT: retl # encoding: [0xc3]
; ;
; X64-LABEL: test_int_x86_avx512_psrlv_w_128_const: ; X64-LABEL: test_int_x86_avx512_psrlv_w_128_const:
; X64: # %bb.0: ; X64: # %bb.0:
; X64-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535] ; X64-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535]
; X64-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X64-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsrlvw {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x10,0x05,A,A,A,A] ; X64-NEXT: vpsrlvw {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x10,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: retq # encoding: [0xc3] ; X64-NEXT: retq # encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.psrlv.w.128(<8 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>) %res = call <8 x i16> @llvm.x86.avx512.psrlv.w.128(<8 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>)
ret <8 x i16> %res ret <8 x i16> %res
@ -2177,20 +2177,20 @@ declare <8 x i16> @llvm.x86.avx512.psrlv.w.128(<8 x i16>, <8 x i16>)
define <16 x i16> @test_int_x86_avx512_psrlv_w_256_const() optsize { define <16 x i16> @test_int_x86_avx512_psrlv_w_256_const() optsize {
; X86-LABEL: test_int_x86_avx512_psrlv_w_256_const: ; X86-LABEL: test_int_x86_avx512_psrlv_w_256_const:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535] ; X86-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
; X86-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X86-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: vpsrlvw {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x10,0x05,A,A,A,A] ; X86-NEXT: vpsrlvw {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x10,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3] ; X86-NEXT: retl # encoding: [0xc3]
; ;
; X64-LABEL: test_int_x86_avx512_psrlv_w_256_const: ; X64-LABEL: test_int_x86_avx512_psrlv_w_256_const:
; X64: # %bb.0: ; X64: # %bb.0:
; X64-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535] ; X64-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
; X64-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X64-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsrlvw {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x10,0x05,A,A,A,A] ; X64-NEXT: vpsrlvw {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x10,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: retq # encoding: [0xc3] ; X64-NEXT: retq # encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.psrlv.w.256(<16 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>) %res = call <16 x i16> @llvm.x86.avx512.psrlv.w.256(<16 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>)
ret <16 x i16> %res ret <16 x i16> %res
@ -2397,20 +2397,20 @@ define <8 x i16>@test_int_x86_avx512_maskz_psllv8_hi(<8 x i16> %x0, <8 x i16> %x
define <8 x i16> @test_int_x86_avx512_psllv_w_128_const() optsize { define <8 x i16> @test_int_x86_avx512_psllv_w_128_const() optsize {
; X86-LABEL: test_int_x86_avx512_psllv_w_128_const: ; X86-LABEL: test_int_x86_avx512_psllv_w_128_const:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535] ; X86-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535]
; X86-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X86-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: vpsllvw {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x12,0x05,A,A,A,A] ; X86-NEXT: vpsllvw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x12,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3] ; X86-NEXT: retl # encoding: [0xc3]
; ;
; X64-LABEL: test_int_x86_avx512_psllv_w_128_const: ; X64-LABEL: test_int_x86_avx512_psllv_w_128_const:
; X64: # %bb.0: ; X64: # %bb.0:
; X64-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535] ; X64-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535]
; X64-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X64-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsllvw {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x12,0x05,A,A,A,A] ; X64-NEXT: vpsllvw {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x12,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: retq # encoding: [0xc3] ; X64-NEXT: retq # encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.psllv.w.128(<8 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>) %res = call <8 x i16> @llvm.x86.avx512.psllv.w.128(<8 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>)
ret <8 x i16> %res ret <8 x i16> %res
@ -2422,20 +2422,20 @@ declare <8 x i16> @llvm.x86.avx512.psllv.w.128(<8 x i16>, <8 x i16>)
define <16 x i16> @test_int_x86_avx512_psllv_w_256_const() optsize { define <16 x i16> @test_int_x86_avx512_psllv_w_256_const() optsize {
; X86-LABEL: test_int_x86_avx512_psllv_w_256_const: ; X86-LABEL: test_int_x86_avx512_psllv_w_256_const:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535] ; X86-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
; X86-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X86-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: vpsllvw {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x12,0x05,A,A,A,A] ; X86-NEXT: vpsllvw {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x12,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3] ; X86-NEXT: retl # encoding: [0xc3]
; ;
; X64-LABEL: test_int_x86_avx512_psllv_w_256_const: ; X64-LABEL: test_int_x86_avx512_psllv_w_256_const:
; X64: # %bb.0: ; X64: # %bb.0:
; X64-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535] ; X64-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
; X64-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X64-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsllvw {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x12,0x05,A,A,A,A] ; X64-NEXT: vpsllvw {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x12,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: retq # encoding: [0xc3] ; X64-NEXT: retq # encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.psllv.w.256(<16 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>) %res = call <16 x i16> @llvm.x86.avx512.psllv.w.256(<16 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>)
ret <16 x i16> %res ret <16 x i16> %res

View File

@ -5,7 +5,7 @@
define <8 x i64> @avx512_funnel_shift_q_512(<8 x i64> %a0, <8 x i64> %a1) { define <8 x i64> @avx512_funnel_shift_q_512(<8 x i64> %a0, <8 x i64> %a1) {
; X86-LABEL: avx512_funnel_shift_q_512: ; X86-LABEL: avx512_funnel_shift_q_512:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vpshldvq {{\.LCPI.*}}, %zmm1, %zmm0 ; X86-NEXT: vpshldvq {{\.LCPI[0-9]+_[0-9]+}}, %zmm1, %zmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: avx512_funnel_shift_q_512: ; X64-LABEL: avx512_funnel_shift_q_512:
@ -32,7 +32,7 @@ define <8 x i64> @avx512_funnel_shift_q_512_splat(<8 x i64> %a0, <8 x i64> %a1)
define <16 x i32> @avx512_funnel_shift_d_512(<16 x i32> %a0, <16 x i32> %a1) { define <16 x i32> @avx512_funnel_shift_d_512(<16 x i32> %a0, <16 x i32> %a1) {
; X86-LABEL: avx512_funnel_shift_d_512: ; X86-LABEL: avx512_funnel_shift_d_512:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vpshldvd {{\.LCPI.*}}, %zmm1, %zmm0 ; X86-NEXT: vpshldvd {{\.LCPI[0-9]+_[0-9]+}}, %zmm1, %zmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: avx512_funnel_shift_d_512: ; X64-LABEL: avx512_funnel_shift_d_512:
@ -59,7 +59,7 @@ define <16 x i32> @avx512_funnel_shift_d_512_splat(<16 x i32> %a0, <16 x i32> %a
define <32 x i16> @avx512_funnel_shift_w_512(<32 x i16> %a0, <32 x i16> %a1) { define <32 x i16> @avx512_funnel_shift_w_512(<32 x i16> %a0, <32 x i16> %a1) {
; X86-LABEL: avx512_funnel_shift_w_512: ; X86-LABEL: avx512_funnel_shift_w_512:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vpshldvw {{\.LCPI.*}}, %zmm1, %zmm0 ; X86-NEXT: vpshldvw {{\.LCPI[0-9]+_[0-9]+}}, %zmm1, %zmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: avx512_funnel_shift_w_512: ; X64-LABEL: avx512_funnel_shift_w_512:

View File

@ -5,7 +5,7 @@
define <2 x i64> @avx512_funnel_shift_q_128(<2 x i64> %a0, <2 x i64> %a1) { define <2 x i64> @avx512_funnel_shift_q_128(<2 x i64> %a0, <2 x i64> %a1) {
; X86-LABEL: avx512_funnel_shift_q_128: ; X86-LABEL: avx512_funnel_shift_q_128:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vpshldvq {{\.LCPI.*}}, %xmm1, %xmm0 ; X86-NEXT: vpshldvq {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: avx512_funnel_shift_q_128: ; X64-LABEL: avx512_funnel_shift_q_128:
@ -21,7 +21,7 @@ define <2 x i64> @avx512_funnel_shift_q_128(<2 x i64> %a0, <2 x i64> %a1) {
define <4 x i64> @avx512_funnel_shift_q_256(<4 x i64> %a0, <4 x i64> %a1) { define <4 x i64> @avx512_funnel_shift_q_256(<4 x i64> %a0, <4 x i64> %a1) {
; X86-LABEL: avx512_funnel_shift_q_256: ; X86-LABEL: avx512_funnel_shift_q_256:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vpshldvq {{\.LCPI.*}}, %ymm1, %ymm0 ; X86-NEXT: vpshldvq {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: avx512_funnel_shift_q_256: ; X64-LABEL: avx512_funnel_shift_q_256:
@ -59,7 +59,7 @@ define <4 x i64> @avx512_funnel_shift_q_256_splat(<4 x i64> %a0, <4 x i64> %a1)
define <4 x i32> @avx512_funnel_shift_d_128(<4 x i32> %a0, <4 x i32> %a1) { define <4 x i32> @avx512_funnel_shift_d_128(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: avx512_funnel_shift_d_128: ; X86-LABEL: avx512_funnel_shift_d_128:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vpshldvd {{\.LCPI.*}}, %xmm1, %xmm0 ; X86-NEXT: vpshldvd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: avx512_funnel_shift_d_128: ; X64-LABEL: avx512_funnel_shift_d_128:
@ -75,7 +75,7 @@ define <4 x i32> @avx512_funnel_shift_d_128(<4 x i32> %a0, <4 x i32> %a1) {
define <8 x i32> @avx512_funnel_shift_d_256(<8 x i32> %a0, <8 x i32> %a1) { define <8 x i32> @avx512_funnel_shift_d_256(<8 x i32> %a0, <8 x i32> %a1) {
; X86-LABEL: avx512_funnel_shift_d_256: ; X86-LABEL: avx512_funnel_shift_d_256:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vpshldvd {{\.LCPI.*}}, %ymm1, %ymm0 ; X86-NEXT: vpshldvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: avx512_funnel_shift_d_256: ; X64-LABEL: avx512_funnel_shift_d_256:
@ -113,7 +113,7 @@ define <8 x i32> @avx512_funnel_shift_d_256_splat(<8 x i32> %a0, <8 x i32> %a1)
define <8 x i16> @avx512_funnel_shift_w_128(<8 x i16> %a0, <8 x i16> %a1) { define <8 x i16> @avx512_funnel_shift_w_128(<8 x i16> %a0, <8 x i16> %a1) {
; X86-LABEL: avx512_funnel_shift_w_128: ; X86-LABEL: avx512_funnel_shift_w_128:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vpshldvw {{\.LCPI.*}}, %xmm1, %xmm0 ; X86-NEXT: vpshldvw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: avx512_funnel_shift_w_128: ; X64-LABEL: avx512_funnel_shift_w_128:
@ -129,7 +129,7 @@ define <8 x i16> @avx512_funnel_shift_w_128(<8 x i16> %a0, <8 x i16> %a1) {
define <16 x i16> @avx512_funnel_shift_w_256(<16 x i16> %a0, <16 x i16> %a1) { define <16 x i16> @avx512_funnel_shift_w_256(<16 x i16> %a0, <16 x i16> %a1) {
; X86-LABEL: avx512_funnel_shift_w_256: ; X86-LABEL: avx512_funnel_shift_w_256:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vpshldvw {{\.LCPI.*}}, %ymm1, %ymm0 ; X86-NEXT: vpshldvw {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: avx512_funnel_shift_w_256: ; X64-LABEL: avx512_funnel_shift_w_256:

View File

@ -1905,7 +1905,7 @@ define <2 x i64> @test_mm_mask_set1_epi32(<2 x i64> %__O, i8 zeroext %__M) {
; X86: # %bb.0: # %entry ; X86: # %bb.0: # %entry
; X86-NEXT: movb {{[0-9]+}}(%esp), %al ; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: kmovw %eax, %k1 ; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpbroadcastd {{\.LCPI.*}}, %xmm0 {%k1} ; X86-NEXT: vpbroadcastd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 {%k1}
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: test_mm_mask_set1_epi32: ; X64-LABEL: test_mm_mask_set1_epi32:
@ -1927,7 +1927,7 @@ define <2 x i64> @test_mm_maskz_set1_epi32(i8 zeroext %__M) {
; X86: # %bb.0: # %entry ; X86: # %bb.0: # %entry
; X86-NEXT: movb {{[0-9]+}}(%esp), %al ; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: kmovw %eax, %k1 ; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpbroadcastd {{\.LCPI.*}}, %xmm0 {%k1} {z} ; X86-NEXT: vpbroadcastd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 {%k1} {z}
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: test_mm_maskz_set1_epi32: ; X64-LABEL: test_mm_maskz_set1_epi32:
@ -1948,7 +1948,7 @@ define <4 x i64> @test_mm256_mask_set1_epi32(<4 x i64> %__O, i8 zeroext %__M) {
; X86: # %bb.0: # %entry ; X86: # %bb.0: # %entry
; X86-NEXT: movb {{[0-9]+}}(%esp), %al ; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: kmovw %eax, %k1 ; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpbroadcastd {{\.LCPI.*}}, %ymm0 {%k1} ; X86-NEXT: vpbroadcastd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 {%k1}
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: test_mm256_mask_set1_epi32: ; X64-LABEL: test_mm256_mask_set1_epi32:
@ -1969,7 +1969,7 @@ define <4 x i64> @test_mm256_maskz_set1_epi32(i8 zeroext %__M) {
; X86: # %bb.0: # %entry ; X86: # %bb.0: # %entry
; X86-NEXT: movb {{[0-9]+}}(%esp), %al ; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: kmovw %eax, %k1 ; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpbroadcastd {{\.LCPI.*}}, %ymm0 {%k1} {z} ; X86-NEXT: vpbroadcastd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 {%k1} {z}
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: test_mm256_maskz_set1_epi32: ; X64-LABEL: test_mm256_maskz_set1_epi32:

View File

@ -7321,20 +7321,20 @@ define <8 x i32>@test_int_x86_avx512_maskz_psrav8_si(<8 x i32> %x0, <8 x i32> %x
define <8 x i32>@test_int_x86_avx512_mask_psrav8_si_const() { define <8 x i32>@test_int_x86_avx512_mask_psrav8_si_const() {
; X86-LABEL: test_int_x86_avx512_mask_psrav8_si_const: ; X86-LABEL: test_int_x86_avx512_mask_psrav8_si_const:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51] ; X86-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X86-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X86-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: vpsravd {{\.LCPI.*}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A] ; X86-NEXT: vpsravd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3] ; X86-NEXT: retl # encoding: [0xc3]
; ;
; X64-LABEL: test_int_x86_avx512_mask_psrav8_si_const: ; X64-LABEL: test_int_x86_avx512_mask_psrav8_si_const:
; X64: # %bb.0: ; X64: # %bb.0:
; X64-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51] ; X64-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X64-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] ; X64-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A] ; X64-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: retq # encoding: [0xc3] ; X64-NEXT: retq # encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.psrav8.si(<8 x i32> <i32 2, i32 9, i32 -12, i32 23, i32 -26, i32 37, i32 -40, i32 51>, <8 x i32> <i32 1, i32 18, i32 35, i32 52, i32 69, i32 15, i32 32, i32 49>, <8 x i32> zeroinitializer, i8 -1) %res = call <8 x i32> @llvm.x86.avx512.mask.psrav8.si(<8 x i32> <i32 2, i32 9, i32 -12, i32 23, i32 -26, i32 37, i32 -40, i32 51>, <8 x i32> <i32 1, i32 18, i32 35, i32 52, i32 69, i32 15, i32 32, i32 49>, <8 x i32> zeroinitializer, i8 -1)
ret <8 x i32> %res ret <8 x i32> %res
@ -8632,20 +8632,20 @@ define <2 x i64>@test_int_x86_avx512_maskz_psrav_q_128(<2 x i64> %x0, <2 x i64>
define <2 x i64>@test_int_x86_avx512_mask_psrav_q_128_const(i8 %x3) { define <2 x i64>@test_int_x86_avx512_mask_psrav_q_128_const(i8 %x3) {
; X86-LABEL: test_int_x86_avx512_mask_psrav_q_128_const: ; X86-LABEL: test_int_x86_avx512_mask_psrav_q_128_const:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,0,4294967287,4294967295] ; X86-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,0,4294967287,4294967295]
; X86-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X86-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: vpsravq {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x46,0x05,A,A,A,A] ; X86-NEXT: vpsravq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x46,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4 ; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3] ; X86-NEXT: retl # encoding: [0xc3]
; ;
; X64-LABEL: test_int_x86_avx512_mask_psrav_q_128_const: ; X64-LABEL: test_int_x86_avx512_mask_psrav_q_128_const:
; X64: # %bb.0: ; X64: # %bb.0:
; X64-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [2,18446744073709551607] ; X64-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [2,18446744073709551607]
; X64-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] ; X64-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsravq {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x46,0x05,A,A,A,A] ; X64-NEXT: vpsravq {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x46,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: retq # encoding: [0xc3] ; X64-NEXT: retq # encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.mask.psrav.q.128(<2 x i64> <i64 2, i64 -9>, <2 x i64> <i64 1, i64 90>, <2 x i64> zeroinitializer, i8 -1) %res = call <2 x i64> @llvm.x86.avx512.mask.psrav.q.128(<2 x i64> <i64 2, i64 -9>, <2 x i64> <i64 1, i64 90>, <2 x i64> zeroinitializer, i8 -1)
ret <2 x i64> %res ret <2 x i64> %res

View File

@ -79,7 +79,7 @@ define <2 x i16> @test_bitreverse_v2i16(<2 x i16> %a) nounwind {
; ;
; X86XOP-LABEL: test_bitreverse_v2i16: ; X86XOP-LABEL: test_bitreverse_v2i16:
; X86XOP: # %bb.0: ; X86XOP: # %bb.0:
; X86XOP-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0 ; X86XOP-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0
; X86XOP-NEXT: retl ; X86XOP-NEXT: retl
%b = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> %a) %b = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> %a)
ret <2 x i16> %b ret <2 x i16> %b
@ -155,7 +155,7 @@ define i64 @test_bitreverse_i64(i64 %a) nounwind {
; X86XOP-LABEL: test_bitreverse_i64: ; X86XOP-LABEL: test_bitreverse_i64:
; X86XOP: # %bb.0: ; X86XOP: # %bb.0:
; X86XOP-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero ; X86XOP-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; X86XOP-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0 ; X86XOP-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0
; X86XOP-NEXT: vmovd %xmm0, %eax ; X86XOP-NEXT: vmovd %xmm0, %eax
; X86XOP-NEXT: vpextrd $1, %xmm0, %edx ; X86XOP-NEXT: vpextrd $1, %xmm0, %edx
; X86XOP-NEXT: retl ; X86XOP-NEXT: retl
@ -213,7 +213,7 @@ define i32 @test_bitreverse_i32(i32 %a) nounwind {
; X86XOP-LABEL: test_bitreverse_i32: ; X86XOP-LABEL: test_bitreverse_i32:
; X86XOP: # %bb.0: ; X86XOP: # %bb.0:
; X86XOP-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86XOP-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86XOP-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0 ; X86XOP-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0
; X86XOP-NEXT: vmovd %xmm0, %eax ; X86XOP-NEXT: vmovd %xmm0, %eax
; X86XOP-NEXT: retl ; X86XOP-NEXT: retl
%b = call i32 @llvm.bitreverse.i32(i32 %a) %b = call i32 @llvm.bitreverse.i32(i32 %a)
@ -272,7 +272,7 @@ define i24 @test_bitreverse_i24(i24 %a) nounwind {
; X86XOP-LABEL: test_bitreverse_i24: ; X86XOP-LABEL: test_bitreverse_i24:
; X86XOP: # %bb.0: ; X86XOP: # %bb.0:
; X86XOP-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86XOP-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86XOP-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0 ; X86XOP-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0
; X86XOP-NEXT: vmovd %xmm0, %eax ; X86XOP-NEXT: vmovd %xmm0, %eax
; X86XOP-NEXT: shrl $8, %eax ; X86XOP-NEXT: shrl $8, %eax
; X86XOP-NEXT: retl ; X86XOP-NEXT: retl
@ -332,7 +332,7 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind {
; X86XOP-LABEL: test_bitreverse_i16: ; X86XOP-LABEL: test_bitreverse_i16:
; X86XOP: # %bb.0: ; X86XOP: # %bb.0:
; X86XOP-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86XOP-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86XOP-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0 ; X86XOP-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0
; X86XOP-NEXT: vmovd %xmm0, %eax ; X86XOP-NEXT: vmovd %xmm0, %eax
; X86XOP-NEXT: # kill: def $ax killed $ax killed $eax ; X86XOP-NEXT: # kill: def $ax killed $ax killed $eax
; X86XOP-NEXT: retl ; X86XOP-NEXT: retl
@ -383,7 +383,7 @@ define i8 @test_bitreverse_i8(i8 %a) {
; X86XOP-LABEL: test_bitreverse_i8: ; X86XOP-LABEL: test_bitreverse_i8:
; X86XOP: # %bb.0: ; X86XOP: # %bb.0:
; X86XOP-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86XOP-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86XOP-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0 ; X86XOP-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0
; X86XOP-NEXT: vmovd %xmm0, %eax ; X86XOP-NEXT: vmovd %xmm0, %eax
; X86XOP-NEXT: # kill: def $al killed $al killed $eax ; X86XOP-NEXT: # kill: def $al killed $al killed $eax
; X86XOP-NEXT: retl ; X86XOP-NEXT: retl
@ -436,7 +436,7 @@ define i4 @test_bitreverse_i4(i4 %a) {
; X86XOP-LABEL: test_bitreverse_i4: ; X86XOP-LABEL: test_bitreverse_i4:
; X86XOP: # %bb.0: ; X86XOP: # %bb.0:
; X86XOP-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86XOP-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86XOP-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0 ; X86XOP-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0
; X86XOP-NEXT: vmovd %xmm0, %eax ; X86XOP-NEXT: vmovd %xmm0, %eax
; X86XOP-NEXT: shrb $4, %al ; X86XOP-NEXT: shrb $4, %al
; X86XOP-NEXT: # kill: def $al killed $al killed $eax ; X86XOP-NEXT: # kill: def $al killed $al killed $eax

View File

@ -128,7 +128,7 @@ define <32 x i8> @f32xi8_i16(<32 x i8> %a) {
; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1 ; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 ; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX-NEXT: retl ; AVX-NEXT: retl
; ;
; ALL32-LABEL: f32xi8_i16: ; ALL32-LABEL: f32xi8_i16:
@ -168,7 +168,7 @@ define <32 x i8> @f32xi8_i32(<32 x i8> %a) {
; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1 ; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 ; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX-NEXT: retl ; AVX-NEXT: retl
; ;
; ALL32-LABEL: f32xi8_i32: ; ALL32-LABEL: f32xi8_i32:
@ -209,7 +209,7 @@ define <32 x i8> @f32xi8_i64(<32 x i8> %a) {
; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1 ; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 ; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX-NEXT: retl ; AVX-NEXT: retl
; ;
; ALL32-LABEL: f32xi8_i64: ; ALL32-LABEL: f32xi8_i64:
@ -250,7 +250,7 @@ define <32 x i8> @f32xi8_i128(<32 x i8> %a) {
; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1 ; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 ; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX-NEXT: retl ; AVX-NEXT: retl
; ;
; ALL32-LABEL: f32xi8_i128: ; ALL32-LABEL: f32xi8_i128:
@ -716,7 +716,7 @@ define <16 x i16> @f16xi16_i32(<16 x i16> %a) {
; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1 ; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 ; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX-NEXT: retl ; AVX-NEXT: retl
; ;
; ALL32-LABEL: f16xi16_i32: ; ALL32-LABEL: f16xi16_i32:
@ -757,7 +757,7 @@ define <16 x i16> @f16xi16_i64(<16 x i16> %a) {
; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1 ; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 ; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX-NEXT: retl ; AVX-NEXT: retl
; ;
; ALL32-LABEL: f16xi16_i64: ; ALL32-LABEL: f16xi16_i64:
@ -798,7 +798,7 @@ define <16 x i16> @f16xi16_i128(<16 x i16> %a) {
; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1 ; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 ; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX-NEXT: retl ; AVX-NEXT: retl
; ;
; ALL32-LABEL: f16xi16_i128: ; ALL32-LABEL: f16xi16_i128:
@ -1161,7 +1161,7 @@ define <8 x i32> @f8xi32_i64(<8 x i32> %a) {
; AVX-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ; AVX-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddd %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vpaddd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 ; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX-NEXT: retl ; AVX-NEXT: retl
; ;
; ALL32-LABEL: f8xi32_i64: ; ALL32-LABEL: f8xi32_i64:
@ -1202,7 +1202,7 @@ define <8 x i32> @f8xi32_i128(<8 x i32> %a) {
; AVX-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ; AVX-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddd %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vpaddd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 ; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX-NEXT: retl ; AVX-NEXT: retl
; ;
; ALL32-LABEL: f8xi32_i128: ; ALL32-LABEL: f8xi32_i128:
@ -1386,7 +1386,7 @@ define <4 x i64> @f4xi64_i128(<4 x i64> %a) {
; AVX-NEXT: vpaddq %xmm2, %xmm1, %xmm1 ; AVX-NEXT: vpaddq %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddq %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 ; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX-NEXT: retl ; AVX-NEXT: retl
; ;
; ALL32-LABEL: f4xi64_i128: ; ALL32-LABEL: f4xi64_i128:

View File

@ -32,7 +32,7 @@ define double @test1(i32 %a, i32 %b, double %x) nounwind {
; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp) ; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: flds {{\.LCPI.*}} ; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1) ; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovnbe %st(1), %st ; NOSSE2-NEXT: fcmovnbe %st(1), %st
; NOSSE2-NEXT: fstp %st(1) ; NOSSE2-NEXT: fstp %st(1)
@ -43,7 +43,7 @@ define double @test1(i32 %a, i32 %b, double %x) nounwind {
; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp) ; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovnbe %st(1), %st ; NOSSE1-NEXT: fcmovnbe %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -54,7 +54,7 @@ define double @test1(i32 %a, i32 %b, double %x) nounwind {
; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp) ; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: ja .LBB0_2 ; NOCMOV-NEXT: ja .LBB0_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -95,7 +95,7 @@ define double @test2(i32 %a, i32 %b, double %x) nounwind {
; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp) ; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: flds {{\.LCPI.*}} ; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1) ; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovnb %st(1), %st ; NOSSE2-NEXT: fcmovnb %st(1), %st
; NOSSE2-NEXT: fstp %st(1) ; NOSSE2-NEXT: fstp %st(1)
@ -106,7 +106,7 @@ define double @test2(i32 %a, i32 %b, double %x) nounwind {
; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp) ; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovnb %st(1), %st ; NOSSE1-NEXT: fcmovnb %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -117,7 +117,7 @@ define double @test2(i32 %a, i32 %b, double %x) nounwind {
; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp) ; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jae .LBB1_2 ; NOCMOV-NEXT: jae .LBB1_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -158,7 +158,7 @@ define double @test3(i32 %a, i32 %b, double %x) nounwind {
; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp) ; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: flds {{\.LCPI.*}} ; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1) ; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovb %st(1), %st ; NOSSE2-NEXT: fcmovb %st(1), %st
; NOSSE2-NEXT: fstp %st(1) ; NOSSE2-NEXT: fstp %st(1)
@ -169,7 +169,7 @@ define double @test3(i32 %a, i32 %b, double %x) nounwind {
; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp) ; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovb %st(1), %st ; NOSSE1-NEXT: fcmovb %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -180,7 +180,7 @@ define double @test3(i32 %a, i32 %b, double %x) nounwind {
; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp) ; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jb .LBB2_2 ; NOCMOV-NEXT: jb .LBB2_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -221,7 +221,7 @@ define double @test4(i32 %a, i32 %b, double %x) nounwind {
; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp) ; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: flds {{\.LCPI.*}} ; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1) ; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovbe %st(1), %st ; NOSSE2-NEXT: fcmovbe %st(1), %st
; NOSSE2-NEXT: fstp %st(1) ; NOSSE2-NEXT: fstp %st(1)
@ -232,7 +232,7 @@ define double @test4(i32 %a, i32 %b, double %x) nounwind {
; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp) ; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovbe %st(1), %st ; NOSSE1-NEXT: fcmovbe %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -243,7 +243,7 @@ define double @test4(i32 %a, i32 %b, double %x) nounwind {
; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp) ; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jbe .LBB3_2 ; NOCMOV-NEXT: jbe .LBB3_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -286,7 +286,7 @@ define double @test5(i32 %a, i32 %b, double %x) nounwind {
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setg %al ; NOSSE2-NEXT: setg %al
; NOSSE2-NEXT: testb %al, %al ; NOSSE2-NEXT: testb %al, %al
; NOSSE2-NEXT: flds {{\.LCPI.*}} ; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1) ; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st ; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1) ; NOSSE2-NEXT: fstp %st(1)
@ -299,7 +299,7 @@ define double @test5(i32 %a, i32 %b, double %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setg %al ; NOSSE1-NEXT: setg %al
; NOSSE1-NEXT: testb %al, %al ; NOSSE1-NEXT: testb %al, %al
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st ; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -310,7 +310,7 @@ define double @test5(i32 %a, i32 %b, double %x) nounwind {
; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp) ; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jg .LBB4_2 ; NOCMOV-NEXT: jg .LBB4_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -353,7 +353,7 @@ define double @test6(i32 %a, i32 %b, double %x) nounwind {
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setge %al ; NOSSE2-NEXT: setge %al
; NOSSE2-NEXT: testb %al, %al ; NOSSE2-NEXT: testb %al, %al
; NOSSE2-NEXT: flds {{\.LCPI.*}} ; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1) ; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st ; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1) ; NOSSE2-NEXT: fstp %st(1)
@ -366,7 +366,7 @@ define double @test6(i32 %a, i32 %b, double %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setge %al ; NOSSE1-NEXT: setge %al
; NOSSE1-NEXT: testb %al, %al ; NOSSE1-NEXT: testb %al, %al
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st ; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -377,7 +377,7 @@ define double @test6(i32 %a, i32 %b, double %x) nounwind {
; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp) ; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jge .LBB5_2 ; NOCMOV-NEXT: jge .LBB5_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -420,7 +420,7 @@ define double @test7(i32 %a, i32 %b, double %x) nounwind {
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setl %al ; NOSSE2-NEXT: setl %al
; NOSSE2-NEXT: testb %al, %al ; NOSSE2-NEXT: testb %al, %al
; NOSSE2-NEXT: flds {{\.LCPI.*}} ; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1) ; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st ; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1) ; NOSSE2-NEXT: fstp %st(1)
@ -433,7 +433,7 @@ define double @test7(i32 %a, i32 %b, double %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setl %al ; NOSSE1-NEXT: setl %al
; NOSSE1-NEXT: testb %al, %al ; NOSSE1-NEXT: testb %al, %al
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st ; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -444,7 +444,7 @@ define double @test7(i32 %a, i32 %b, double %x) nounwind {
; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp) ; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jl .LBB6_2 ; NOCMOV-NEXT: jl .LBB6_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -487,7 +487,7 @@ define double @test8(i32 %a, i32 %b, double %x) nounwind {
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setle %al ; NOSSE2-NEXT: setle %al
; NOSSE2-NEXT: testb %al, %al ; NOSSE2-NEXT: testb %al, %al
; NOSSE2-NEXT: flds {{\.LCPI.*}} ; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1) ; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st ; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1) ; NOSSE2-NEXT: fstp %st(1)
@ -500,7 +500,7 @@ define double @test8(i32 %a, i32 %b, double %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setle %al ; NOSSE1-NEXT: setle %al
; NOSSE1-NEXT: testb %al, %al ; NOSSE1-NEXT: testb %al, %al
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st ; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -511,7 +511,7 @@ define double @test8(i32 %a, i32 %b, double %x) nounwind {
; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp) ; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jle .LBB7_2 ; NOCMOV-NEXT: jle .LBB7_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -565,7 +565,7 @@ define float @test9(i32 %a, i32 %b, float %x) nounwind {
; NOSSE1-NEXT: flds {{[0-9]+}}(%esp) ; NOSSE1-NEXT: flds {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovnbe %st(1), %st ; NOSSE1-NEXT: fcmovnbe %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -576,7 +576,7 @@ define float @test9(i32 %a, i32 %b, float %x) nounwind {
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp) ; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: ja .LBB8_2 ; NOCMOV-NEXT: ja .LBB8_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -630,7 +630,7 @@ define float @test10(i32 %a, i32 %b, float %x) nounwind {
; NOSSE1-NEXT: flds {{[0-9]+}}(%esp) ; NOSSE1-NEXT: flds {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovnb %st(1), %st ; NOSSE1-NEXT: fcmovnb %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -641,7 +641,7 @@ define float @test10(i32 %a, i32 %b, float %x) nounwind {
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp) ; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jae .LBB9_2 ; NOCMOV-NEXT: jae .LBB9_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -695,7 +695,7 @@ define float @test11(i32 %a, i32 %b, float %x) nounwind {
; NOSSE1-NEXT: flds {{[0-9]+}}(%esp) ; NOSSE1-NEXT: flds {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovb %st(1), %st ; NOSSE1-NEXT: fcmovb %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -706,7 +706,7 @@ define float @test11(i32 %a, i32 %b, float %x) nounwind {
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp) ; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jb .LBB10_2 ; NOCMOV-NEXT: jb .LBB10_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -760,7 +760,7 @@ define float @test12(i32 %a, i32 %b, float %x) nounwind {
; NOSSE1-NEXT: flds {{[0-9]+}}(%esp) ; NOSSE1-NEXT: flds {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovbe %st(1), %st ; NOSSE1-NEXT: fcmovbe %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -771,7 +771,7 @@ define float @test12(i32 %a, i32 %b, float %x) nounwind {
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp) ; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jbe .LBB11_2 ; NOCMOV-NEXT: jbe .LBB11_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -827,7 +827,7 @@ define float @test13(i32 %a, i32 %b, float %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setg %al ; NOSSE1-NEXT: setg %al
; NOSSE1-NEXT: testb %al, %al ; NOSSE1-NEXT: testb %al, %al
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st ; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -838,7 +838,7 @@ define float @test13(i32 %a, i32 %b, float %x) nounwind {
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp) ; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jg .LBB12_2 ; NOCMOV-NEXT: jg .LBB12_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -894,7 +894,7 @@ define float @test14(i32 %a, i32 %b, float %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setge %al ; NOSSE1-NEXT: setge %al
; NOSSE1-NEXT: testb %al, %al ; NOSSE1-NEXT: testb %al, %al
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st ; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -905,7 +905,7 @@ define float @test14(i32 %a, i32 %b, float %x) nounwind {
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp) ; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jge .LBB13_2 ; NOCMOV-NEXT: jge .LBB13_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -961,7 +961,7 @@ define float @test15(i32 %a, i32 %b, float %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setl %al ; NOSSE1-NEXT: setl %al
; NOSSE1-NEXT: testb %al, %al ; NOSSE1-NEXT: testb %al, %al
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st ; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -972,7 +972,7 @@ define float @test15(i32 %a, i32 %b, float %x) nounwind {
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp) ; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jl .LBB14_2 ; NOCMOV-NEXT: jl .LBB14_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -1028,7 +1028,7 @@ define float @test16(i32 %a, i32 %b, float %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setle %al ; NOSSE1-NEXT: setle %al
; NOSSE1-NEXT: testb %al, %al ; NOSSE1-NEXT: testb %al, %al
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st ; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -1039,7 +1039,7 @@ define float @test16(i32 %a, i32 %b, float %x) nounwind {
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp) ; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jle .LBB15_2 ; NOCMOV-NEXT: jle .LBB15_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -1058,7 +1058,7 @@ define x86_fp80 @test17(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE: # %bb.0: ; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp) ; SSE-NEXT: fldt {{[0-9]+}}(%esp)
; SSE-NEXT: flds {{\.LCPI.*}} ; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1) ; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fcmovnbe %st(1), %st ; SSE-NEXT: fcmovnbe %st(1), %st
@ -1070,7 +1070,7 @@ define x86_fp80 @test17(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp) ; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: flds {{\.LCPI.*}} ; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1) ; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovnbe %st(1), %st ; NOSSE2-NEXT: fcmovnbe %st(1), %st
; NOSSE2-NEXT: fstp %st(1) ; NOSSE2-NEXT: fstp %st(1)
@ -1081,7 +1081,7 @@ define x86_fp80 @test17(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp) ; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovnbe %st(1), %st ; NOSSE1-NEXT: fcmovnbe %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -1092,7 +1092,7 @@ define x86_fp80 @test17(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp) ; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: ja .LBB16_2 ; NOCMOV-NEXT: ja .LBB16_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -1111,7 +1111,7 @@ define x86_fp80 @test18(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE: # %bb.0: ; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp) ; SSE-NEXT: fldt {{[0-9]+}}(%esp)
; SSE-NEXT: flds {{\.LCPI.*}} ; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1) ; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fcmovnb %st(1), %st ; SSE-NEXT: fcmovnb %st(1), %st
@ -1123,7 +1123,7 @@ define x86_fp80 @test18(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp) ; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: flds {{\.LCPI.*}} ; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1) ; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovnb %st(1), %st ; NOSSE2-NEXT: fcmovnb %st(1), %st
; NOSSE2-NEXT: fstp %st(1) ; NOSSE2-NEXT: fstp %st(1)
@ -1134,7 +1134,7 @@ define x86_fp80 @test18(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp) ; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovnb %st(1), %st ; NOSSE1-NEXT: fcmovnb %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -1145,7 +1145,7 @@ define x86_fp80 @test18(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp) ; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jae .LBB17_2 ; NOCMOV-NEXT: jae .LBB17_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -1164,7 +1164,7 @@ define x86_fp80 @test19(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE: # %bb.0: ; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp) ; SSE-NEXT: fldt {{[0-9]+}}(%esp)
; SSE-NEXT: flds {{\.LCPI.*}} ; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1) ; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fcmovb %st(1), %st ; SSE-NEXT: fcmovb %st(1), %st
@ -1176,7 +1176,7 @@ define x86_fp80 @test19(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp) ; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: flds {{\.LCPI.*}} ; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1) ; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovb %st(1), %st ; NOSSE2-NEXT: fcmovb %st(1), %st
; NOSSE2-NEXT: fstp %st(1) ; NOSSE2-NEXT: fstp %st(1)
@ -1187,7 +1187,7 @@ define x86_fp80 @test19(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp) ; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovb %st(1), %st ; NOSSE1-NEXT: fcmovb %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -1198,7 +1198,7 @@ define x86_fp80 @test19(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp) ; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jb .LBB18_2 ; NOCMOV-NEXT: jb .LBB18_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -1217,7 +1217,7 @@ define x86_fp80 @test20(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE: # %bb.0: ; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp) ; SSE-NEXT: fldt {{[0-9]+}}(%esp)
; SSE-NEXT: flds {{\.LCPI.*}} ; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1) ; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fcmovbe %st(1), %st ; SSE-NEXT: fcmovbe %st(1), %st
@ -1229,7 +1229,7 @@ define x86_fp80 @test20(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp) ; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: flds {{\.LCPI.*}} ; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1) ; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovbe %st(1), %st ; NOSSE2-NEXT: fcmovbe %st(1), %st
; NOSSE2-NEXT: fstp %st(1) ; NOSSE2-NEXT: fstp %st(1)
@ -1240,7 +1240,7 @@ define x86_fp80 @test20(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp) ; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovbe %st(1), %st ; NOSSE1-NEXT: fcmovbe %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -1251,7 +1251,7 @@ define x86_fp80 @test20(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp) ; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jbe .LBB19_2 ; NOCMOV-NEXT: jbe .LBB19_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -1270,7 +1270,7 @@ define x86_fp80 @test21(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE: # %bb.0: ; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp) ; SSE-NEXT: fldt {{[0-9]+}}(%esp)
; SSE-NEXT: flds {{\.LCPI.*}} ; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1) ; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: setg %al ; SSE-NEXT: setg %al
@ -1286,7 +1286,7 @@ define x86_fp80 @test21(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setg %al ; NOSSE2-NEXT: setg %al
; NOSSE2-NEXT: testb %al, %al ; NOSSE2-NEXT: testb %al, %al
; NOSSE2-NEXT: flds {{\.LCPI.*}} ; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1) ; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st ; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1) ; NOSSE2-NEXT: fstp %st(1)
@ -1299,7 +1299,7 @@ define x86_fp80 @test21(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setg %al ; NOSSE1-NEXT: setg %al
; NOSSE1-NEXT: testb %al, %al ; NOSSE1-NEXT: testb %al, %al
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st ; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -1310,7 +1310,7 @@ define x86_fp80 @test21(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp) ; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jg .LBB20_2 ; NOCMOV-NEXT: jg .LBB20_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -1330,7 +1330,7 @@ define x86_fp80 @test22(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE: # %bb.0: ; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp) ; SSE-NEXT: fldt {{[0-9]+}}(%esp)
; SSE-NEXT: flds {{\.LCPI.*}} ; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1) ; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: setge %al ; SSE-NEXT: setge %al
@ -1346,7 +1346,7 @@ define x86_fp80 @test22(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setge %al ; NOSSE2-NEXT: setge %al
; NOSSE2-NEXT: testb %al, %al ; NOSSE2-NEXT: testb %al, %al
; NOSSE2-NEXT: flds {{\.LCPI.*}} ; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1) ; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st ; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1) ; NOSSE2-NEXT: fstp %st(1)
@ -1359,7 +1359,7 @@ define x86_fp80 @test22(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setge %al ; NOSSE1-NEXT: setge %al
; NOSSE1-NEXT: testb %al, %al ; NOSSE1-NEXT: testb %al, %al
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st ; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -1370,7 +1370,7 @@ define x86_fp80 @test22(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp) ; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jge .LBB21_2 ; NOCMOV-NEXT: jge .LBB21_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -1389,7 +1389,7 @@ define x86_fp80 @test23(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE: # %bb.0: ; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp) ; SSE-NEXT: fldt {{[0-9]+}}(%esp)
; SSE-NEXT: flds {{\.LCPI.*}} ; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1) ; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: setl %al ; SSE-NEXT: setl %al
@ -1405,7 +1405,7 @@ define x86_fp80 @test23(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setl %al ; NOSSE2-NEXT: setl %al
; NOSSE2-NEXT: testb %al, %al ; NOSSE2-NEXT: testb %al, %al
; NOSSE2-NEXT: flds {{\.LCPI.*}} ; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1) ; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st ; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1) ; NOSSE2-NEXT: fstp %st(1)
@ -1418,7 +1418,7 @@ define x86_fp80 @test23(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setl %al ; NOSSE1-NEXT: setl %al
; NOSSE1-NEXT: testb %al, %al ; NOSSE1-NEXT: testb %al, %al
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st ; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -1429,7 +1429,7 @@ define x86_fp80 @test23(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp) ; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jl .LBB22_2 ; NOCMOV-NEXT: jl .LBB22_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)
@ -1448,7 +1448,7 @@ define x86_fp80 @test24(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE: # %bb.0: ; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp) ; SSE-NEXT: fldt {{[0-9]+}}(%esp)
; SSE-NEXT: flds {{\.LCPI.*}} ; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1) ; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: setle %al ; SSE-NEXT: setle %al
@ -1464,7 +1464,7 @@ define x86_fp80 @test24(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setle %al ; NOSSE2-NEXT: setle %al
; NOSSE2-NEXT: testb %al, %al ; NOSSE2-NEXT: testb %al, %al
; NOSSE2-NEXT: flds {{\.LCPI.*}} ; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1) ; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st ; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1) ; NOSSE2-NEXT: fstp %st(1)
@ -1477,7 +1477,7 @@ define x86_fp80 @test24(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setle %al ; NOSSE1-NEXT: setle %al
; NOSSE1-NEXT: testb %al, %al ; NOSSE1-NEXT: testb %al, %al
; NOSSE1-NEXT: flds {{\.LCPI.*}} ; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st ; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1) ; NOSSE1-NEXT: fstp %st(1)
@ -1488,7 +1488,7 @@ define x86_fp80 @test24(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp) ; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: flds {{\.LCPI.*}} ; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jle .LBB23_2 ; NOCMOV-NEXT: jle .LBB23_2
; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0) ; NOCMOV-NEXT: fstp %st(0)

View File

@ -108,12 +108,12 @@ define dso_local i32 @test5(double %A) nounwind {
; CHECK-LABEL: test5: ; CHECK-LABEL: test5:
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ucomisd {{.*}}(%rip), %xmm0 # encoding: [0x66,0x0f,0x2e,0x05,A,A,A,A] ; CHECK-NEXT: ucomisd {{.*}}(%rip), %xmm0 # encoding: [0x66,0x0f,0x2e,0x05,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; CHECK-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; CHECK-NEXT: ja .LBB5_3 # encoding: [0x77,A] ; CHECK-NEXT: ja .LBB5_3 # encoding: [0x77,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB5_3-1, kind: FK_PCRel_1 ; CHECK-NEXT: # fixup A - offset: 1, value: .LBB5_3-1, kind: FK_PCRel_1
; CHECK-NEXT: # %bb.1: # %entry ; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: ucomisd {{.*}}(%rip), %xmm0 # encoding: [0x66,0x0f,0x2e,0x05,A,A,A,A] ; CHECK-NEXT: ucomisd {{.*}}(%rip), %xmm0 # encoding: [0x66,0x0f,0x2e,0x05,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; CHECK-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; CHECK-NEXT: jb .LBB5_3 # encoding: [0x72,A] ; CHECK-NEXT: jb .LBB5_3 # encoding: [0x72,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB5_3-1, kind: FK_PCRel_1 ; CHECK-NEXT: # fixup A - offset: 1, value: .LBB5_3-1, kind: FK_PCRel_1
; CHECK-NEXT: # %bb.2: # %bb12 ; CHECK-NEXT: # %bb.2: # %bb12

View File

@ -417,13 +417,13 @@ define dso_local float @load_constant_pool(float %x) #0 {
; ;
; MEDIUM-STATIC-LABEL: load_constant_pool: ; MEDIUM-STATIC-LABEL: load_constant_pool:
; MEDIUM-STATIC: # %bb.0: ; MEDIUM-STATIC: # %bb.0:
; MEDIUM-STATIC-NEXT: movabsq ${{\.LCPI.*}}, %rax ; MEDIUM-STATIC-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; MEDIUM-STATIC-NEXT: addss (%rax), %xmm0 ; MEDIUM-STATIC-NEXT: addss (%rax), %xmm0
; MEDIUM-STATIC-NEXT: retq ; MEDIUM-STATIC-NEXT: retq
; ;
; LARGE-STATIC-LABEL: load_constant_pool: ; LARGE-STATIC-LABEL: load_constant_pool:
; LARGE-STATIC: # %bb.0: ; LARGE-STATIC: # %bb.0:
; LARGE-STATIC-NEXT: movabsq ${{\.LCPI.*}}, %rax ; LARGE-STATIC-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; LARGE-STATIC-NEXT: addss (%rax), %xmm0 ; LARGE-STATIC-NEXT: addss (%rax), %xmm0
; LARGE-STATIC-NEXT: retq ; LARGE-STATIC-NEXT: retq
; ;
@ -435,7 +435,7 @@ define dso_local float @load_constant_pool(float %x) #0 {
; MEDIUM-PIC-LABEL: load_constant_pool: ; MEDIUM-PIC-LABEL: load_constant_pool:
; MEDIUM-PIC: # %bb.0: ; MEDIUM-PIC: # %bb.0:
; MEDIUM-PIC-NEXT: leaq {{.*}}(%rip), %rax ; MEDIUM-PIC-NEXT: leaq {{.*}}(%rip), %rax
; MEDIUM-PIC-NEXT: movabsq ${{\.LCPI.*}}@GOTOFF, %rcx ; MEDIUM-PIC-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}@GOTOFF, %rcx
; MEDIUM-PIC-NEXT: addss (%rax,%rcx), %xmm0 ; MEDIUM-PIC-NEXT: addss (%rax,%rcx), %xmm0
; MEDIUM-PIC-NEXT: retq ; MEDIUM-PIC-NEXT: retq
; ;
@ -445,7 +445,7 @@ define dso_local float @load_constant_pool(float %x) #0 {
; LARGE-PIC-NEXT: leaq .L11${{.*}}(%rip), %rax ; LARGE-PIC-NEXT: leaq .L11${{.*}}(%rip), %rax
; LARGE-PIC-NEXT: movabsq $_GLOBAL_OFFSET_TABLE_-.L11$pb, %rcx ; LARGE-PIC-NEXT: movabsq $_GLOBAL_OFFSET_TABLE_-.L11$pb, %rcx
; LARGE-PIC-NEXT: addq %rax, %rcx ; LARGE-PIC-NEXT: addq %rax, %rcx
; LARGE-PIC-NEXT: movabsq ${{\.LCPI.*}}@GOTOFF, %rax ; LARGE-PIC-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}@GOTOFF, %rax
; LARGE-PIC-NEXT: addss (%rcx,%rax), %xmm0 ; LARGE-PIC-NEXT: addss (%rcx,%rax), %xmm0
; LARGE-PIC-NEXT: retq ; LARGE-PIC-NEXT: retq
%a = fadd float %x, 1.0 %a = fadd float %x, 1.0

View File

@ -40,8 +40,8 @@ define float @bextr_uitofp(i32 %x, i32 %y) {
; X32-NEXT: movl $3855, %eax # imm = 0xF0F ; X32-NEXT: movl $3855, %eax # imm = 0xF0F
; X32-NEXT: bextrl %eax, {{[0-9]+}}(%esp), %eax ; X32-NEXT: bextrl %eax, {{[0-9]+}}(%esp), %eax
; X32-NEXT: movd %eax, %xmm0 ; X32-NEXT: movd %eax, %xmm0
; X32-NEXT: por {{\.LCPI.*}}, %xmm0 ; X32-NEXT: por {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT: subsd {{\.LCPI.*}}, %xmm0 ; X32-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT: cvtsd2ss %xmm0, %xmm0 ; X32-NEXT: cvtsd2ss %xmm0, %xmm0
; X32-NEXT: movss %xmm0, (%esp) ; X32-NEXT: movss %xmm0, (%esp)
; X32-NEXT: flds (%esp) ; X32-NEXT: flds (%esp)

View File

@ -51,23 +51,23 @@ define <4 x i32> @test_demandedbits_bitreverse(<4 x i32> %a0) nounwind {
; X86-NEXT: packuswb %xmm2, %xmm0 ; X86-NEXT: packuswb %xmm2, %xmm0
; X86-NEXT: movdqa %xmm0, %xmm1 ; X86-NEXT: movdqa %xmm0, %xmm1
; X86-NEXT: psllw $4, %xmm1 ; X86-NEXT: psllw $4, %xmm1
; X86-NEXT: pand {{\.LCPI.*}}, %xmm1 ; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-NEXT: psrlw $4, %xmm0 ; X86-NEXT: psrlw $4, %xmm0
; X86-NEXT: pand {{\.LCPI.*}}, %xmm0 ; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: por %xmm1, %xmm0 ; X86-NEXT: por %xmm1, %xmm0
; X86-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] ; X86-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; X86-NEXT: pand %xmm0, %xmm1 ; X86-NEXT: pand %xmm0, %xmm1
; X86-NEXT: psllw $2, %xmm1 ; X86-NEXT: psllw $2, %xmm1
; X86-NEXT: pand {{\.LCPI.*}}, %xmm0 ; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: psrlw $2, %xmm0 ; X86-NEXT: psrlw $2, %xmm0
; X86-NEXT: por %xmm1, %xmm0 ; X86-NEXT: por %xmm1, %xmm0
; X86-NEXT: movdqa {{.*#+}} xmm1 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] ; X86-NEXT: movdqa {{.*#+}} xmm1 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
; X86-NEXT: pand %xmm0, %xmm1 ; X86-NEXT: pand %xmm0, %xmm1
; X86-NEXT: paddb %xmm1, %xmm1 ; X86-NEXT: paddb %xmm1, %xmm1
; X86-NEXT: pand {{\.LCPI.*}}, %xmm0 ; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: psrlw $1, %xmm0 ; X86-NEXT: psrlw $1, %xmm0
; X86-NEXT: por %xmm1, %xmm0 ; X86-NEXT: por %xmm1, %xmm0
; X86-NEXT: pand {{\.LCPI.*}}, %xmm0 ; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: test_demandedbits_bitreverse: ; X64-LABEL: test_demandedbits_bitreverse:

View File

@ -116,7 +116,7 @@ define void @testCombineMultiplies_splat(<4 x i32> %v1) nounwind {
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [242,242,242,242] ; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [242,242,242,242]
; CHECK-NEXT: paddd %xmm0, %xmm2 ; CHECK-NEXT: paddd %xmm0, %xmm2
; CHECK-NEXT: paddd {{\.LCPI.*}}, %xmm0 ; CHECK-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK-NEXT: movdqa %xmm2, v2 ; CHECK-NEXT: movdqa %xmm2, v2
; CHECK-NEXT: movdqa %xmm0, v3 ; CHECK-NEXT: movdqa %xmm0, v3
; CHECK-NEXT: movdqa %xmm1, x ; CHECK-NEXT: movdqa %xmm1, x
@ -151,7 +151,7 @@ define void @testCombineMultiplies_non_splat(<4 x i32> %v1) nounwind {
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [242,726,1452,2420] ; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [242,726,1452,2420]
; CHECK-NEXT: paddd %xmm0, %xmm2 ; CHECK-NEXT: paddd %xmm0, %xmm2
; CHECK-NEXT: paddd {{\.LCPI.*}}, %xmm0 ; CHECK-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK-NEXT: movdqa %xmm2, v2 ; CHECK-NEXT: movdqa %xmm2, v2
; CHECK-NEXT: movdqa %xmm0, v3 ; CHECK-NEXT: movdqa %xmm0, v3
; CHECK-NEXT: movdqa %xmm1, x ; CHECK-NEXT: movdqa %xmm1, x

View File

@ -328,7 +328,7 @@ define <3 x double> @extvselectsetcc_crash(<2 x double> %x) {
; ;
; X86-LABEL: extvselectsetcc_crash: ; X86-LABEL: extvselectsetcc_crash:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vcmpeqpd {{\.LCPI.*}}, %xmm0, %xmm1 ; X86-NEXT: vcmpeqpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1
; X86-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero ; X86-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; X86-NEXT: vandpd %xmm2, %xmm1, %xmm1 ; X86-NEXT: vandpd %xmm2, %xmm1, %xmm1
; X86-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; X86-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@ -556,7 +556,7 @@ define double @fabs_v4f64(<4 x double> %x) nounwind {
; X86-NEXT: movl %esp, %ebp ; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp ; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $8, %esp ; X86-NEXT: subl $8, %esp
; X86-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-NEXT: vmovlps %xmm0, (%esp) ; X86-NEXT: vmovlps %xmm0, (%esp)
; X86-NEXT: fldl (%esp) ; X86-NEXT: fldl (%esp)
; X86-NEXT: movl %ebp, %esp ; X86-NEXT: movl %ebp, %esp
@ -830,8 +830,8 @@ define double @copysign_v4f64(<4 x double> %x, <4 x double> %y) nounwind {
; X86-NEXT: movl %esp, %ebp ; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp ; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $8, %esp ; X86-NEXT: subl $8, %esp
; X86-NEXT: vandps {{\.LCPI.*}}, %xmm1, %xmm1 ; X86-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
; X86-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-NEXT: vorps %xmm1, %xmm0, %xmm0 ; X86-NEXT: vorps %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovlps %xmm0, (%esp) ; X86-NEXT: vmovlps %xmm0, (%esp)
; X86-NEXT: fldl (%esp) ; X86-NEXT: fldl (%esp)
@ -1111,7 +1111,7 @@ define double @round_v4f64(<4 x double> %x) nounwind {
; X86-NEXT: movl %esp, %ebp ; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp ; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $8, %esp ; X86-NEXT: subl $8, %esp
; X86-NEXT: vandpd {{\.LCPI.*}}, %xmm0, %xmm1 ; X86-NEXT: vandpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1
; X86-NEXT: vmovddup {{.*#+}} xmm2 = [4.9999999999999994E-1,4.9999999999999994E-1] ; X86-NEXT: vmovddup {{.*#+}} xmm2 = [4.9999999999999994E-1,4.9999999999999994E-1]
; X86-NEXT: # xmm2 = mem[0,0] ; X86-NEXT: # xmm2 = mem[0,0]
; X86-NEXT: vorpd %xmm1, %xmm2, %xmm1 ; X86-NEXT: vorpd %xmm1, %xmm2, %xmm1

View File

@ -18,7 +18,7 @@ define double @fneg_f64(double %x) nounwind {
; SSE2-NEXT: andl $-8, %esp ; SSE2-NEXT: andl $-8, %esp
; SSE2-NEXT: subl $8, %esp ; SSE2-NEXT: subl $8, %esp
; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: xorps {{\.LCPI.*}}, %xmm0 ; SSE2-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE2-NEXT: movlps %xmm0, (%esp) ; SSE2-NEXT: movlps %xmm0, (%esp)
; SSE2-NEXT: fldl (%esp) ; SSE2-NEXT: fldl (%esp)
; SSE2-NEXT: movl %ebp, %esp ; SSE2-NEXT: movl %ebp, %esp
@ -40,7 +40,7 @@ define float @fneg_f32(float %x) nounwind {
; SSE2: # %bb.0: ; SSE2: # %bb.0:
; SSE2-NEXT: pushl %eax ; SSE2-NEXT: pushl %eax
; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: xorps {{\.LCPI.*}}, %xmm0 ; SSE2-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE2-NEXT: movss %xmm0, (%esp) ; SSE2-NEXT: movss %xmm0, (%esp)
; SSE2-NEXT: flds (%esp) ; SSE2-NEXT: flds (%esp)
; SSE2-NEXT: popl %eax ; SSE2-NEXT: popl %eax
@ -65,7 +65,7 @@ define void @fneg_f64_mem(double* %x, double* %y) nounwind {
; SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx ; SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: xorps {{\.LCPI.*}}, %xmm0 ; SSE2-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE2-NEXT: movsd %xmm0, (%eax) ; SSE2-NEXT: movsd %xmm0, (%eax)
; SSE2-NEXT: retl ; SSE2-NEXT: retl
%a = load double, double* %x %a = load double, double* %x

View File

@ -36,7 +36,7 @@ define fastcc double @uint64_to_fp(i64 %X) {
; CHECK-NEXT: movl %ecx, (%esp) ; CHECK-NEXT: movl %ecx, (%esp)
; CHECK-NEXT: shrl $31, %edx ; CHECK-NEXT: shrl $31, %edx
; CHECK-NEXT: fildll (%esp) ; CHECK-NEXT: fildll (%esp)
; CHECK-NEXT: fadds {{\.LCPI.*}}(,%edx,4) ; CHECK-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%edx,4)
; CHECK-NEXT: fstpl {{[0-9]+}}(%esp) ; CHECK-NEXT: fstpl {{[0-9]+}}(%esp)
; CHECK-NEXT: fldl {{[0-9]+}}(%esp) ; CHECK-NEXT: fldl {{[0-9]+}}(%esp)
; CHECK-NEXT: movl %ebp, %esp ; CHECK-NEXT: movl %ebp, %esp

View File

@ -548,9 +548,9 @@ define float @fma_const_fmul(float %x) {
; CHECK-LABEL: fma_const_fmul: ; CHECK-LABEL: fma_const_fmul:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x59,0x0d,A,A,A,A] ; CHECK-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x59,0x0d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; CHECK-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; CHECK-NEXT: vfmadd132ss {{.*}}(%rip), %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x99,0x05,A,A,A,A] ; CHECK-NEXT: vfmadd132ss {{.*}}(%rip), %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x99,0x05,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte ; CHECK-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; CHECK-NEXT: # xmm0 = (xmm0 * mem) + xmm1 ; CHECK-NEXT: # xmm0 = (xmm0 * mem) + xmm1
; CHECK-NEXT: retq # encoding: [0xc3] ; CHECK-NEXT: retq # encoding: [0xc3]
%mul1 = fmul contract float %x, 10.0 %mul1 = fmul contract float %x, 10.0

View File

@ -38,7 +38,7 @@ define dso_local float @fast_fmuladd_opts(float %a , float %b , float %c) {
; X86-LABEL: fast_fmuladd_opts: ; X86-LABEL: fast_fmuladd_opts:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: flds {{[0-9]+}}(%esp) ; X86-NEXT: flds {{[0-9]+}}(%esp)
; X86-NEXT: fmuls {{\.LCPI.*}} ; X86-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; X86-NEXT: retl ; X86-NEXT: retl
%res = call fast float @llvm.fmuladd.f32(float %a, float 2.0, float %a) %res = call fast float @llvm.fmuladd.f32(float %a, float 2.0, float %a)
ret float %res ret float %res
@ -61,9 +61,9 @@ define dso_local double @not_so_fast_mul_add(double %x) {
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: fldl {{[0-9]+}}(%esp) ; X86-NEXT: fldl {{[0-9]+}}(%esp)
; X86-NEXT: fld %st(0) ; X86-NEXT: fld %st(0)
; X86-NEXT: fmull {{\.LCPI.*}} ; X86-NEXT: fmull {{\.LCPI[0-9]+_[0-9]+}}
; X86-NEXT: fxch %st(1) ; X86-NEXT: fxch %st(1)
; X86-NEXT: fmull {{\.LCPI.*}} ; X86-NEXT: fmull {{\.LCPI[0-9]+_[0-9]+}}
; X86-NEXT: fxch %st(1) ; X86-NEXT: fxch %st(1)
; X86-NEXT: fstpl mul1 ; X86-NEXT: fstpl mul1
; X86-NEXT: retl ; X86-NEXT: retl
@ -127,7 +127,7 @@ define dso_local float @div_arcp_by_const(half %x) {
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, (%esp) ; X86-NEXT: movl %eax, (%esp)
; X86-NEXT: calll __gnu_h2f_ieee ; X86-NEXT: calll __gnu_h2f_ieee
; X86-NEXT: fmuls {{\.LCPI.*}} ; X86-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; X86-NEXT: fstps (%esp) ; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __gnu_f2h_ieee ; X86-NEXT: calll __gnu_f2h_ieee
; X86-NEXT: movzwl %ax, %eax ; X86-NEXT: movzwl %ax, %eax

View File

@ -443,7 +443,7 @@ define i64 @fptoui_i64_fp80(x86_fp80 %a0) nounwind {
; X86-NEXT: andl $-8, %esp ; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $16, %esp ; X86-NEXT: subl $16, %esp
; X86-NEXT: fldt 8(%ebp) ; X86-NEXT: fldt 8(%ebp)
; X86-NEXT: flds {{\.LCPI.*}} ; X86-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-NEXT: fucom %st(1) ; X86-NEXT: fucom %st(1)
; X86-NEXT: fnstsw %ax ; X86-NEXT: fnstsw %ax
; X86-NEXT: xorl %edx, %edx ; X86-NEXT: xorl %edx, %edx
@ -523,7 +523,7 @@ define i64 @fptoui_i64_fp80_ld(x86_fp80 *%a0) nounwind {
; X86-NEXT: subl $16, %esp ; X86-NEXT: subl $16, %esp
; X86-NEXT: movl 8(%ebp), %eax ; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: fldt (%eax) ; X86-NEXT: fldt (%eax)
; X86-NEXT: flds {{\.LCPI.*}} ; X86-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-NEXT: fucom %st(1) ; X86-NEXT: fucom %st(1)
; X86-NEXT: fnstsw %ax ; X86-NEXT: fnstsw %ax
; X86-NEXT: xorl %edx, %edx ; X86-NEXT: xorl %edx, %edx
@ -825,7 +825,7 @@ define x86_fp80 @uitofp_fp80_i64(i64 %a0) nounwind {
; X86-NEXT: movl %eax, (%esp) ; X86-NEXT: movl %eax, (%esp)
; X86-NEXT: shrl $31, %ecx ; X86-NEXT: shrl $31, %ecx
; X86-NEXT: fildll (%esp) ; X86-NEXT: fildll (%esp)
; X86-NEXT: fadds {{\.LCPI.*}}(,%ecx,4) ; X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X86-NEXT: movl %ebp, %esp ; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp ; X86-NEXT: popl %ebp
; X86-NEXT: retl ; X86-NEXT: retl
@ -837,7 +837,7 @@ define x86_fp80 @uitofp_fp80_i64(i64 %a0) nounwind {
; X64-NEXT: testq %rdi, %rdi ; X64-NEXT: testq %rdi, %rdi
; X64-NEXT: sets %al ; X64-NEXT: sets %al
; X64-NEXT: fildll -{{[0-9]+}}(%rsp) ; X64-NEXT: fildll -{{[0-9]+}}(%rsp)
; X64-NEXT: fadds {{\.LCPI.*}}(,%rax,4) ; X64-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%rax,4)
; X64-NEXT: retq ; X64-NEXT: retq
%1 = uitofp i64 %a0 to x86_fp80 %1 = uitofp i64 %a0 to x86_fp80
ret x86_fp80 %1 ret x86_fp80 %1
@ -857,7 +857,7 @@ define x86_fp80 @uitofp_fp80_i64_ld(i64 *%a0) nounwind {
; X86-NEXT: movl %ecx, (%esp) ; X86-NEXT: movl %ecx, (%esp)
; X86-NEXT: shrl $31, %eax ; X86-NEXT: shrl $31, %eax
; X86-NEXT: fildll (%esp) ; X86-NEXT: fildll (%esp)
; X86-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; X86-NEXT: movl %ebp, %esp ; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp ; X86-NEXT: popl %ebp
; X86-NEXT: retl ; X86-NEXT: retl
@ -870,7 +870,7 @@ define x86_fp80 @uitofp_fp80_i64_ld(i64 *%a0) nounwind {
; X64-NEXT: testq %rax, %rax ; X64-NEXT: testq %rax, %rax
; X64-NEXT: sets %cl ; X64-NEXT: sets %cl
; X64-NEXT: fildll -{{[0-9]+}}(%rsp) ; X64-NEXT: fildll -{{[0-9]+}}(%rsp)
; X64-NEXT: fadds {{\.LCPI.*}}(,%rcx,4) ; X64-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%rcx,4)
; X64-NEXT: retq ; X64-NEXT: retq
%1 = load i64, i64 *%a0 %1 = load i64, i64 *%a0
%2 = uitofp i64 %1 to x86_fp80 %2 = uitofp i64 %1 to x86_fp80

View File

@ -18,7 +18,7 @@ define double @f1() #0 {
; X87-LABEL: f1: ; X87-LABEL: f1:
; X87: # %bb.0: # %entry ; X87: # %bb.0: # %entry
; X87-NEXT: fld1 ; X87-NEXT: fld1
; X87-NEXT: fdivs {{\.LCPI.*}} ; X87-NEXT: fdivs {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: wait ; X87-NEXT: wait
; X87-NEXT: retl ; X87-NEXT: retl
; ;
@ -27,7 +27,7 @@ define double @f1() #0 {
; X86-SSE-NEXT: subl $12, %esp ; X86-SSE-NEXT: subl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 16 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT: divsd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: divsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movsd %xmm0, (%esp) ; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: fldl (%esp) ; X86-SSE-NEXT: fldl (%esp)
; X86-SSE-NEXT: wait ; X86-SSE-NEXT: wait
@ -209,7 +209,7 @@ define double @f4(i32 %n, double %a) #0 {
; X86-SSE-NEXT: cmpl $0, {{[0-9]+}}(%esp) ; X86-SSE-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; X86-SSE-NEXT: jle .LBB3_2 ; X86-SSE-NEXT: jle .LBB3_2
; X86-SSE-NEXT: # %bb.1: # %if.then ; X86-SSE-NEXT: # %bb.1: # %if.then
; X86-SSE-NEXT: addsd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: addsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: .LBB3_2: # %if.end ; X86-SSE-NEXT: .LBB3_2: # %if.end
; X86-SSE-NEXT: movsd %xmm0, (%esp) ; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: fldl (%esp) ; X86-SSE-NEXT: fldl (%esp)
@ -255,7 +255,7 @@ if.end:
define double @f5() #0 { define double @f5() #0 {
; X87-LABEL: f5: ; X87-LABEL: f5:
; X87: # %bb.0: # %entry ; X87: # %bb.0: # %entry
; X87-NEXT: flds {{\.LCPI.*}} ; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fsqrt ; X87-NEXT: fsqrt
; X87-NEXT: wait ; X87-NEXT: wait
; X87-NEXT: retl ; X87-NEXT: retl
@ -297,9 +297,9 @@ define double @f6() #0 {
; X87: # %bb.0: # %entry ; X87: # %bb.0: # %entry
; X87-NEXT: subl $28, %esp ; X87-NEXT: subl $28, %esp
; X87-NEXT: .cfi_def_cfa_offset 32 ; X87-NEXT: .cfi_def_cfa_offset 32
; X87-NEXT: flds {{\.LCPI.*}} ; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl {{[0-9]+}}(%esp) ; X87-NEXT: fstpl {{[0-9]+}}(%esp)
; X87-NEXT: fldl {{\.LCPI.*}} ; X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp) ; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait ; X87-NEXT: wait
; X87-NEXT: calll pow ; X87-NEXT: calll pow
@ -355,7 +355,7 @@ define double @f7() #0 {
; X87: # %bb.0: # %entry ; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp ; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16 ; X87-NEXT: .cfi_def_cfa_offset 16
; X87-NEXT: fldl {{\.LCPI.*}} ; X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp) ; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait ; X87-NEXT: wait
; X87-NEXT: movl $3, {{[0-9]+}}(%esp) ; X87-NEXT: movl $3, {{[0-9]+}}(%esp)
@ -411,7 +411,7 @@ define double @f8() #0 {
; X87: # %bb.0: # %entry ; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp ; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16 ; X87-NEXT: .cfi_def_cfa_offset 16
; X87-NEXT: flds {{\.LCPI.*}} ; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp) ; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait ; X87-NEXT: wait
; X87-NEXT: calll sin ; X87-NEXT: calll sin
@ -462,7 +462,7 @@ define double @f9() #0 {
; X87: # %bb.0: # %entry ; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp ; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16 ; X87-NEXT: .cfi_def_cfa_offset 16
; X87-NEXT: flds {{\.LCPI.*}} ; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp) ; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait ; X87-NEXT: wait
; X87-NEXT: calll cos ; X87-NEXT: calll cos
@ -513,7 +513,7 @@ define double @f10() #0 {
; X87: # %bb.0: # %entry ; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp ; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16 ; X87-NEXT: .cfi_def_cfa_offset 16
; X87-NEXT: flds {{\.LCPI.*}} ; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp) ; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait ; X87-NEXT: wait
; X87-NEXT: calll exp ; X87-NEXT: calll exp
@ -564,7 +564,7 @@ define double @f11() #0 {
; X87: # %bb.0: # %entry ; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp ; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16 ; X87-NEXT: .cfi_def_cfa_offset 16
; X87-NEXT: fldl {{\.LCPI.*}} ; X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp) ; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait ; X87-NEXT: wait
; X87-NEXT: calll exp2 ; X87-NEXT: calll exp2
@ -615,7 +615,7 @@ define double @f12() #0 {
; X87: # %bb.0: # %entry ; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp ; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16 ; X87-NEXT: .cfi_def_cfa_offset 16
; X87-NEXT: flds {{\.LCPI.*}} ; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp) ; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait ; X87-NEXT: wait
; X87-NEXT: calll log ; X87-NEXT: calll log
@ -666,7 +666,7 @@ define double @f13() #0 {
; X87: # %bb.0: # %entry ; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp ; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16 ; X87-NEXT: .cfi_def_cfa_offset 16
; X87-NEXT: flds {{\.LCPI.*}} ; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp) ; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait ; X87-NEXT: wait
; X87-NEXT: calll log10 ; X87-NEXT: calll log10
@ -717,7 +717,7 @@ define double @f14() #0 {
; X87: # %bb.0: # %entry ; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp ; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16 ; X87-NEXT: .cfi_def_cfa_offset 16
; X87-NEXT: flds {{\.LCPI.*}} ; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp) ; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait ; X87-NEXT: wait
; X87-NEXT: calll log2 ; X87-NEXT: calll log2
@ -768,7 +768,7 @@ define double @f15() #0 {
; X87: # %bb.0: # %entry ; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp ; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16 ; X87-NEXT: .cfi_def_cfa_offset 16
; X87-NEXT: fldl {{\.LCPI.*}} ; X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp) ; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait ; X87-NEXT: wait
; X87-NEXT: calll rint ; X87-NEXT: calll rint
@ -816,7 +816,7 @@ define double @f16() #0 {
; X87: # %bb.0: # %entry ; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp ; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16 ; X87-NEXT: .cfi_def_cfa_offset 16
; X87-NEXT: fldl {{\.LCPI.*}} ; X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp) ; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait ; X87-NEXT: wait
; X87-NEXT: calll nearbyint ; X87-NEXT: calll nearbyint
@ -863,7 +863,7 @@ define double @f19() #0 {
; X87: # %bb.0: # %entry ; X87: # %bb.0: # %entry
; X87-NEXT: subl $28, %esp ; X87-NEXT: subl $28, %esp
; X87-NEXT: .cfi_def_cfa_offset 32 ; X87-NEXT: .cfi_def_cfa_offset 32
; X87-NEXT: flds {{\.LCPI.*}} ; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl {{[0-9]+}}(%esp) ; X87-NEXT: fstpl {{[0-9]+}}(%esp)
; X87-NEXT: wait ; X87-NEXT: wait
; X87-NEXT: movl $1072693248, {{[0-9]+}}(%esp) # imm = 0x3FF00000 ; X87-NEXT: movl $1072693248, {{[0-9]+}}(%esp) # imm = 0x3FF00000
@ -1356,7 +1356,7 @@ define i64 @f20u64(double %x) #0 {
; X87-NEXT: subl $20, %esp ; X87-NEXT: subl $20, %esp
; X87-NEXT: .cfi_def_cfa_offset 24 ; X87-NEXT: .cfi_def_cfa_offset 24
; X87-NEXT: fldl {{[0-9]+}}(%esp) ; X87-NEXT: fldl {{[0-9]+}}(%esp)
; X87-NEXT: flds {{\.LCPI.*}} ; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: wait ; X87-NEXT: wait
; X87-NEXT: xorl %edx, %edx ; X87-NEXT: xorl %edx, %edx
; X87-NEXT: fcomi %st(1), %st ; X87-NEXT: fcomi %st(1), %st
@ -1541,7 +1541,7 @@ define float @f21() #0 {
; X87: # %bb.0: # %entry ; X87: # %bb.0: # %entry
; X87-NEXT: pushl %eax ; X87-NEXT: pushl %eax
; X87-NEXT: .cfi_def_cfa_offset 8 ; X87-NEXT: .cfi_def_cfa_offset 8
; X87-NEXT: fldl {{\.LCPI.*}} ; X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstps (%esp) ; X87-NEXT: fstps (%esp)
; X87-NEXT: flds (%esp) ; X87-NEXT: flds (%esp)
; X87-NEXT: wait ; X87-NEXT: wait
@ -2437,8 +2437,8 @@ define double @uifdi(i32 %x) #0 {
; X86-SSE-NEXT: subl $12, %esp ; X86-SSE-NEXT: subl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 16 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: orpd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: orpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: subsd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movsd %xmm0, (%esp) ; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: fldl (%esp) ; X86-SSE-NEXT: fldl (%esp)
; X86-SSE-NEXT: wait ; X86-SSE-NEXT: wait
@ -2480,7 +2480,7 @@ define double @uifdl(i64 %x) #0 {
; X87-NEXT: movl %eax, (%esp) ; X87-NEXT: movl %eax, (%esp)
; X87-NEXT: shrl $31, %ecx ; X87-NEXT: shrl $31, %ecx
; X87-NEXT: fildll (%esp) ; X87-NEXT: fildll (%esp)
; X87-NEXT: fadds {{\.LCPI.*}}(,%ecx,4) ; X87-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X87-NEXT: fstpl {{[0-9]+}}(%esp) ; X87-NEXT: fstpl {{[0-9]+}}(%esp)
; X87-NEXT: fldl {{[0-9]+}}(%esp) ; X87-NEXT: fldl {{[0-9]+}}(%esp)
; X87-NEXT: wait ; X87-NEXT: wait
@ -2497,7 +2497,7 @@ define double @uifdl(i64 %x) #0 {
; X86-SSE-NEXT: movlps %xmm0, {{[0-9]+}}(%esp) ; X86-SSE-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
; X86-SSE-NEXT: shrl $31, %eax ; X86-SSE-NEXT: shrl $31, %eax
; X86-SSE-NEXT: fildll {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fildll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; X86-SSE-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; X86-SSE-NEXT: fstpl {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fstpl {{[0-9]+}}(%esp)
; X86-SSE-NEXT: wait ; X86-SSE-NEXT: wait
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@ -2658,8 +2658,8 @@ define float @uiffi(i32 %x) #0 {
; X86-SSE-NEXT: pushl %eax ; X86-SSE-NEXT: pushl %eax
; X86-SSE-NEXT: .cfi_def_cfa_offset 8 ; X86-SSE-NEXT: .cfi_def_cfa_offset 8
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: orpd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: orpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: subsd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvtsd2ss %xmm0, %xmm0 ; X86-SSE-NEXT: cvtsd2ss %xmm0, %xmm0
; X86-SSE-NEXT: movss %xmm0, (%esp) ; X86-SSE-NEXT: movss %xmm0, (%esp)
; X86-SSE-NEXT: flds (%esp) ; X86-SSE-NEXT: flds (%esp)
@ -2702,7 +2702,7 @@ define float @uiffl(i64 %x) #0 {
; X87-NEXT: movl %eax, {{[0-9]+}}(%esp) ; X87-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X87-NEXT: shrl $31, %ecx ; X87-NEXT: shrl $31, %ecx
; X87-NEXT: fildll {{[0-9]+}}(%esp) ; X87-NEXT: fildll {{[0-9]+}}(%esp)
; X87-NEXT: fadds {{\.LCPI.*}}(,%ecx,4) ; X87-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X87-NEXT: fstps {{[0-9]+}}(%esp) ; X87-NEXT: fstps {{[0-9]+}}(%esp)
; X87-NEXT: flds {{[0-9]+}}(%esp) ; X87-NEXT: flds {{[0-9]+}}(%esp)
; X87-NEXT: wait ; X87-NEXT: wait
@ -2719,7 +2719,7 @@ define float @uiffl(i64 %x) #0 {
; X86-SSE-NEXT: movlps %xmm0, {{[0-9]+}}(%esp) ; X86-SSE-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
; X86-SSE-NEXT: shrl $31, %eax ; X86-SSE-NEXT: shrl $31, %eax
; X86-SSE-NEXT: fildll {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fildll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; X86-SSE-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; X86-SSE-NEXT: fstps {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fstps {{[0-9]+}}(%esp)
; X86-SSE-NEXT: wait ; X86-SSE-NEXT: wait
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero

View File

@ -4,8 +4,8 @@
define i32 @main() nounwind { define i32 @main() nounwind {
; CHECK-LABEL: main: ; CHECK-LABEL: main:
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fldl {{\.LCPI.*}} ; CHECK-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; CHECK-NEXT: fldl {{\.LCPI.*}} ; CHECK-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; CHECK-NEXT: fxch %st(1) ; CHECK-NEXT: fxch %st(1)
; CHECK-NEXT: #APP ; CHECK-NEXT: #APP
; CHECK-NEXT: fmul %st(1), %st ; CHECK-NEXT: fmul %st(1), %st

View File

@ -679,7 +679,7 @@ define i64 @fptoui_f32toi64(float %x) #0 {
; X87-NEXT: andl $-8, %esp ; X87-NEXT: andl $-8, %esp
; X87-NEXT: subl $16, %esp ; X87-NEXT: subl $16, %esp
; X87-NEXT: flds 8(%ebp) ; X87-NEXT: flds 8(%ebp)
; X87-NEXT: flds {{\.LCPI.*}} ; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fcom %st(1) ; X87-NEXT: fcom %st(1)
; X87-NEXT: wait ; X87-NEXT: wait
; X87-NEXT: fnstsw %ax ; X87-NEXT: fnstsw %ax
@ -1319,7 +1319,7 @@ define i64 @fptoui_f64toi64(double %x) #0 {
; X87-NEXT: andl $-8, %esp ; X87-NEXT: andl $-8, %esp
; X87-NEXT: subl $16, %esp ; X87-NEXT: subl $16, %esp
; X87-NEXT: fldl 8(%ebp) ; X87-NEXT: fldl 8(%ebp)
; X87-NEXT: flds {{\.LCPI.*}} ; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fcom %st(1) ; X87-NEXT: fcom %st(1)
; X87-NEXT: wait ; X87-NEXT: wait
; X87-NEXT: fnstsw %ax ; X87-NEXT: fnstsw %ax

View File

@ -488,8 +488,8 @@ define float @uitofp_i32tof32(i32 %x) #0 {
; SSE-X86-NEXT: pushl %eax ; SSE-X86-NEXT: pushl %eax
; SSE-X86-NEXT: .cfi_def_cfa_offset 8 ; SSE-X86-NEXT: .cfi_def_cfa_offset 8
; SSE-X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSE-X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-X86-NEXT: orpd {{\.LCPI.*}}, %xmm0 ; SSE-X86-NEXT: orpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-X86-NEXT: subsd {{\.LCPI.*}}, %xmm0 ; SSE-X86-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-X86-NEXT: cvtsd2ss %xmm0, %xmm0 ; SSE-X86-NEXT: cvtsd2ss %xmm0, %xmm0
; SSE-X86-NEXT: movss %xmm0, (%esp) ; SSE-X86-NEXT: movss %xmm0, (%esp)
; SSE-X86-NEXT: flds (%esp) ; SSE-X86-NEXT: flds (%esp)
@ -509,8 +509,8 @@ define float @uitofp_i32tof32(i32 %x) #0 {
; AVX1-X86-NEXT: pushl %eax ; AVX1-X86-NEXT: pushl %eax
; AVX1-X86-NEXT: .cfi_def_cfa_offset 8 ; AVX1-X86-NEXT: .cfi_def_cfa_offset 8
; AVX1-X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; AVX1-X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX1-X86-NEXT: vorpd {{\.LCPI.*}}, %xmm0, %xmm0 ; AVX1-X86-NEXT: vorpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX1-X86-NEXT: vsubsd {{\.LCPI.*}}, %xmm0, %xmm0 ; AVX1-X86-NEXT: vsubsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX1-X86-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0 ; AVX1-X86-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
; AVX1-X86-NEXT: vmovss %xmm0, (%esp) ; AVX1-X86-NEXT: vmovss %xmm0, (%esp)
; AVX1-X86-NEXT: flds (%esp) ; AVX1-X86-NEXT: flds (%esp)
@ -581,7 +581,7 @@ define float @uitofp_i64tof32(i64 %x) #0 {
; SSE-X86-NEXT: movlps %xmm0, {{[0-9]+}}(%esp) ; SSE-X86-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
; SSE-X86-NEXT: shrl $31, %eax ; SSE-X86-NEXT: shrl $31, %eax
; SSE-X86-NEXT: fildll {{[0-9]+}}(%esp) ; SSE-X86-NEXT: fildll {{[0-9]+}}(%esp)
; SSE-X86-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; SSE-X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE-X86-NEXT: fstps {{[0-9]+}}(%esp) ; SSE-X86-NEXT: fstps {{[0-9]+}}(%esp)
; SSE-X86-NEXT: wait ; SSE-X86-NEXT: wait
; SSE-X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSE-X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@ -623,7 +623,7 @@ define float @uitofp_i64tof32(i64 %x) #0 {
; AVX-X86-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp) ; AVX-X86-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
; AVX-X86-NEXT: shrl $31, %eax ; AVX-X86-NEXT: shrl $31, %eax
; AVX-X86-NEXT: fildll {{[0-9]+}}(%esp) ; AVX-X86-NEXT: fildll {{[0-9]+}}(%esp)
; AVX-X86-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; AVX-X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-X86-NEXT: fstps {{[0-9]+}}(%esp) ; AVX-X86-NEXT: fstps {{[0-9]+}}(%esp)
; AVX-X86-NEXT: wait ; AVX-X86-NEXT: wait
; AVX-X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; AVX-X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@ -671,7 +671,7 @@ define float @uitofp_i64tof32(i64 %x) #0 {
; X87-NEXT: movl %eax, {{[0-9]+}}(%esp) ; X87-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X87-NEXT: shrl $31, %ecx ; X87-NEXT: shrl $31, %ecx
; X87-NEXT: fildll {{[0-9]+}}(%esp) ; X87-NEXT: fildll {{[0-9]+}}(%esp)
; X87-NEXT: fadds {{\.LCPI.*}}(,%ecx,4) ; X87-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X87-NEXT: fstps {{[0-9]+}}(%esp) ; X87-NEXT: fstps {{[0-9]+}}(%esp)
; X87-NEXT: flds {{[0-9]+}}(%esp) ; X87-NEXT: flds {{[0-9]+}}(%esp)
; X87-NEXT: wait ; X87-NEXT: wait
@ -1164,8 +1164,8 @@ define double @uitofp_i32tof64(i32 %x) #0 {
; SSE-X86-NEXT: andl $-8, %esp ; SSE-X86-NEXT: andl $-8, %esp
; SSE-X86-NEXT: subl $8, %esp ; SSE-X86-NEXT: subl $8, %esp
; SSE-X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSE-X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-X86-NEXT: orpd {{\.LCPI.*}}, %xmm0 ; SSE-X86-NEXT: orpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-X86-NEXT: subsd {{\.LCPI.*}}, %xmm0 ; SSE-X86-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-X86-NEXT: movsd %xmm0, (%esp) ; SSE-X86-NEXT: movsd %xmm0, (%esp)
; SSE-X86-NEXT: fldl (%esp) ; SSE-X86-NEXT: fldl (%esp)
; SSE-X86-NEXT: wait ; SSE-X86-NEXT: wait
@ -1190,8 +1190,8 @@ define double @uitofp_i32tof64(i32 %x) #0 {
; AVX1-X86-NEXT: andl $-8, %esp ; AVX1-X86-NEXT: andl $-8, %esp
; AVX1-X86-NEXT: subl $8, %esp ; AVX1-X86-NEXT: subl $8, %esp
; AVX1-X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; AVX1-X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX1-X86-NEXT: vorpd {{\.LCPI.*}}, %xmm0, %xmm0 ; AVX1-X86-NEXT: vorpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX1-X86-NEXT: vsubsd {{\.LCPI.*}}, %xmm0, %xmm0 ; AVX1-X86-NEXT: vsubsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX1-X86-NEXT: vmovsd %xmm0, (%esp) ; AVX1-X86-NEXT: vmovsd %xmm0, (%esp)
; AVX1-X86-NEXT: fldl (%esp) ; AVX1-X86-NEXT: fldl (%esp)
; AVX1-X86-NEXT: wait ; AVX1-X86-NEXT: wait
@ -1268,7 +1268,7 @@ define double @uitofp_i64tof64(i64 %x) #0 {
; SSE-X86-NEXT: movlps %xmm0, {{[0-9]+}}(%esp) ; SSE-X86-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
; SSE-X86-NEXT: shrl $31, %eax ; SSE-X86-NEXT: shrl $31, %eax
; SSE-X86-NEXT: fildll {{[0-9]+}}(%esp) ; SSE-X86-NEXT: fildll {{[0-9]+}}(%esp)
; SSE-X86-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; SSE-X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE-X86-NEXT: fstpl {{[0-9]+}}(%esp) ; SSE-X86-NEXT: fstpl {{[0-9]+}}(%esp)
; SSE-X86-NEXT: wait ; SSE-X86-NEXT: wait
; SSE-X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; SSE-X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@ -1310,7 +1310,7 @@ define double @uitofp_i64tof64(i64 %x) #0 {
; AVX-X86-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp) ; AVX-X86-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
; AVX-X86-NEXT: shrl $31, %eax ; AVX-X86-NEXT: shrl $31, %eax
; AVX-X86-NEXT: fildll {{[0-9]+}}(%esp) ; AVX-X86-NEXT: fildll {{[0-9]+}}(%esp)
; AVX-X86-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; AVX-X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-X86-NEXT: fstpl {{[0-9]+}}(%esp) ; AVX-X86-NEXT: fstpl {{[0-9]+}}(%esp)
; AVX-X86-NEXT: wait ; AVX-X86-NEXT: wait
; AVX-X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; AVX-X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@ -1358,7 +1358,7 @@ define double @uitofp_i64tof64(i64 %x) #0 {
; X87-NEXT: movl %eax, (%esp) ; X87-NEXT: movl %eax, (%esp)
; X87-NEXT: shrl $31, %ecx ; X87-NEXT: shrl $31, %ecx
; X87-NEXT: fildll (%esp) ; X87-NEXT: fildll (%esp)
; X87-NEXT: fadds {{\.LCPI.*}}(,%ecx,4) ; X87-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X87-NEXT: fstpl {{[0-9]+}}(%esp) ; X87-NEXT: fstpl {{[0-9]+}}(%esp)
; X87-NEXT: fldl {{[0-9]+}}(%esp) ; X87-NEXT: fldl {{[0-9]+}}(%esp)
; X87-NEXT: wait ; X87-NEXT: wait

View File

@ -1287,8 +1287,8 @@ define fp128 @TestTruncCopysign(fp128 %x, i32 %n) nounwind {
; X32-NEXT: addl $16, %esp ; X32-NEXT: addl $16, %esp
; X32-NEXT: fstpl {{[0-9]+}}(%esp) ; X32-NEXT: fstpl {{[0-9]+}}(%esp)
; X32-NEXT: testb $-128, {{[0-9]+}}(%esp) ; X32-NEXT: testb $-128, {{[0-9]+}}(%esp)
; X32-NEXT: flds {{\.LCPI.*}} ; X32-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X32-NEXT: flds {{\.LCPI.*}} ; X32-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X32-NEXT: jne .LBB26_3 ; X32-NEXT: jne .LBB26_3
; X32-NEXT: # %bb.2: # %if.then ; X32-NEXT: # %bb.2: # %if.then
; X32-NEXT: fstp %st(1) ; X32-NEXT: fstp %st(1)

View File

@ -144,7 +144,7 @@ define fp128 @TestI128_1(fp128 %x) #0 {
; SSE-NEXT: testl %eax, %eax ; SSE-NEXT: testl %eax, %eax
; SSE-NEXT: sets %cl ; SSE-NEXT: sets %cl
; SSE-NEXT: shlq $4, %rcx ; SSE-NEXT: shlq $4, %rcx
; SSE-NEXT: movaps {{\.LCPI.*}}(%rcx), %xmm0 ; SSE-NEXT: movaps {{\.LCPI[0-9]+_[0-9]+}}(%rcx), %xmm0
; SSE-NEXT: addq $40, %rsp ; SSE-NEXT: addq $40, %rsp
; SSE-NEXT: retq ; SSE-NEXT: retq
; ;
@ -164,7 +164,7 @@ define fp128 @TestI128_1(fp128 %x) #0 {
; AVX-NEXT: testl %eax, %eax ; AVX-NEXT: testl %eax, %eax
; AVX-NEXT: sets %cl ; AVX-NEXT: sets %cl
; AVX-NEXT: shlq $4, %rcx ; AVX-NEXT: shlq $4, %rcx
; AVX-NEXT: vmovaps {{\.LCPI.*}}(%rcx), %xmm0 ; AVX-NEXT: vmovaps {{\.LCPI[0-9]+_[0-9]+}}(%rcx), %xmm0
; AVX-NEXT: addq $40, %rsp ; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: retq ; AVX-NEXT: retq
entry: entry:

View File

@ -588,7 +588,7 @@ define i64 @fp80_to_uint64(x86_fp80 %x) #0 {
; X86-NEXT: andl $-8, %esp ; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $16, %esp ; X86-NEXT: subl $16, %esp
; X86-NEXT: fldt 8(%ebp) ; X86-NEXT: fldt 8(%ebp)
; X86-NEXT: flds {{\.LCPI.*}} ; X86-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-NEXT: fcom %st(1) ; X86-NEXT: fcom %st(1)
; X86-NEXT: wait ; X86-NEXT: wait
; X86-NEXT: fnstsw %ax ; X86-NEXT: fnstsw %ax
@ -905,7 +905,7 @@ define x86_fp80 @uint64_to_fp80(i64 %x) #0 {
; X86-NEXT: movl %eax, (%esp) ; X86-NEXT: movl %eax, (%esp)
; X86-NEXT: shrl $31, %ecx ; X86-NEXT: shrl $31, %ecx
; X86-NEXT: fildll (%esp) ; X86-NEXT: fildll (%esp)
; X86-NEXT: fadds {{\.LCPI.*}}(,%ecx,4) ; X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X86-NEXT: wait ; X86-NEXT: wait
; X86-NEXT: movl %ebp, %esp ; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp ; X86-NEXT: popl %ebp
@ -919,7 +919,7 @@ define x86_fp80 @uint64_to_fp80(i64 %x) #0 {
; X64-NEXT: testq %rdi, %rdi ; X64-NEXT: testq %rdi, %rdi
; X64-NEXT: sets %al ; X64-NEXT: sets %al
; X64-NEXT: fildll -{{[0-9]+}}(%rsp) ; X64-NEXT: fildll -{{[0-9]+}}(%rsp)
; X64-NEXT: fadds {{\.LCPI.*}}(,%rax,4) ; X64-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%rax,4)
; X64-NEXT: wait ; X64-NEXT: wait
; X64-NEXT: retq ; X64-NEXT: retq
%result = call x86_fp80 @llvm.experimental.constrained.uitofp.f80.i64(i64 %x, %result = call x86_fp80 @llvm.experimental.constrained.uitofp.f80.i64(i64 %x,

View File

@ -105,7 +105,7 @@ define i8 @test_signed_i8_f32(float %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp) ; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -117,7 +117,7 @@ define i8 @test_signed_i8_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movb {{[0-9]+}}(%esp), %dl ; X86-X87-NEXT: movb {{[0-9]+}}(%esp), %dl
; X86-X87-NEXT: .LBB1_2: ; X86-X87-NEXT: .LBB1_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -176,7 +176,7 @@ define i13 @test_signed_i13_f32(float %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp) ; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -188,7 +188,7 @@ define i13 @test_signed_i13_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB2_2: ; X86-X87-NEXT: .LBB2_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -248,7 +248,7 @@ define i16 @test_signed_i16_f32(float %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp) ; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -260,7 +260,7 @@ define i16 @test_signed_i16_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB3_2: ; X86-X87-NEXT: .LBB3_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -320,7 +320,7 @@ define i19 @test_signed_i19_f32(float %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fistl {{[0-9]+}}(%esp) ; X86-X87-NEXT: fistl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw (%esp) ; X86-X87-NEXT: fldcw (%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -332,7 +332,7 @@ define i19 @test_signed_i19_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB4_2: ; X86-X87-NEXT: .LBB4_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -362,8 +362,8 @@ define i19 @test_signed_i19_f32(float %f) nounwind {
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: xorl %eax, %eax ; X86-SSE-NEXT: xorl %eax, %eax
; X86-SSE-NEXT: ucomiss %xmm0, %xmm0 ; X86-SSE-NEXT: ucomiss %xmm0, %xmm0
; X86-SSE-NEXT: maxss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: maxss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: minss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: minss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvttss2si %xmm0, %ecx ; X86-SSE-NEXT: cvttss2si %xmm0, %ecx
; X86-SSE-NEXT: cmovnpl %ecx, %eax ; X86-SSE-NEXT: cmovnpl %ecx, %eax
; X86-SSE-NEXT: retl ; X86-SSE-NEXT: retl
@ -393,7 +393,7 @@ define i32 @test_signed_i32_f32(float %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fistl {{[0-9]+}}(%esp) ; X86-X87-NEXT: fistl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw (%esp) ; X86-X87-NEXT: fldcw (%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -405,7 +405,7 @@ define i32 @test_signed_i32_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB5_2: ; X86-X87-NEXT: .LBB5_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -434,7 +434,7 @@ define i32 @test_signed_i32_f32(float %f) nounwind {
; X86-SSE: # %bb.0: ; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: cvttss2si %xmm0, %eax ; X86-SSE-NEXT: cvttss2si %xmm0, %eax
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %ecx # imm = 0x7FFFFFFF ; X86-SSE-NEXT: movl $2147483647, %ecx # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %eax, %ecx ; X86-SSE-NEXT: cmovbel %eax, %ecx
; X86-SSE-NEXT: xorl %eax, %eax ; X86-SSE-NEXT: xorl %eax, %eax
@ -471,7 +471,7 @@ define i50 @test_signed_i50_f32(float %f) nounwind {
; X86-X87-NEXT: fld %st(0) ; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp) ; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -489,7 +489,7 @@ define i50 @test_signed_i50_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.3: ; X86-X87-NEXT: # %bb.3:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB6_4: ; X86-X87-NEXT: .LBB6_4:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -537,12 +537,12 @@ define i50 @test_signed_i50_f32(float %f) nounwind {
; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: xorl %ecx, %ecx ; X86-SSE-NEXT: xorl %ecx, %ecx
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: cmovbl %ecx, %esi ; X86-SSE-NEXT: cmovbl %ecx, %esi
; X86-SSE-NEXT: movl $-131072, %eax # imm = 0xFFFE0000 ; X86-SSE-NEXT: movl $-131072, %eax # imm = 0xFFFE0000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $131071, %edx # imm = 0x1FFFF ; X86-SSE-NEXT: movl $131071, %edx # imm = 0x1FFFF
; X86-SSE-NEXT: cmovbel %eax, %edx ; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax ; X86-SSE-NEXT: movl $-1, %eax
@ -586,7 +586,7 @@ define i64 @test_signed_i64_f32(float %f) nounwind {
; X86-X87-NEXT: fld %st(0) ; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp) ; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -604,7 +604,7 @@ define i64 @test_signed_i64_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.3: ; X86-X87-NEXT: # %bb.3:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB7_4: ; X86-X87-NEXT: .LBB7_4:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -652,12 +652,12 @@ define i64 @test_signed_i64_f32(float %f) nounwind {
; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: xorl %ecx, %ecx ; X86-SSE-NEXT: xorl %ecx, %ecx
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: cmovbl %ecx, %esi ; X86-SSE-NEXT: cmovbl %ecx, %esi
; X86-SSE-NEXT: movl $-2147483648, %eax # imm = 0x80000000 ; X86-SSE-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF ; X86-SSE-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %eax, %edx ; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax ; X86-SSE-NEXT: movl $-1, %eax
@ -695,7 +695,7 @@ define i100 @test_signed_i100_f32(float %f) nounwind {
; X86-X87-NEXT: fsts {{[0-9]+}}(%esp) ; X86-X87-NEXT: fsts {{[0-9]+}}(%esp)
; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax ; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp) ; X86-X87-NEXT: movl %eax, (%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fsts {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill ; X86-X87-NEXT: fsts {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
@ -723,7 +723,7 @@ define i100 @test_signed_i100_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.5: ; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB8_6: ; X86-X87-NEXT: .LBB8_6:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload ; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -789,7 +789,7 @@ define i100 @test_signed_i100_f32(float %f) nounwind {
; X86-SSE-NEXT: subl $4, %esp ; X86-SSE-NEXT: subl $4, %esp
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: xorl %ebp, %ebp ; X86-SSE-NEXT: xorl %ebp, %ebp
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-8, %ebx ; X86-SSE-NEXT: movl $-8, %ebx
; X86-SSE-NEXT: movl $0, %ecx ; X86-SSE-NEXT: movl $0, %ecx
; X86-SSE-NEXT: movl $0, %edx ; X86-SSE-NEXT: movl $0, %edx
@ -801,7 +801,7 @@ define i100 @test_signed_i100_f32(float %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB8_2: ; X86-SSE-NEXT: .LBB8_2:
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %eax ; X86-SSE-NEXT: movl $-1, %eax
; X86-SSE-NEXT: cmoval %eax, %edi ; X86-SSE-NEXT: cmoval %eax, %edi
; X86-SSE-NEXT: cmoval %eax, %edx ; X86-SSE-NEXT: cmoval %eax, %edx
@ -864,7 +864,7 @@ define i128 @test_signed_i128_f32(float %f) nounwind {
; X86-X87-NEXT: fsts {{[0-9]+}}(%esp) ; X86-X87-NEXT: fsts {{[0-9]+}}(%esp)
; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax ; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp) ; X86-X87-NEXT: movl %eax, (%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fsts {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill ; X86-X87-NEXT: fsts {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
@ -888,7 +888,7 @@ define i128 @test_signed_i128_f32(float %f) nounwind {
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB9_6: ; X86-X87-NEXT: .LBB9_6:
; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload ; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -964,7 +964,7 @@ define i128 @test_signed_i128_f32(float %f) nounwind {
; X86-SSE-NEXT: subl $4, %esp ; X86-SSE-NEXT: subl $4, %esp
; X86-SSE-NEXT: xorl %ecx, %ecx ; X86-SSE-NEXT: xorl %ecx, %ecx
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: cmovbl %ecx, %eax ; X86-SSE-NEXT: cmovbl %ecx, %eax
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
@ -973,7 +973,7 @@ define i128 @test_signed_i128_f32(float %f) nounwind {
; X86-SSE-NEXT: cmovbl %ecx, %edi ; X86-SSE-NEXT: cmovbl %ecx, %edi
; X86-SSE-NEXT: movl $-2147483648, %ebx # imm = 0x80000000 ; X86-SSE-NEXT: movl $-2147483648, %ebx # imm = 0x80000000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %ebx ; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %ebx
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %ebp # imm = 0x7FFFFFFF ; X86-SSE-NEXT: movl $2147483647, %ebp # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %ebx, %ebp ; X86-SSE-NEXT: cmovbel %ebx, %ebp
; X86-SSE-NEXT: movl $-1, %ebx ; X86-SSE-NEXT: movl $-1, %ebx
@ -1125,7 +1125,7 @@ define i8 @test_signed_i8_f64(double %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp) ; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -1137,7 +1137,7 @@ define i8 @test_signed_i8_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movb {{[0-9]+}}(%esp), %dl ; X86-X87-NEXT: movb {{[0-9]+}}(%esp), %dl
; X86-X87-NEXT: .LBB11_2: ; X86-X87-NEXT: .LBB11_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -1196,7 +1196,7 @@ define i13 @test_signed_i13_f64(double %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp) ; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -1208,7 +1208,7 @@ define i13 @test_signed_i13_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB12_2: ; X86-X87-NEXT: .LBB12_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -1268,7 +1268,7 @@ define i16 @test_signed_i16_f64(double %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp) ; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -1280,7 +1280,7 @@ define i16 @test_signed_i16_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB13_2: ; X86-X87-NEXT: .LBB13_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -1340,7 +1340,7 @@ define i19 @test_signed_i19_f64(double %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fistl {{[0-9]+}}(%esp) ; X86-X87-NEXT: fistl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw (%esp) ; X86-X87-NEXT: fldcw (%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -1352,7 +1352,7 @@ define i19 @test_signed_i19_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB14_2: ; X86-X87-NEXT: .LBB14_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -1382,8 +1382,8 @@ define i19 @test_signed_i19_f64(double %f) nounwind {
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT: xorl %eax, %eax ; X86-SSE-NEXT: xorl %eax, %eax
; X86-SSE-NEXT: ucomisd %xmm0, %xmm0 ; X86-SSE-NEXT: ucomisd %xmm0, %xmm0
; X86-SSE-NEXT: maxsd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: maxsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: minsd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: minsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvttsd2si %xmm0, %ecx ; X86-SSE-NEXT: cvttsd2si %xmm0, %ecx
; X86-SSE-NEXT: cmovnpl %ecx, %eax ; X86-SSE-NEXT: cmovnpl %ecx, %eax
; X86-SSE-NEXT: retl ; X86-SSE-NEXT: retl
@ -1413,7 +1413,7 @@ define i32 @test_signed_i32_f64(double %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fistl {{[0-9]+}}(%esp) ; X86-X87-NEXT: fistl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw (%esp) ; X86-X87-NEXT: fldcw (%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -1425,7 +1425,7 @@ define i32 @test_signed_i32_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB15_2: ; X86-X87-NEXT: .LBB15_2:
; X86-X87-NEXT: fldl {{\.LCPI.*}} ; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -1455,8 +1455,8 @@ define i32 @test_signed_i32_f64(double %f) nounwind {
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT: xorl %eax, %eax ; X86-SSE-NEXT: xorl %eax, %eax
; X86-SSE-NEXT: ucomisd %xmm0, %xmm0 ; X86-SSE-NEXT: ucomisd %xmm0, %xmm0
; X86-SSE-NEXT: maxsd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: maxsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: minsd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: minsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvttsd2si %xmm0, %ecx ; X86-SSE-NEXT: cvttsd2si %xmm0, %ecx
; X86-SSE-NEXT: cmovnpl %ecx, %eax ; X86-SSE-NEXT: cmovnpl %ecx, %eax
; X86-SSE-NEXT: retl ; X86-SSE-NEXT: retl
@ -1489,7 +1489,7 @@ define i50 @test_signed_i50_f64(double %f) nounwind {
; X86-X87-NEXT: fld %st(0) ; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp) ; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -1507,7 +1507,7 @@ define i50 @test_signed_i50_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.3: ; X86-X87-NEXT: # %bb.3:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB16_4: ; X86-X87-NEXT: .LBB16_4:
; X86-X87-NEXT: fldl {{\.LCPI.*}} ; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -1555,12 +1555,12 @@ define i50 @test_signed_i50_f64(double %f) nounwind {
; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: xorl %ecx, %ecx ; X86-SSE-NEXT: xorl %ecx, %ecx
; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: cmovbl %ecx, %esi ; X86-SSE-NEXT: cmovbl %ecx, %esi
; X86-SSE-NEXT: movl $-131072, %eax # imm = 0xFFFE0000 ; X86-SSE-NEXT: movl $-131072, %eax # imm = 0xFFFE0000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $131071, %edx # imm = 0x1FFFF ; X86-SSE-NEXT: movl $131071, %edx # imm = 0x1FFFF
; X86-SSE-NEXT: cmovbel %eax, %edx ; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax ; X86-SSE-NEXT: movl $-1, %eax
@ -1600,7 +1600,7 @@ define i64 @test_signed_i64_f64(double %f) nounwind {
; X86-X87-NEXT: fld %st(0) ; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp) ; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -1618,7 +1618,7 @@ define i64 @test_signed_i64_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.3: ; X86-X87-NEXT: # %bb.3:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB17_4: ; X86-X87-NEXT: .LBB17_4:
; X86-X87-NEXT: fldl {{\.LCPI.*}} ; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -1666,12 +1666,12 @@ define i64 @test_signed_i64_f64(double %f) nounwind {
; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: xorl %ecx, %ecx ; X86-SSE-NEXT: xorl %ecx, %ecx
; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: cmovbl %ecx, %esi ; X86-SSE-NEXT: cmovbl %ecx, %esi
; X86-SSE-NEXT: movl $-2147483648, %eax # imm = 0x80000000 ; X86-SSE-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF ; X86-SSE-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %eax, %edx ; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax ; X86-SSE-NEXT: movl $-1, %eax
@ -1709,7 +1709,7 @@ define i100 @test_signed_i100_f64(double %f) nounwind {
; X86-X87-NEXT: fstl {{[0-9]+}}(%esp) ; X86-X87-NEXT: fstl {{[0-9]+}}(%esp)
; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax ; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp) ; X86-X87-NEXT: movl %eax, (%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fstl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Spill ; X86-X87-NEXT: fstl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Spill
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
@ -1737,7 +1737,7 @@ define i100 @test_signed_i100_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.5: ; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB18_6: ; X86-X87-NEXT: .LBB18_6:
; X86-X87-NEXT: fldl {{\.LCPI.*}} ; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload ; X86-X87-NEXT: fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -1803,7 +1803,7 @@ define i100 @test_signed_i100_f64(double %f) nounwind {
; X86-SSE-NEXT: subl $4, %esp ; X86-SSE-NEXT: subl $4, %esp
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT: xorl %ebp, %ebp ; X86-SSE-NEXT: xorl %ebp, %ebp
; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-8, %ebx ; X86-SSE-NEXT: movl $-8, %ebx
; X86-SSE-NEXT: movl $0, %ecx ; X86-SSE-NEXT: movl $0, %ecx
; X86-SSE-NEXT: movl $0, %edx ; X86-SSE-NEXT: movl $0, %edx
@ -1815,7 +1815,7 @@ define i100 @test_signed_i100_f64(double %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB18_2: ; X86-SSE-NEXT: .LBB18_2:
; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %eax ; X86-SSE-NEXT: movl $-1, %eax
; X86-SSE-NEXT: cmoval %eax, %edi ; X86-SSE-NEXT: cmoval %eax, %edi
; X86-SSE-NEXT: cmoval %eax, %edx ; X86-SSE-NEXT: cmoval %eax, %edx
@ -1878,7 +1878,7 @@ define i128 @test_signed_i128_f64(double %f) nounwind {
; X86-X87-NEXT: fstl {{[0-9]+}}(%esp) ; X86-X87-NEXT: fstl {{[0-9]+}}(%esp)
; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax ; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp) ; X86-X87-NEXT: movl %eax, (%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fstl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Spill ; X86-X87-NEXT: fstl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Spill
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
@ -1902,7 +1902,7 @@ define i128 @test_signed_i128_f64(double %f) nounwind {
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB19_6: ; X86-X87-NEXT: .LBB19_6:
; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-X87-NEXT: fldl {{\.LCPI.*}} ; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload ; X86-X87-NEXT: fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -1978,7 +1978,7 @@ define i128 @test_signed_i128_f64(double %f) nounwind {
; X86-SSE-NEXT: subl $4, %esp ; X86-SSE-NEXT: subl $4, %esp
; X86-SSE-NEXT: xorl %ecx, %ecx ; X86-SSE-NEXT: xorl %ecx, %ecx
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: cmovbl %ecx, %eax ; X86-SSE-NEXT: cmovbl %ecx, %eax
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
@ -1987,7 +1987,7 @@ define i128 @test_signed_i128_f64(double %f) nounwind {
; X86-SSE-NEXT: cmovbl %ecx, %edi ; X86-SSE-NEXT: cmovbl %ecx, %edi
; X86-SSE-NEXT: movl $-2147483648, %ebx # imm = 0x80000000 ; X86-SSE-NEXT: movl $-2147483648, %ebx # imm = 0x80000000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %ebx ; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %ebx
; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %ebp # imm = 0x7FFFFFFF ; X86-SSE-NEXT: movl $2147483647, %ebp # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %ebx, %ebp ; X86-SSE-NEXT: cmovbel %ebx, %ebp
; X86-SSE-NEXT: movl $-1, %ebx ; X86-SSE-NEXT: movl $-1, %ebx
@ -2153,7 +2153,7 @@ define i8 @test_signed_i8_f16(half %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp) ; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -2165,7 +2165,7 @@ define i8 @test_signed_i8_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movb {{[0-9]+}}(%esp), %dl ; X86-X87-NEXT: movb {{[0-9]+}}(%esp), %dl
; X86-X87-NEXT: .LBB21_2: ; X86-X87-NEXT: .LBB21_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -2236,7 +2236,7 @@ define i13 @test_signed_i13_f16(half %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp) ; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -2248,7 +2248,7 @@ define i13 @test_signed_i13_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB22_2: ; X86-X87-NEXT: .LBB22_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -2320,7 +2320,7 @@ define i16 @test_signed_i16_f16(half %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp) ; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -2332,7 +2332,7 @@ define i16 @test_signed_i16_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB23_2: ; X86-X87-NEXT: .LBB23_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -2404,7 +2404,7 @@ define i19 @test_signed_i19_f16(half %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fistl {{[0-9]+}}(%esp) ; X86-X87-NEXT: fistl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -2416,7 +2416,7 @@ define i19 @test_signed_i19_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB24_2: ; X86-X87-NEXT: .LBB24_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -2451,8 +2451,8 @@ define i19 @test_signed_i19_f16(half %f) nounwind {
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: xorl %eax, %eax ; X86-SSE-NEXT: xorl %eax, %eax
; X86-SSE-NEXT: ucomiss %xmm0, %xmm0 ; X86-SSE-NEXT: ucomiss %xmm0, %xmm0
; X86-SSE-NEXT: maxss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: maxss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: minss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: minss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvttss2si %xmm0, %ecx ; X86-SSE-NEXT: cvttss2si %xmm0, %ecx
; X86-SSE-NEXT: cmovnpl %ecx, %eax ; X86-SSE-NEXT: cmovnpl %ecx, %eax
; X86-SSE-NEXT: addl $12, %esp ; X86-SSE-NEXT: addl $12, %esp
@ -2489,7 +2489,7 @@ define i32 @test_signed_i32_f16(half %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fistl {{[0-9]+}}(%esp) ; X86-X87-NEXT: fistl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -2501,7 +2501,7 @@ define i32 @test_signed_i32_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB25_2: ; X86-X87-NEXT: .LBB25_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -2535,7 +2535,7 @@ define i32 @test_signed_i32_f16(half %f) nounwind {
; X86-SSE-NEXT: fstps {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fstps {{[0-9]+}}(%esp)
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: cvttss2si %xmm0, %eax ; X86-SSE-NEXT: cvttss2si %xmm0, %eax
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %ecx # imm = 0x7FFFFFFF ; X86-SSE-NEXT: movl $2147483647, %ecx # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %eax, %ecx ; X86-SSE-NEXT: cmovbel %eax, %ecx
; X86-SSE-NEXT: xorl %eax, %eax ; X86-SSE-NEXT: xorl %eax, %eax
@ -2579,7 +2579,7 @@ define i50 @test_signed_i50_f16(half %f) nounwind {
; X86-X87-NEXT: fld %st(0) ; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp) ; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -2597,7 +2597,7 @@ define i50 @test_signed_i50_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.3: ; X86-X87-NEXT: # %bb.3:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB26_4: ; X86-X87-NEXT: .LBB26_4:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -2649,12 +2649,12 @@ define i50 @test_signed_i50_f16(half %f) nounwind {
; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: xorl %ecx, %ecx ; X86-SSE-NEXT: xorl %ecx, %ecx
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: cmovbl %ecx, %esi ; X86-SSE-NEXT: cmovbl %ecx, %esi
; X86-SSE-NEXT: movl $-131072, %eax # imm = 0xFFFE0000 ; X86-SSE-NEXT: movl $-131072, %eax # imm = 0xFFFE0000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $131071, %edx # imm = 0x1FFFF ; X86-SSE-NEXT: movl $131071, %edx # imm = 0x1FFFF
; X86-SSE-NEXT: cmovbel %eax, %edx ; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax ; X86-SSE-NEXT: movl $-1, %eax
@ -2704,7 +2704,7 @@ define i64 @test_signed_i64_f16(half %f) nounwind {
; X86-X87-NEXT: fld %st(0) ; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp) ; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -2722,7 +2722,7 @@ define i64 @test_signed_i64_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.3: ; X86-X87-NEXT: # %bb.3:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB27_4: ; X86-X87-NEXT: .LBB27_4:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -2774,12 +2774,12 @@ define i64 @test_signed_i64_f16(half %f) nounwind {
; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: xorl %ecx, %ecx ; X86-SSE-NEXT: xorl %ecx, %ecx
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: cmovbl %ecx, %esi ; X86-SSE-NEXT: cmovbl %ecx, %esi
; X86-SSE-NEXT: movl $-2147483648, %eax # imm = 0x80000000 ; X86-SSE-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF ; X86-SSE-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %eax, %edx ; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax ; X86-SSE-NEXT: movl $-1, %eax
@ -2823,7 +2823,7 @@ define i100 @test_signed_i100_f16(half %f) nounwind {
; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax ; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp) ; X86-X87-NEXT: movl %eax, (%esp)
; X86-X87-NEXT: fsts {{[0-9]+}}(%esp) ; X86-X87-NEXT: fsts {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fsts {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill ; X86-X87-NEXT: fsts {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
@ -2851,7 +2851,7 @@ define i100 @test_signed_i100_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.5: ; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB28_6: ; X86-X87-NEXT: .LBB28_6:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload ; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -2923,7 +2923,7 @@ define i100 @test_signed_i100_f16(half %f) nounwind {
; X86-SSE-NEXT: movss {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 4-byte Reload ; X86-SSE-NEXT: movss {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 4-byte Reload
; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero ; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: xorl %ebp, %ebp ; X86-SSE-NEXT: xorl %ebp, %ebp
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-8, %ebx ; X86-SSE-NEXT: movl $-8, %ebx
; X86-SSE-NEXT: movl $0, %ecx ; X86-SSE-NEXT: movl $0, %ecx
; X86-SSE-NEXT: movl $0, %edx ; X86-SSE-NEXT: movl $0, %edx
@ -2935,7 +2935,7 @@ define i100 @test_signed_i100_f16(half %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB28_2: ; X86-SSE-NEXT: .LBB28_2:
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %eax ; X86-SSE-NEXT: movl $-1, %eax
; X86-SSE-NEXT: cmoval %eax, %edi ; X86-SSE-NEXT: cmoval %eax, %edi
; X86-SSE-NEXT: cmoval %eax, %edx ; X86-SSE-NEXT: cmoval %eax, %edx
@ -3002,7 +3002,7 @@ define i128 @test_signed_i128_f16(half %f) nounwind {
; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax ; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp) ; X86-X87-NEXT: movl %eax, (%esp)
; X86-X87-NEXT: fsts {{[0-9]+}}(%esp) ; X86-X87-NEXT: fsts {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fsts {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill ; X86-X87-NEXT: fsts {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
@ -3026,7 +3026,7 @@ define i128 @test_signed_i128_f16(half %f) nounwind {
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB29_6: ; X86-X87-NEXT: .LBB29_6:
; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload ; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -3108,7 +3108,7 @@ define i128 @test_signed_i128_f16(half %f) nounwind {
; X86-SSE-NEXT: xorl %ecx, %ecx ; X86-SSE-NEXT: xorl %ecx, %ecx
; X86-SSE-NEXT: movss {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 4-byte Reload ; X86-SSE-NEXT: movss {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 4-byte Reload
; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero ; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: cmovbl %ecx, %eax ; X86-SSE-NEXT: cmovbl %ecx, %eax
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
@ -3117,7 +3117,7 @@ define i128 @test_signed_i128_f16(half %f) nounwind {
; X86-SSE-NEXT: cmovbl %ecx, %edi ; X86-SSE-NEXT: cmovbl %ecx, %edi
; X86-SSE-NEXT: movl $-2147483648, %ebx # imm = 0x80000000 ; X86-SSE-NEXT: movl $-2147483648, %ebx # imm = 0x80000000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %ebx ; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %ebx
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %ebp # imm = 0x7FFFFFFF ; X86-SSE-NEXT: movl $2147483647, %ebp # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %ebx, %ebp ; X86-SSE-NEXT: cmovbel %ebx, %ebp
; X86-SSE-NEXT: movl $-1, %ebx ; X86-SSE-NEXT: movl $-1, %ebx
@ -3311,7 +3311,7 @@ define i8 @test_signed_i8_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp) ; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -3323,7 +3323,7 @@ define i8 @test_signed_i8_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movb {{[0-9]+}}(%esp), %dl ; X86-X87-NEXT: movb {{[0-9]+}}(%esp), %dl
; X86-X87-NEXT: .LBB31_2: ; X86-X87-NEXT: .LBB31_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -3359,13 +3359,13 @@ define i8 @test_signed_i8_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: fists {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fists {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: flds {{\.LCPI.*}} ; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1) ; X86-SSE-NEXT: fstp %st(1)
; X86-SSE-NEXT: movl $128, %ecx ; X86-SSE-NEXT: movl $128, %ecx
; X86-SSE-NEXT: cmovael %eax, %ecx ; X86-SSE-NEXT: cmovael %eax, %ecx
; X86-SSE-NEXT: flds {{\.LCPI.*}} ; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1) ; X86-SSE-NEXT: fstp %st(1)
@ -3422,7 +3422,7 @@ define i13 @test_signed_i13_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp) ; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -3434,7 +3434,7 @@ define i13 @test_signed_i13_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB32_2: ; X86-X87-NEXT: .LBB32_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -3470,7 +3470,7 @@ define i13 @test_signed_i13_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fists {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fists {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: flds {{\.LCPI.*}} ; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1) ; X86-SSE-NEXT: fstp %st(1)
@ -3479,7 +3479,7 @@ define i13 @test_signed_i13_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: # %bb.1: ; X86-SSE-NEXT: # %bb.1:
; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: .LBB32_2: ; X86-SSE-NEXT: .LBB32_2:
; X86-SSE-NEXT: flds {{\.LCPI.*}} ; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1) ; X86-SSE-NEXT: fstp %st(1)
@ -3538,7 +3538,7 @@ define i16 @test_signed_i16_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp) ; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -3550,7 +3550,7 @@ define i16 @test_signed_i16_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB33_2: ; X86-X87-NEXT: .LBB33_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -3586,7 +3586,7 @@ define i16 @test_signed_i16_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fists {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fists {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: flds {{\.LCPI.*}} ; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1) ; X86-SSE-NEXT: fstp %st(1)
@ -3595,7 +3595,7 @@ define i16 @test_signed_i16_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: # %bb.1: ; X86-SSE-NEXT: # %bb.1:
; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: .LBB33_2: ; X86-SSE-NEXT: .LBB33_2:
; X86-SSE-NEXT: flds {{\.LCPI.*}} ; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1) ; X86-SSE-NEXT: fstp %st(1)
@ -3654,7 +3654,7 @@ define i19 @test_signed_i19_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fistl {{[0-9]+}}(%esp) ; X86-X87-NEXT: fistl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw (%esp) ; X86-X87-NEXT: fldcw (%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -3666,7 +3666,7 @@ define i19 @test_signed_i19_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB34_2: ; X86-X87-NEXT: .LBB34_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -3702,7 +3702,7 @@ define i19 @test_signed_i19_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fistl {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fistl {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw (%esp) ; X86-SSE-NEXT: fldcw (%esp)
; X86-SSE-NEXT: flds {{\.LCPI.*}} ; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1) ; X86-SSE-NEXT: fstp %st(1)
@ -3711,7 +3711,7 @@ define i19 @test_signed_i19_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: # %bb.1: ; X86-SSE-NEXT: # %bb.1:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: .LBB34_2: ; X86-SSE-NEXT: .LBB34_2:
; X86-SSE-NEXT: flds {{\.LCPI.*}} ; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1) ; X86-SSE-NEXT: fstp %st(1)
@ -3768,7 +3768,7 @@ define i32 @test_signed_i32_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fistl {{[0-9]+}}(%esp) ; X86-X87-NEXT: fistl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw (%esp) ; X86-X87-NEXT: fldcw (%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -3780,7 +3780,7 @@ define i32 @test_signed_i32_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB35_2: ; X86-X87-NEXT: .LBB35_2:
; X86-X87-NEXT: fldl {{\.LCPI.*}} ; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -3816,7 +3816,7 @@ define i32 @test_signed_i32_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fistl {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fistl {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw (%esp) ; X86-SSE-NEXT: fldcw (%esp)
; X86-SSE-NEXT: flds {{\.LCPI.*}} ; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1) ; X86-SSE-NEXT: fstp %st(1)
@ -3825,7 +3825,7 @@ define i32 @test_signed_i32_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: # %bb.1: ; X86-SSE-NEXT: # %bb.1:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: .LBB35_2: ; X86-SSE-NEXT: .LBB35_2:
; X86-SSE-NEXT: fldl {{\.LCPI.*}} ; X86-SSE-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1) ; X86-SSE-NEXT: fstp %st(1)
@ -3885,7 +3885,7 @@ define i50 @test_signed_i50_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: fld %st(0) ; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp) ; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -3903,7 +3903,7 @@ define i50 @test_signed_i50_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.3: ; X86-X87-NEXT: # %bb.3:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB36_4: ; X86-X87-NEXT: .LBB36_4:
; X86-X87-NEXT: fldl {{\.LCPI.*}} ; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -3950,7 +3950,7 @@ define i50 @test_signed_i50_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: xorl %ecx, %ecx ; X86-SSE-NEXT: xorl %ecx, %ecx
; X86-SSE-NEXT: flds {{\.LCPI.*}} ; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1) ; X86-SSE-NEXT: fstp %st(1)
@ -3958,7 +3958,7 @@ define i50 @test_signed_i50_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: cmovbl %ecx, %esi ; X86-SSE-NEXT: cmovbl %ecx, %esi
; X86-SSE-NEXT: movl $-131072, %eax # imm = 0xFFFE0000 ; X86-SSE-NEXT: movl $-131072, %eax # imm = 0xFFFE0000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: fldl {{\.LCPI.*}} ; X86-SSE-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1) ; X86-SSE-NEXT: fstp %st(1)
@ -4024,7 +4024,7 @@ define i64 @test_signed_i64_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: fld %st(0) ; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp) ; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -4042,7 +4042,7 @@ define i64 @test_signed_i64_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.3: ; X86-X87-NEXT: # %bb.3:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB37_4: ; X86-X87-NEXT: .LBB37_4:
; X86-X87-NEXT: fldt {{\.LCPI.*}} ; X86-X87-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -4089,7 +4089,7 @@ define i64 @test_signed_i64_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: xorl %ecx, %ecx ; X86-SSE-NEXT: xorl %ecx, %ecx
; X86-SSE-NEXT: flds {{\.LCPI.*}} ; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1) ; X86-SSE-NEXT: fstp %st(1)
@ -4097,7 +4097,7 @@ define i64 @test_signed_i64_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: cmovbl %ecx, %esi ; X86-SSE-NEXT: cmovbl %ecx, %esi
; X86-SSE-NEXT: movl $-2147483648, %eax # imm = 0x80000000 ; X86-SSE-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: fldt {{\.LCPI.*}} ; X86-SSE-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1) ; X86-SSE-NEXT: fstp %st(1)
@ -4161,7 +4161,7 @@ define i100 @test_signed_i100_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: fstpt {{[0-9]+}}(%esp) ; X86-X87-NEXT: fstpt {{[0-9]+}}(%esp)
; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax ; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp) ; X86-X87-NEXT: movl %eax, (%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fld %st(1) ; X86-X87-NEXT: fld %st(1)
; X86-X87-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill ; X86-X87-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
@ -4190,7 +4190,7 @@ define i100 @test_signed_i100_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.5: ; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB38_6: ; X86-X87-NEXT: .LBB38_6:
; X86-X87-NEXT: fldt {{\.LCPI.*}} ; X86-X87-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload ; X86-X87-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -4258,7 +4258,7 @@ define i100 @test_signed_i100_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: subl $4, %esp ; X86-SSE-NEXT: subl $4, %esp
; X86-SSE-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload ; X86-SSE-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-SSE-NEXT: xorl %ebp, %ebp ; X86-SSE-NEXT: xorl %ebp, %ebp
; X86-SSE-NEXT: flds {{\.LCPI.*}} ; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1) ; X86-SSE-NEXT: fstp %st(1)
@ -4273,7 +4273,7 @@ define i100 @test_signed_i100_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB38_2: ; X86-SSE-NEXT: .LBB38_2:
; X86-SSE-NEXT: fldt {{\.LCPI.*}} ; X86-SSE-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1) ; X86-SSE-NEXT: fstp %st(1)
@ -4347,7 +4347,7 @@ define i128 @test_signed_i128_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: fstpt {{[0-9]+}}(%esp) ; X86-X87-NEXT: fstpt {{[0-9]+}}(%esp)
; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax ; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp) ; X86-X87-NEXT: movl %eax, (%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fld %st(1) ; X86-X87-NEXT: fld %st(1)
; X86-X87-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill ; X86-X87-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
@ -4372,7 +4372,7 @@ define i128 @test_signed_i128_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB39_6: ; X86-X87-NEXT: .LBB39_6:
; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-X87-NEXT: fldt {{\.LCPI.*}} ; X86-X87-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload ; X86-X87-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1) ; X86-X87-NEXT: fstp %st(1)
@ -4449,7 +4449,7 @@ define i128 @test_signed_i128_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: calll __fixxfti ; X86-SSE-NEXT: calll __fixxfti
; X86-SSE-NEXT: subl $4, %esp ; X86-SSE-NEXT: subl $4, %esp
; X86-SSE-NEXT: xorl %ecx, %ecx ; X86-SSE-NEXT: xorl %ecx, %ecx
; X86-SSE-NEXT: flds {{\.LCPI.*}} ; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload ; X86-SSE-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1) ; X86-SSE-NEXT: fstp %st(1)
@ -4461,7 +4461,7 @@ define i128 @test_signed_i128_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: cmovbl %ecx, %edi ; X86-SSE-NEXT: cmovbl %ecx, %edi
; X86-SSE-NEXT: movl $-2147483648, %ebx # imm = 0x80000000 ; X86-SSE-NEXT: movl $-2147483648, %ebx # imm = 0x80000000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %ebx ; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %ebx
; X86-SSE-NEXT: fldt {{\.LCPI.*}} ; X86-SSE-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1) ; X86-SSE-NEXT: fstp %st(1)

View File

@ -107,7 +107,7 @@ define i8 @test_unsigned_i8_f32(float %f) nounwind {
; X86-X87-NEXT: .LBB1_1: ; X86-X87-NEXT: .LBB1_1:
; X86-X87-NEXT: xorl %ecx, %ecx ; X86-X87-NEXT: xorl %ecx, %ecx
; X86-X87-NEXT: .LBB1_3: ; X86-X87-NEXT: .LBB1_3:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -168,7 +168,7 @@ define i13 @test_unsigned_i13_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB2_2: ; X86-X87-NEXT: .LBB2_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -230,7 +230,7 @@ define i16 @test_unsigned_i16_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB3_2: ; X86-X87-NEXT: .LBB3_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -293,7 +293,7 @@ define i19 @test_unsigned_i19_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB4_2: ; X86-X87-NEXT: .LBB4_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -312,7 +312,7 @@ define i19 @test_unsigned_i19_f32(float %f) nounwind {
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: xorps %xmm1, %xmm1 ; X86-SSE-NEXT: xorps %xmm1, %xmm1
; X86-SSE-NEXT: maxss %xmm1, %xmm0 ; X86-SSE-NEXT: maxss %xmm1, %xmm0
; X86-SSE-NEXT: minss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: minss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvttss2si %xmm0, %eax ; X86-SSE-NEXT: cvttss2si %xmm0, %eax
; X86-SSE-NEXT: retl ; X86-SSE-NEXT: retl
; ;
@ -352,7 +352,7 @@ define i32 @test_unsigned_i32_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB5_2: ; X86-X87-NEXT: .LBB5_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -381,7 +381,7 @@ define i32 @test_unsigned_i32_f32(float %f) nounwind {
; X86-SSE-NEXT: xorps %xmm1, %xmm1 ; X86-SSE-NEXT: xorps %xmm1, %xmm1
; X86-SSE-NEXT: ucomiss %xmm1, %xmm0 ; X86-SSE-NEXT: ucomiss %xmm1, %xmm0
; X86-SSE-NEXT: cmovael %ecx, %edx ; X86-SSE-NEXT: cmovael %ecx, %edx
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %eax ; X86-SSE-NEXT: movl $-1, %eax
; X86-SSE-NEXT: cmovbel %edx, %eax ; X86-SSE-NEXT: cmovbel %edx, %eax
; X86-SSE-NEXT: retl ; X86-SSE-NEXT: retl
@ -407,7 +407,7 @@ define i50 @test_unsigned_i50_f32(float %f) nounwind {
; X86-X87-NEXT: pushl %esi ; X86-X87-NEXT: pushl %esi
; X86-X87-NEXT: subl $16, %esp ; X86-X87-NEXT: subl $16, %esp
; X86-X87-NEXT: flds {{[0-9]+}}(%esp) ; X86-X87-NEXT: flds {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
; X86-X87-NEXT: xorl %ecx, %ecx ; X86-X87-NEXT: xorl %ecx, %ecx
@ -449,7 +449,7 @@ define i50 @test_unsigned_i50_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.5: ; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-X87-NEXT: .LBB6_6: ; X86-X87-NEXT: .LBB6_6:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -503,7 +503,7 @@ define i50 @test_unsigned_i50_f32(float %f) nounwind {
; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: .LBB6_4: ; X86-SSE-NEXT: .LBB6_4:
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $262143, %edx # imm = 0x3FFFF ; X86-SSE-NEXT: movl $262143, %edx # imm = 0x3FFFF
; X86-SSE-NEXT: cmovbel %eax, %edx ; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax ; X86-SSE-NEXT: movl $-1, %eax
@ -534,7 +534,7 @@ define i64 @test_unsigned_i64_f32(float %f) nounwind {
; X86-X87-NEXT: pushl %esi ; X86-X87-NEXT: pushl %esi
; X86-X87-NEXT: subl $20, %esp ; X86-X87-NEXT: subl $20, %esp
; X86-X87-NEXT: flds {{[0-9]+}}(%esp) ; X86-X87-NEXT: flds {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
; X86-X87-NEXT: xorl %ecx, %ecx ; X86-X87-NEXT: xorl %ecx, %ecx
@ -576,7 +576,7 @@ define i64 @test_unsigned_i64_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.5: ; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-X87-NEXT: .LBB7_6: ; X86-X87-NEXT: .LBB7_6:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -627,7 +627,7 @@ define i64 @test_unsigned_i64_f32(float %f) nounwind {
; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: .LBB7_4: ; X86-SSE-NEXT: .LBB7_4:
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %ecx ; X86-SSE-NEXT: movl $-1, %ecx
; X86-SSE-NEXT: cmoval %ecx, %edx ; X86-SSE-NEXT: cmoval %ecx, %edx
; X86-SSE-NEXT: cmoval %ecx, %eax ; X86-SSE-NEXT: cmoval %ecx, %eax
@ -698,7 +698,7 @@ define i100 @test_unsigned_i100_f32(float %f) nounwind {
; X86-X87-NEXT: .LBB8_6: ; X86-X87-NEXT: .LBB8_6:
; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload ; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -759,7 +759,7 @@ define i100 @test_unsigned_i100_f32(float %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB8_2: ; X86-SSE-NEXT: .LBB8_2:
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $15, %ebx ; X86-SSE-NEXT: movl $15, %ebx
; X86-SSE-NEXT: cmovbel %edi, %ebx ; X86-SSE-NEXT: cmovbel %edi, %ebx
; X86-SSE-NEXT: movl $-1, %edi ; X86-SSE-NEXT: movl $-1, %edi
@ -843,7 +843,7 @@ define i128 @test_unsigned_i128_f32(float %f) nounwind {
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ebx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-X87-NEXT: .LBB9_6: ; X86-X87-NEXT: .LBB9_6:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload ; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -900,7 +900,7 @@ define i128 @test_unsigned_i128_f32(float %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB9_2: ; X86-SSE-NEXT: .LBB9_2:
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %ebx ; X86-SSE-NEXT: movl $-1, %ebx
; X86-SSE-NEXT: cmoval %ebx, %edi ; X86-SSE-NEXT: cmoval %ebx, %edi
; X86-SSE-NEXT: cmoval %ebx, %edx ; X86-SSE-NEXT: cmoval %ebx, %edx
@ -1043,7 +1043,7 @@ define i8 @test_unsigned_i8_f64(double %f) nounwind {
; X86-X87-NEXT: .LBB11_1: ; X86-X87-NEXT: .LBB11_1:
; X86-X87-NEXT: xorl %ecx, %ecx ; X86-X87-NEXT: xorl %ecx, %ecx
; X86-X87-NEXT: .LBB11_3: ; X86-X87-NEXT: .LBB11_3:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -1104,7 +1104,7 @@ define i13 @test_unsigned_i13_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB12_2: ; X86-X87-NEXT: .LBB12_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -1166,7 +1166,7 @@ define i16 @test_unsigned_i16_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB13_2: ; X86-X87-NEXT: .LBB13_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -1229,7 +1229,7 @@ define i19 @test_unsigned_i19_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB14_2: ; X86-X87-NEXT: .LBB14_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -1248,7 +1248,7 @@ define i19 @test_unsigned_i19_f64(double %f) nounwind {
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT: xorpd %xmm1, %xmm1 ; X86-SSE-NEXT: xorpd %xmm1, %xmm1
; X86-SSE-NEXT: maxsd %xmm1, %xmm0 ; X86-SSE-NEXT: maxsd %xmm1, %xmm0
; X86-SSE-NEXT: minsd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: minsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvttsd2si %xmm0, %eax ; X86-SSE-NEXT: cvttsd2si %xmm0, %eax
; X86-SSE-NEXT: retl ; X86-SSE-NEXT: retl
; ;
@ -1288,7 +1288,7 @@ define i32 @test_unsigned_i32_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB15_2: ; X86-X87-NEXT: .LBB15_2:
; X86-X87-NEXT: fldl {{\.LCPI.*}} ; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -1307,7 +1307,7 @@ define i32 @test_unsigned_i32_f64(double %f) nounwind {
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT: xorpd %xmm1, %xmm1 ; X86-SSE-NEXT: xorpd %xmm1, %xmm1
; X86-SSE-NEXT: maxsd %xmm1, %xmm0 ; X86-SSE-NEXT: maxsd %xmm1, %xmm0
; X86-SSE-NEXT: minsd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: minsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvttsd2si %xmm0, %ecx ; X86-SSE-NEXT: cvttsd2si %xmm0, %ecx
; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero ; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; X86-SSE-NEXT: movapd %xmm0, %xmm2 ; X86-SSE-NEXT: movapd %xmm0, %xmm2
@ -1337,7 +1337,7 @@ define i50 @test_unsigned_i50_f64(double %f) nounwind {
; X86-X87-NEXT: pushl %esi ; X86-X87-NEXT: pushl %esi
; X86-X87-NEXT: subl $16, %esp ; X86-X87-NEXT: subl $16, %esp
; X86-X87-NEXT: fldl {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldl {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
; X86-X87-NEXT: xorl %ecx, %ecx ; X86-X87-NEXT: xorl %ecx, %ecx
@ -1379,7 +1379,7 @@ define i50 @test_unsigned_i50_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.5: ; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-X87-NEXT: .LBB16_6: ; X86-X87-NEXT: .LBB16_6:
; X86-X87-NEXT: fldl {{\.LCPI.*}} ; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -1433,7 +1433,7 @@ define i50 @test_unsigned_i50_f64(double %f) nounwind {
; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: .LBB16_4: ; X86-SSE-NEXT: .LBB16_4:
; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $262143, %edx # imm = 0x3FFFF ; X86-SSE-NEXT: movl $262143, %edx # imm = 0x3FFFF
; X86-SSE-NEXT: cmovbel %eax, %edx ; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax ; X86-SSE-NEXT: movl $-1, %eax
@ -1460,7 +1460,7 @@ define i64 @test_unsigned_i64_f64(double %f) nounwind {
; X86-X87-NEXT: pushl %esi ; X86-X87-NEXT: pushl %esi
; X86-X87-NEXT: subl $20, %esp ; X86-X87-NEXT: subl $20, %esp
; X86-X87-NEXT: fldl {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldl {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
; X86-X87-NEXT: xorl %ecx, %ecx ; X86-X87-NEXT: xorl %ecx, %ecx
@ -1502,7 +1502,7 @@ define i64 @test_unsigned_i64_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.5: ; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-X87-NEXT: .LBB17_6: ; X86-X87-NEXT: .LBB17_6:
; X86-X87-NEXT: fldl {{\.LCPI.*}} ; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -1553,7 +1553,7 @@ define i64 @test_unsigned_i64_f64(double %f) nounwind {
; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: .LBB17_4: ; X86-SSE-NEXT: .LBB17_4:
; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %ecx ; X86-SSE-NEXT: movl $-1, %ecx
; X86-SSE-NEXT: cmoval %ecx, %edx ; X86-SSE-NEXT: cmoval %ecx, %edx
; X86-SSE-NEXT: cmoval %ecx, %eax ; X86-SSE-NEXT: cmoval %ecx, %eax
@ -1624,7 +1624,7 @@ define i100 @test_unsigned_i100_f64(double %f) nounwind {
; X86-X87-NEXT: .LBB18_6: ; X86-X87-NEXT: .LBB18_6:
; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: fldl {{\.LCPI.*}} ; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload ; X86-X87-NEXT: fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -1685,7 +1685,7 @@ define i100 @test_unsigned_i100_f64(double %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB18_2: ; X86-SSE-NEXT: .LBB18_2:
; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $15, %ebx ; X86-SSE-NEXT: movl $15, %ebx
; X86-SSE-NEXT: cmovbel %edi, %ebx ; X86-SSE-NEXT: cmovbel %edi, %ebx
; X86-SSE-NEXT: movl $-1, %edi ; X86-SSE-NEXT: movl $-1, %edi
@ -1769,7 +1769,7 @@ define i128 @test_unsigned_i128_f64(double %f) nounwind {
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ebx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-X87-NEXT: .LBB19_6: ; X86-X87-NEXT: .LBB19_6:
; X86-X87-NEXT: fldl {{\.LCPI.*}} ; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload ; X86-X87-NEXT: fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -1826,7 +1826,7 @@ define i128 @test_unsigned_i128_f64(double %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB19_2: ; X86-SSE-NEXT: .LBB19_2:
; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %ebx ; X86-SSE-NEXT: movl $-1, %ebx
; X86-SSE-NEXT: cmoval %ebx, %edi ; X86-SSE-NEXT: cmoval %ebx, %edi
; X86-SSE-NEXT: cmoval %ebx, %edx ; X86-SSE-NEXT: cmoval %ebx, %edx
@ -1983,7 +1983,7 @@ define i8 @test_unsigned_i8_f16(half %f) nounwind {
; X86-X87-NEXT: .LBB21_1: ; X86-X87-NEXT: .LBB21_1:
; X86-X87-NEXT: xorl %ecx, %ecx ; X86-X87-NEXT: xorl %ecx, %ecx
; X86-X87-NEXT: .LBB21_3: ; X86-X87-NEXT: .LBB21_3:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -2056,7 +2056,7 @@ define i13 @test_unsigned_i13_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB22_2: ; X86-X87-NEXT: .LBB22_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -2130,7 +2130,7 @@ define i16 @test_unsigned_i16_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB23_2: ; X86-X87-NEXT: .LBB23_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -2205,7 +2205,7 @@ define i19 @test_unsigned_i19_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB24_2: ; X86-X87-NEXT: .LBB24_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -2229,7 +2229,7 @@ define i19 @test_unsigned_i19_f16(half %f) nounwind {
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: xorps %xmm1, %xmm1 ; X86-SSE-NEXT: xorps %xmm1, %xmm1
; X86-SSE-NEXT: maxss %xmm1, %xmm0 ; X86-SSE-NEXT: maxss %xmm1, %xmm0
; X86-SSE-NEXT: minss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: minss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvttss2si %xmm0, %eax ; X86-SSE-NEXT: cvttss2si %xmm0, %eax
; X86-SSE-NEXT: addl $12, %esp ; X86-SSE-NEXT: addl $12, %esp
; X86-SSE-NEXT: retl ; X86-SSE-NEXT: retl
@ -2276,7 +2276,7 @@ define i32 @test_unsigned_i32_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB25_2: ; X86-X87-NEXT: .LBB25_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -2310,7 +2310,7 @@ define i32 @test_unsigned_i32_f16(half %f) nounwind {
; X86-SSE-NEXT: xorps %xmm1, %xmm1 ; X86-SSE-NEXT: xorps %xmm1, %xmm1
; X86-SSE-NEXT: ucomiss %xmm1, %xmm0 ; X86-SSE-NEXT: ucomiss %xmm1, %xmm0
; X86-SSE-NEXT: cmovael %ecx, %edx ; X86-SSE-NEXT: cmovael %ecx, %edx
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %eax ; X86-SSE-NEXT: movl $-1, %eax
; X86-SSE-NEXT: cmovbel %edx, %eax ; X86-SSE-NEXT: cmovbel %edx, %eax
; X86-SSE-NEXT: addl $12, %esp ; X86-SSE-NEXT: addl $12, %esp
@ -2343,7 +2343,7 @@ define i50 @test_unsigned_i50_f16(half %f) nounwind {
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp) ; X86-X87-NEXT: movl %eax, (%esp)
; X86-X87-NEXT: calll __gnu_h2f_ieee ; X86-X87-NEXT: calll __gnu_h2f_ieee
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -2385,7 +2385,7 @@ define i50 @test_unsigned_i50_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.5: ; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-X87-NEXT: .LBB26_6: ; X86-X87-NEXT: .LBB26_6:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -2443,7 +2443,7 @@ define i50 @test_unsigned_i50_f16(half %f) nounwind {
; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: .LBB26_4: ; X86-SSE-NEXT: .LBB26_4:
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $262143, %edx # imm = 0x3FFFF ; X86-SSE-NEXT: movl $262143, %edx # imm = 0x3FFFF
; X86-SSE-NEXT: cmovbel %eax, %edx ; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax ; X86-SSE-NEXT: movl $-1, %eax
@ -2480,7 +2480,7 @@ define i64 @test_unsigned_i64_f16(half %f) nounwind {
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp) ; X86-X87-NEXT: movl %eax, (%esp)
; X86-X87-NEXT: calll __gnu_h2f_ieee ; X86-X87-NEXT: calll __gnu_h2f_ieee
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -2522,7 +2522,7 @@ define i64 @test_unsigned_i64_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.5: ; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-X87-NEXT: .LBB27_6: ; X86-X87-NEXT: .LBB27_6:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -2577,7 +2577,7 @@ define i64 @test_unsigned_i64_f16(half %f) nounwind {
; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: .LBB27_4: ; X86-SSE-NEXT: .LBB27_4:
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %ecx ; X86-SSE-NEXT: movl $-1, %ecx
; X86-SSE-NEXT: cmoval %ecx, %edx ; X86-SSE-NEXT: cmoval %ecx, %edx
; X86-SSE-NEXT: cmoval %ecx, %eax ; X86-SSE-NEXT: cmoval %ecx, %eax
@ -2654,7 +2654,7 @@ define i100 @test_unsigned_i100_f16(half %f) nounwind {
; X86-X87-NEXT: .LBB28_6: ; X86-X87-NEXT: .LBB28_6:
; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload ; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -2721,7 +2721,7 @@ define i100 @test_unsigned_i100_f16(half %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB28_2: ; X86-SSE-NEXT: .LBB28_2:
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $15, %ebx ; X86-SSE-NEXT: movl $15, %ebx
; X86-SSE-NEXT: cmovbel %edi, %ebx ; X86-SSE-NEXT: cmovbel %edi, %ebx
; X86-SSE-NEXT: movl $-1, %edi ; X86-SSE-NEXT: movl $-1, %edi
@ -2809,7 +2809,7 @@ define i128 @test_unsigned_i128_f16(half %f) nounwind {
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ebx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-X87-NEXT: .LBB29_6: ; X86-X87-NEXT: .LBB29_6:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload ; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -2872,7 +2872,7 @@ define i128 @test_unsigned_i128_f16(half %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB29_2: ; X86-SSE-NEXT: .LBB29_2:
; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %ebx ; X86-SSE-NEXT: movl $-1, %ebx
; X86-SSE-NEXT: cmoval %ebx, %edi ; X86-SSE-NEXT: cmoval %ebx, %edi
; X86-SSE-NEXT: cmoval %ebx, %edx ; X86-SSE-NEXT: cmoval %ebx, %edx
@ -3051,7 +3051,7 @@ define i8 @test_unsigned_i8_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: .LBB31_1: ; X86-X87-NEXT: .LBB31_1:
; X86-X87-NEXT: xorl %ecx, %ecx ; X86-X87-NEXT: xorl %ecx, %ecx
; X86-X87-NEXT: .LBB31_3: ; X86-X87-NEXT: .LBB31_3:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -3083,7 +3083,7 @@ define i8 @test_unsigned_i8_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1) ; X86-SSE-NEXT: fstp %st(1)
; X86-SSE-NEXT: cmovael %eax, %ecx ; X86-SSE-NEXT: cmovael %eax, %ecx
; X86-SSE-NEXT: flds {{\.LCPI.*}} ; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucompi %st(1), %st ; X86-SSE-NEXT: fucompi %st(1), %st
; X86-SSE-NEXT: fstp %st(0) ; X86-SSE-NEXT: fstp %st(0)
@ -3146,7 +3146,7 @@ define i13 @test_unsigned_i13_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB32_2: ; X86-X87-NEXT: .LBB32_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -3181,7 +3181,7 @@ define i13 @test_unsigned_i13_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: # %bb.1: ; X86-SSE-NEXT: # %bb.1:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT: .LBB32_2: ; X86-SSE-NEXT: .LBB32_2:
; X86-SSE-NEXT: flds {{\.LCPI.*}} ; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucompi %st(1), %st ; X86-SSE-NEXT: fucompi %st(1), %st
; X86-SSE-NEXT: fstp %st(0) ; X86-SSE-NEXT: fstp %st(0)
@ -3246,7 +3246,7 @@ define i16 @test_unsigned_i16_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB33_2: ; X86-X87-NEXT: .LBB33_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -3281,7 +3281,7 @@ define i16 @test_unsigned_i16_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: # %bb.1: ; X86-SSE-NEXT: # %bb.1:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT: .LBB33_2: ; X86-SSE-NEXT: .LBB33_2:
; X86-SSE-NEXT: flds {{\.LCPI.*}} ; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucompi %st(1), %st ; X86-SSE-NEXT: fucompi %st(1), %st
; X86-SSE-NEXT: fstp %st(0) ; X86-SSE-NEXT: fstp %st(0)
@ -3347,7 +3347,7 @@ define i19 @test_unsigned_i19_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB34_2: ; X86-X87-NEXT: .LBB34_2:
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -3382,7 +3382,7 @@ define i19 @test_unsigned_i19_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: # %bb.1: ; X86-SSE-NEXT: # %bb.1:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT: .LBB34_2: ; X86-SSE-NEXT: .LBB34_2:
; X86-SSE-NEXT: flds {{\.LCPI.*}} ; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucompi %st(1), %st ; X86-SSE-NEXT: fucompi %st(1), %st
; X86-SSE-NEXT: fstp %st(0) ; X86-SSE-NEXT: fstp %st(0)
@ -3447,7 +3447,7 @@ define i32 @test_unsigned_i32_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB35_2: ; X86-X87-NEXT: .LBB35_2:
; X86-X87-NEXT: fldl {{\.LCPI.*}} ; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -3482,7 +3482,7 @@ define i32 @test_unsigned_i32_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: # %bb.1: ; X86-SSE-NEXT: # %bb.1:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT: .LBB35_2: ; X86-SSE-NEXT: .LBB35_2:
; X86-SSE-NEXT: fldl {{\.LCPI.*}} ; X86-SSE-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucompi %st(1), %st ; X86-SSE-NEXT: fucompi %st(1), %st
; X86-SSE-NEXT: fstp %st(0) ; X86-SSE-NEXT: fstp %st(0)
@ -3528,7 +3528,7 @@ define i50 @test_unsigned_i50_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: pushl %esi ; X86-X87-NEXT: pushl %esi
; X86-X87-NEXT: subl $16, %esp ; X86-X87-NEXT: subl $16, %esp
; X86-X87-NEXT: fldt {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldt {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
; X86-X87-NEXT: xorl %ecx, %ecx ; X86-X87-NEXT: xorl %ecx, %ecx
@ -3570,7 +3570,7 @@ define i50 @test_unsigned_i50_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.5: ; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-X87-NEXT: .LBB36_6: ; X86-X87-NEXT: .LBB36_6:
; X86-X87-NEXT: fldl {{\.LCPI.*}} ; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -3595,7 +3595,7 @@ define i50 @test_unsigned_i50_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: pushl %esi ; X86-SSE-NEXT: pushl %esi
; X86-SSE-NEXT: subl $16, %esp ; X86-SSE-NEXT: subl $16, %esp
; X86-SSE-NEXT: fldt {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldt {{[0-9]+}}(%esp)
; X86-SSE-NEXT: flds {{\.LCPI.*}} ; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: xorl %eax, %eax ; X86-SSE-NEXT: xorl %eax, %eax
; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: setbe %cl ; X86-SSE-NEXT: setbe %cl
@ -3625,7 +3625,7 @@ define i50 @test_unsigned_i50_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl %eax, %esi ; X86-SSE-NEXT: movl %eax, %esi
; X86-SSE-NEXT: .LBB36_2: ; X86-SSE-NEXT: .LBB36_2:
; X86-SSE-NEXT: fldl {{\.LCPI.*}} ; X86-SSE-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucompi %st(1), %st ; X86-SSE-NEXT: fucompi %st(1), %st
; X86-SSE-NEXT: fstp %st(0) ; X86-SSE-NEXT: fstp %st(0)
@ -3682,7 +3682,7 @@ define i64 @test_unsigned_i64_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: pushl %esi ; X86-X87-NEXT: pushl %esi
; X86-X87-NEXT: subl $20, %esp ; X86-X87-NEXT: subl $20, %esp
; X86-X87-NEXT: fldt {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldt {{[0-9]+}}(%esp)
; X86-X87-NEXT: flds {{\.LCPI.*}} ; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
; X86-X87-NEXT: xorl %ecx, %ecx ; X86-X87-NEXT: xorl %ecx, %ecx
@ -3724,7 +3724,7 @@ define i64 @test_unsigned_i64_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.5: ; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-X87-NEXT: .LBB37_6: ; X86-X87-NEXT: .LBB37_6:
; X86-X87-NEXT: fldt {{\.LCPI.*}} ; X86-X87-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -3747,7 +3747,7 @@ define i64 @test_unsigned_i64_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: pushl %ebx ; X86-SSE-NEXT: pushl %ebx
; X86-SSE-NEXT: subl $16, %esp ; X86-SSE-NEXT: subl $16, %esp
; X86-SSE-NEXT: fldt {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldt {{[0-9]+}}(%esp)
; X86-SSE-NEXT: flds {{\.LCPI.*}} ; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: xorl %ecx, %ecx ; X86-SSE-NEXT: xorl %ecx, %ecx
; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: setbe %bl ; X86-SSE-NEXT: setbe %bl
@ -3777,7 +3777,7 @@ define i64 @test_unsigned_i64_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: movl %ecx, %edx ; X86-SSE-NEXT: movl %ecx, %edx
; X86-SSE-NEXT: .LBB37_2: ; X86-SSE-NEXT: .LBB37_2:
; X86-SSE-NEXT: fldt {{\.LCPI.*}} ; X86-SSE-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucompi %st(1), %st ; X86-SSE-NEXT: fucompi %st(1), %st
; X86-SSE-NEXT: fstp %st(0) ; X86-SSE-NEXT: fstp %st(0)
@ -3869,7 +3869,7 @@ define i100 @test_unsigned_i100_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: .LBB38_6: ; X86-X87-NEXT: .LBB38_6:
; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: fldt {{\.LCPI.*}} ; X86-X87-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload ; X86-X87-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -3933,7 +3933,7 @@ define i100 @test_unsigned_i100_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB38_2: ; X86-SSE-NEXT: .LBB38_2:
; X86-SSE-NEXT: fldt {{\.LCPI.*}} ; X86-SSE-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucompi %st(1), %st ; X86-SSE-NEXT: fucompi %st(1), %st
; X86-SSE-NEXT: fstp %st(0) ; X86-SSE-NEXT: fstp %st(0)
@ -4028,7 +4028,7 @@ define i128 @test_unsigned_i128_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ebx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-X87-NEXT: .LBB39_6: ; X86-X87-NEXT: .LBB39_6:
; X86-X87-NEXT: fldt {{\.LCPI.*}} ; X86-X87-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload ; X86-X87-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: fnstsw %ax
@ -4088,7 +4088,7 @@ define i128 @test_unsigned_i128_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB39_2: ; X86-SSE-NEXT: .LBB39_2:
; X86-SSE-NEXT: fldt {{\.LCPI.*}} ; X86-SSE-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucompi %st(1), %st ; X86-SSE-NEXT: fucompi %st(1), %st
; X86-SSE-NEXT: fstp %st(0) ; X86-SSE-NEXT: fstp %st(0)

View File

@ -130,9 +130,9 @@ define i32 @rotl_i32(i32 %x, i32 %z) nounwind {
define <4 x i32> @rotl_v4i32(<4 x i32> %x, <4 x i32> %z) nounwind { define <4 x i32> @rotl_v4i32(<4 x i32> %x, <4 x i32> %z) nounwind {
; X32-SSE2-LABEL: rotl_v4i32: ; X32-SSE2-LABEL: rotl_v4i32:
; X32-SSE2: # %bb.0: ; X32-SSE2: # %bb.0:
; X32-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1 ; X32-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-SSE2-NEXT: pslld $23, %xmm1 ; X32-SSE2-NEXT: pslld $23, %xmm1
; X32-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1 ; X32-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-SSE2-NEXT: cvttps2dq %xmm1, %xmm1 ; X32-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] ; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X32-SSE2-NEXT: pmuludq %xmm1, %xmm0 ; X32-SSE2-NEXT: pmuludq %xmm1, %xmm0
@ -324,9 +324,9 @@ define <4 x i32> @rotr_v4i32(<4 x i32> %x, <4 x i32> %z) nounwind {
; X32-SSE2: # %bb.0: ; X32-SSE2: # %bb.0:
; X32-SSE2-NEXT: pxor %xmm2, %xmm2 ; X32-SSE2-NEXT: pxor %xmm2, %xmm2
; X32-SSE2-NEXT: psubd %xmm1, %xmm2 ; X32-SSE2-NEXT: psubd %xmm1, %xmm2
; X32-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2 ; X32-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X32-SSE2-NEXT: pslld $23, %xmm2 ; X32-SSE2-NEXT: pslld $23, %xmm2
; X32-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm2 ; X32-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X32-SSE2-NEXT: cvttps2dq %xmm2, %xmm1 ; X32-SSE2-NEXT: cvttps2dq %xmm2, %xmm1
; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] ; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X32-SSE2-NEXT: pmuludq %xmm1, %xmm0 ; X32-SSE2-NEXT: pmuludq %xmm1, %xmm0

View File

@ -8,7 +8,7 @@ define <4 x double> @PR43402(i64 %x) {
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT: vsubpd {{\.LCPI.*}}, %xmm0, %xmm0 ; CHECK-NEXT: vsubpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; CHECK-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; CHECK-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; CHECK-NEXT: vaddsd %xmm0, %xmm1, %xmm0 ; CHECK-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 ; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0

View File

@ -364,7 +364,7 @@ define void @test_uitofp_i64(i64 %a, half* %p) #0 {
; CHECK-I686-NEXT: movlps %xmm0, {{[0-9]+}}(%esp) ; CHECK-I686-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
; CHECK-I686-NEXT: shrl $31, %eax ; CHECK-I686-NEXT: shrl $31, %eax
; CHECK-I686-NEXT: fildll {{[0-9]+}}(%esp) ; CHECK-I686-NEXT: fildll {{[0-9]+}}(%esp)
; CHECK-I686-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; CHECK-I686-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; CHECK-I686-NEXT: fstps (%esp) ; CHECK-I686-NEXT: fstps (%esp)
; CHECK-I686-NEXT: calll __gnu_f2h_ieee ; CHECK-I686-NEXT: calll __gnu_f2h_ieee
; CHECK-I686-NEXT: movw %ax, (%esi) ; CHECK-I686-NEXT: movw %ax, (%esi)

View File

@ -499,7 +499,7 @@ define <4 x i1> @vec_4xi32_splat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2: # %bb.0: ; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pxor %xmm2, %xmm2 ; X86-SSE2-NEXT: pxor %xmm2, %xmm2
; X86-SSE2-NEXT: pslld $23, %xmm1 ; X86-SSE2-NEXT: pslld $23, %xmm1
; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1 ; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0 ; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
@ -508,7 +508,7 @@ define <4 x i1> @vec_4xi32_splat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-NEXT: pmuludq %xmm3, %xmm1 ; X86-SSE2-NEXT: pmuludq %xmm3, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pcmpeqd %xmm2, %xmm0 ; X86-SSE2-NEXT: pcmpeqd %xmm2, %xmm0
; X86-SSE2-NEXT: retl ; X86-SSE2-NEXT: retl
; ;
@ -586,7 +586,7 @@ define <4 x i1> @vec_4xi32_nonsplat_undef0_eq(<4 x i32> %x, <4 x i32> %y) nounwi
; X86-SSE2: # %bb.0: ; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pxor %xmm2, %xmm2 ; X86-SSE2-NEXT: pxor %xmm2, %xmm2
; X86-SSE2-NEXT: pslld $23, %xmm1 ; X86-SSE2-NEXT: pslld $23, %xmm1
; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1 ; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0 ; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
@ -595,7 +595,7 @@ define <4 x i1> @vec_4xi32_nonsplat_undef0_eq(<4 x i32> %x, <4 x i32> %y) nounwi
; X86-SSE2-NEXT: pmuludq %xmm3, %xmm1 ; X86-SSE2-NEXT: pmuludq %xmm3, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pcmpeqd %xmm2, %xmm0 ; X86-SSE2-NEXT: pcmpeqd %xmm2, %xmm0
; X86-SSE2-NEXT: retl ; X86-SSE2-NEXT: retl
; ;

View File

@ -461,7 +461,7 @@ define <4 x i1> @vec_4xi32_splat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-LABEL: vec_4xi32_splat_eq: ; X86-SSE2-LABEL: vec_4xi32_splat_eq:
; X86-SSE2: # %bb.0: ; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pslld $23, %xmm1 ; X86-SSE2-NEXT: pslld $23, %xmm1
; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1 ; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1] ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
@ -510,7 +510,7 @@ define <4 x i1> @vec_4xi32_nonsplat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-LABEL: vec_4xi32_nonsplat_eq: ; X86-SSE2-LABEL: vec_4xi32_nonsplat_eq:
; X86-SSE2: # %bb.0: ; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pslld $23, %xmm1 ; X86-SSE2-NEXT: pslld $23, %xmm1
; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1 ; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,1,16776960,2147483648] ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,1,16776960,2147483648]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
@ -563,12 +563,12 @@ define <4 x i1> @vec_4xi32_nonsplat_undef0_eq(<4 x i32> %x, <4 x i32> %y) nounwi
; X86-SSE2-NEXT: movl $1, %eax ; X86-SSE2-NEXT: movl $1, %eax
; X86-SSE2-NEXT: movd %eax, %xmm2 ; X86-SSE2-NEXT: movd %eax, %xmm2
; X86-SSE2-NEXT: pslld $23, %xmm1 ; X86-SSE2-NEXT: pslld $23, %xmm1
; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1 ; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm2 ; X86-SSE2-NEXT: pmuludq %xmm1, %xmm2
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X86-SSE2-NEXT: pmuludq {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: pmuludq {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X86-SSE2-NEXT: pand %xmm2, %xmm0 ; X86-SSE2-NEXT: pand %xmm2, %xmm0
@ -611,7 +611,7 @@ define <4 x i1> @vec_4xi32_nonsplat_undef1_eq(<4 x i32> %x, <4 x i32> %y) nounwi
; X86-SSE2-LABEL: vec_4xi32_nonsplat_undef1_eq: ; X86-SSE2-LABEL: vec_4xi32_nonsplat_undef1_eq:
; X86-SSE2: # %bb.0: ; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pslld $23, %xmm1 ; X86-SSE2-NEXT: pslld $23, %xmm1
; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1 ; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1] ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
@ -661,12 +661,12 @@ define <4 x i1> @vec_4xi32_nonsplat_undef2_eq(<4 x i32> %x, <4 x i32> %y) nounwi
; X86-SSE2-NEXT: movl $1, %eax ; X86-SSE2-NEXT: movl $1, %eax
; X86-SSE2-NEXT: movd %eax, %xmm2 ; X86-SSE2-NEXT: movd %eax, %xmm2
; X86-SSE2-NEXT: pslld $23, %xmm1 ; X86-SSE2-NEXT: pslld $23, %xmm1
; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1 ; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm2 ; X86-SSE2-NEXT: pmuludq %xmm1, %xmm2
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X86-SSE2-NEXT: pmuludq {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: pmuludq {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X86-SSE2-NEXT: pand %xmm2, %xmm0 ; X86-SSE2-NEXT: pand %xmm2, %xmm0

View File

@ -14,7 +14,7 @@ define <2 x double> @mask_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
; X86-SSE-LABEL: mask_sitofp_2i64_2f64: ; X86-SSE-LABEL: mask_sitofp_2i64_2f64:
; X86-SSE: # %bb.0: ; X86-SSE: # %bb.0:
; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvtdq2pd %xmm0, %xmm0 ; X86-SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; X86-SSE-NEXT: retl ; X86-SSE-NEXT: retl
; ;
@ -32,7 +32,7 @@ define <2 x double> @mask_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
; ;
; X86-AVX512DQ-LABEL: mask_sitofp_2i64_2f64: ; X86-AVX512DQ-LABEL: mask_sitofp_2i64_2f64:
; X86-AVX512DQ: # %bb.0: ; X86-AVX512DQ: # %bb.0:
; X86-AVX512DQ-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX512DQ-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX512DQ-NEXT: vcvtqq2pd %xmm0, %xmm0 ; X86-AVX512DQ-NEXT: vcvtqq2pd %xmm0, %xmm0
; X86-AVX512DQ-NEXT: retl ; X86-AVX512DQ-NEXT: retl
; ;
@ -69,7 +69,7 @@ define <2 x double> @mask_uitofp_2i64_2f64(<2 x i64> %a) nounwind {
; X86-SSE-LABEL: mask_uitofp_2i64_2f64: ; X86-SSE-LABEL: mask_uitofp_2i64_2f64:
; X86-SSE: # %bb.0: ; X86-SSE: # %bb.0:
; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvtdq2pd %xmm0, %xmm0 ; X86-SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; X86-SSE-NEXT: retl ; X86-SSE-NEXT: retl
; ;
@ -87,7 +87,7 @@ define <2 x double> @mask_uitofp_2i64_2f64(<2 x i64> %a) nounwind {
; ;
; X86-AVX512DQ-LABEL: mask_uitofp_2i64_2f64: ; X86-AVX512DQ-LABEL: mask_uitofp_2i64_2f64:
; X86-AVX512DQ: # %bb.0: ; X86-AVX512DQ: # %bb.0:
; X86-AVX512DQ-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX512DQ-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX512DQ-NEXT: vcvtqq2pd %xmm0, %xmm0 ; X86-AVX512DQ-NEXT: vcvtqq2pd %xmm0, %xmm0
; X86-AVX512DQ-NEXT: retl ; X86-AVX512DQ-NEXT: retl
; ;
@ -124,7 +124,7 @@ define <4 x float> @mask_sitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X86-SSE-LABEL: mask_sitofp_4i64_4f32: ; X86-SSE-LABEL: mask_sitofp_4i64_4f32:
; X86-SSE: # %bb.0: ; X86-SSE: # %bb.0:
; X86-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] ; X86-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X86-SSE-NEXT: andps {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvtdq2ps %xmm0, %xmm0 ; X86-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X86-SSE-NEXT: retl ; X86-SSE-NEXT: retl
; ;
@ -132,7 +132,7 @@ define <4 x float> @mask_sitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X86-AVX: # %bb.0: ; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vextractf128 $1, %ymm0, %xmm1 ; X86-AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] ; X86-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X86-AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X86-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; X86-AVX-NEXT: vzeroupper ; X86-AVX-NEXT: vzeroupper
; X86-AVX-NEXT: retl ; X86-AVX-NEXT: retl
@ -140,14 +140,14 @@ define <4 x float> @mask_sitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X86-AVX512F-LABEL: mask_sitofp_4i64_4f32: ; X86-AVX512F-LABEL: mask_sitofp_4i64_4f32:
; X86-AVX512F: # %bb.0: ; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: vpmovqd %ymm0, %xmm0 ; X86-AVX512F-NEXT: vpmovqd %ymm0, %xmm0
; X86-AVX512F-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX512F-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX512F-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X86-AVX512F-NEXT: vcvtdq2ps %xmm0, %xmm0
; X86-AVX512F-NEXT: vzeroupper ; X86-AVX512F-NEXT: vzeroupper
; X86-AVX512F-NEXT: retl ; X86-AVX512F-NEXT: retl
; ;
; X86-AVX512DQ-LABEL: mask_sitofp_4i64_4f32: ; X86-AVX512DQ-LABEL: mask_sitofp_4i64_4f32:
; X86-AVX512DQ: # %bb.0: ; X86-AVX512DQ: # %bb.0:
; X86-AVX512DQ-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 ; X86-AVX512DQ-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-AVX512DQ-NEXT: vcvtqq2ps %ymm0, %xmm0 ; X86-AVX512DQ-NEXT: vcvtqq2ps %ymm0, %xmm0
; X86-AVX512DQ-NEXT: vzeroupper ; X86-AVX512DQ-NEXT: vzeroupper
; X86-AVX512DQ-NEXT: retl ; X86-AVX512DQ-NEXT: retl
@ -191,7 +191,7 @@ define <4 x float> @mask_uitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X86-SSE-LABEL: mask_uitofp_4i64_4f32: ; X86-SSE-LABEL: mask_uitofp_4i64_4f32:
; X86-SSE: # %bb.0: ; X86-SSE: # %bb.0:
; X86-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] ; X86-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X86-SSE-NEXT: andps {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvtdq2ps %xmm0, %xmm0 ; X86-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X86-SSE-NEXT: retl ; X86-SSE-NEXT: retl
; ;
@ -199,7 +199,7 @@ define <4 x float> @mask_uitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X86-AVX: # %bb.0: ; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vextractf128 $1, %ymm0, %xmm1 ; X86-AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] ; X86-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X86-AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X86-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; X86-AVX-NEXT: vzeroupper ; X86-AVX-NEXT: vzeroupper
; X86-AVX-NEXT: retl ; X86-AVX-NEXT: retl
@ -207,14 +207,14 @@ define <4 x float> @mask_uitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X86-AVX512F-LABEL: mask_uitofp_4i64_4f32: ; X86-AVX512F-LABEL: mask_uitofp_4i64_4f32:
; X86-AVX512F: # %bb.0: ; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: vpmovqd %ymm0, %xmm0 ; X86-AVX512F-NEXT: vpmovqd %ymm0, %xmm0
; X86-AVX512F-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX512F-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX512F-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X86-AVX512F-NEXT: vcvtdq2ps %xmm0, %xmm0
; X86-AVX512F-NEXT: vzeroupper ; X86-AVX512F-NEXT: vzeroupper
; X86-AVX512F-NEXT: retl ; X86-AVX512F-NEXT: retl
; ;
; X86-AVX512DQ-LABEL: mask_uitofp_4i64_4f32: ; X86-AVX512DQ-LABEL: mask_uitofp_4i64_4f32:
; X86-AVX512DQ: # %bb.0: ; X86-AVX512DQ: # %bb.0:
; X86-AVX512DQ-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 ; X86-AVX512DQ-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-AVX512DQ-NEXT: vcvtqq2ps %ymm0, %xmm0 ; X86-AVX512DQ-NEXT: vcvtqq2ps %ymm0, %xmm0
; X86-AVX512DQ-NEXT: vzeroupper ; X86-AVX512DQ-NEXT: vzeroupper
; X86-AVX512DQ-NEXT: retl ; X86-AVX512DQ-NEXT: retl
@ -270,7 +270,7 @@ define <2 x double> @clamp_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3] ; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; X86-SSE-NEXT: por %xmm2, %xmm3 ; X86-SSE-NEXT: por %xmm2, %xmm3
; X86-SSE-NEXT: pand %xmm3, %xmm0 ; X86-SSE-NEXT: pand %xmm3, %xmm0
; X86-SSE-NEXT: pandn {{\.LCPI.*}}, %xmm3 ; X86-SSE-NEXT: pandn {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
; X86-SSE-NEXT: por %xmm0, %xmm3 ; X86-SSE-NEXT: por %xmm0, %xmm3
; X86-SSE-NEXT: pxor %xmm3, %xmm1 ; X86-SSE-NEXT: pxor %xmm3, %xmm1
; X86-SSE-NEXT: movdqa {{.*#+}} xmm0 = [2147483903,0,2147483903,0] ; X86-SSE-NEXT: movdqa {{.*#+}} xmm0 = [2147483903,0,2147483903,0]
@ -283,7 +283,7 @@ define <2 x double> @clamp_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3] ; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; X86-SSE-NEXT: por %xmm0, %xmm1 ; X86-SSE-NEXT: por %xmm0, %xmm1
; X86-SSE-NEXT: pand %xmm1, %xmm3 ; X86-SSE-NEXT: pand %xmm1, %xmm3
; X86-SSE-NEXT: pandn {{\.LCPI.*}}, %xmm1 ; X86-SSE-NEXT: pandn {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE-NEXT: por %xmm3, %xmm1 ; X86-SSE-NEXT: por %xmm3, %xmm1
; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3] ; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; X86-SSE-NEXT: cvtdq2pd %xmm0, %xmm0 ; X86-SSE-NEXT: cvtdq2pd %xmm0, %xmm0
@ -305,16 +305,16 @@ define <2 x double> @clamp_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
; ;
; X86-AVX512F-LABEL: clamp_sitofp_2i64_2f64: ; X86-AVX512F-LABEL: clamp_sitofp_2i64_2f64:
; X86-AVX512F: # %bb.0: ; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: vpmaxsq {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX512F-NEXT: vpmaxsq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX512F-NEXT: vpminsq {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX512F-NEXT: vpminsq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; X86-AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-AVX512F-NEXT: vcvtdq2pd %xmm0, %xmm0 ; X86-AVX512F-NEXT: vcvtdq2pd %xmm0, %xmm0
; X86-AVX512F-NEXT: retl ; X86-AVX512F-NEXT: retl
; ;
; X86-AVX512DQ-LABEL: clamp_sitofp_2i64_2f64: ; X86-AVX512DQ-LABEL: clamp_sitofp_2i64_2f64:
; X86-AVX512DQ: # %bb.0: ; X86-AVX512DQ: # %bb.0:
; X86-AVX512DQ-NEXT: vpmaxsq {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX512DQ-NEXT: vpmaxsq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX512DQ-NEXT: vpminsq {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX512DQ-NEXT: vpminsq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX512DQ-NEXT: vcvtqq2pd %xmm0, %xmm0 ; X86-AVX512DQ-NEXT: vcvtqq2pd %xmm0, %xmm0
; X86-AVX512DQ-NEXT: retl ; X86-AVX512DQ-NEXT: retl
; ;

View File

@ -16,7 +16,7 @@ define <16 x i8> @elt0_v16i8(i8 %x) {
; X86-SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero ; X86-SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-SSE2-NEXT: movaps {{.*#+}} xmm0 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] ; X86-SSE2-NEXT: movaps {{.*#+}} xmm0 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; X86-SSE2-NEXT: andnps %xmm1, %xmm0 ; X86-SSE2-NEXT: andnps %xmm1, %xmm0
; X86-SSE2-NEXT: orps {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: orps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl ; X86-SSE2-NEXT: retl
; ;
; X64-SSE2-LABEL: elt0_v16i8: ; X64-SSE2-LABEL: elt0_v16i8:
@ -393,7 +393,7 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX1-NEXT: vmovaps {{.*#+}} xmm1 = [4,0,0,0] ; X86-AVX1-NEXT: vmovaps {{.*#+}} xmm1 = [4,0,0,0]
; X86-AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0] ; X86-AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; X86-AVX1-NEXT: vinsertf128 $1, {{\.LCPI.*}}, %ymm0, %ymm1 ; X86-AVX1-NEXT: vinsertf128 $1, {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm1
; X86-AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [42,0,1,0,2,0,3,0] ; X86-AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [42,0,1,0,2,0,3,0]
; X86-AVX1-NEXT: retl ; X86-AVX1-NEXT: retl
; ;
@ -410,7 +410,7 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
; X86-AVX2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; X86-AVX2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX2-NEXT: vmovaps {{.*#+}} xmm1 = [4,0,0,0] ; X86-AVX2-NEXT: vmovaps {{.*#+}} xmm1 = [4,0,0,0]
; X86-AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0] ; X86-AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; X86-AVX2-NEXT: vinsertf128 $1, {{\.LCPI.*}}, %ymm0, %ymm1 ; X86-AVX2-NEXT: vinsertf128 $1, {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm1
; X86-AVX2-NEXT: vmovaps {{.*#+}} ymm0 = [42,0,1,0,2,0,3,0] ; X86-AVX2-NEXT: vmovaps {{.*#+}} ymm0 = [42,0,1,0,2,0,3,0]
; X86-AVX2-NEXT: retl ; X86-AVX2-NEXT: retl
; ;
@ -428,7 +428,7 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
; X86-AVX512F-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero ; X86-AVX512F-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; X86-AVX512F-NEXT: vmovaps {{.*#+}} xmm2 = [4,0,0,0] ; X86-AVX512F-NEXT: vmovaps {{.*#+}} xmm2 = [4,0,0,0]
; X86-AVX512F-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0] ; X86-AVX512F-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; X86-AVX512F-NEXT: vinsertf128 $1, {{\.LCPI.*}}, %ymm1, %ymm1 ; X86-AVX512F-NEXT: vinsertf128 $1, {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1
; X86-AVX512F-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 ; X86-AVX512F-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; X86-AVX512F-NEXT: retl ; X86-AVX512F-NEXT: retl
; ;

View File

@ -1380,7 +1380,7 @@ define <8 x float> @arg_f32_v8f32(<8 x float> %v, float %x, i32 %y) nounwind {
; AVX1-NEXT: vmovd %edi, %xmm2 ; AVX1-NEXT: vmovd %edi, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; AVX1-NEXT: vpcmpeqd {{.*}}(%rip), %xmm2, %xmm3 ; AVX1-NEXT: vpcmpeqd {{.*}}(%rip), %xmm2, %xmm3
; AVX1-NEXT: vpcmpeqd {{\.LCPI.*}}+{{.*}}(%rip), %xmm2, %xmm2 ; AVX1-NEXT: vpcmpeqd {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 ; AVX1-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq ; AVX1-NEXT: retq
@ -1424,7 +1424,7 @@ define <4 x double> @arg_f64_v4f64(<4 x double> %v, double %x, i32 %y) nounwind
; AVX1-NEXT: vmovq %rax, %xmm2 ; AVX1-NEXT: vmovq %rax, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,1] ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
; AVX1-NEXT: vpcmpeqq {{.*}}(%rip), %xmm2, %xmm3 ; AVX1-NEXT: vpcmpeqq {{.*}}(%rip), %xmm2, %xmm3
; AVX1-NEXT: vpcmpeqq {{\.LCPI.*}}+{{.*}}(%rip), %xmm2, %xmm2 ; AVX1-NEXT: vpcmpeqq {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 ; AVX1-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq ; AVX1-NEXT: retq
@ -1661,7 +1661,7 @@ define <8 x float> @load_f32_v8f32(<8 x float> %v, float* %p, i32 %y) nounwind {
; AVX1-NEXT: vmovd %esi, %xmm1 ; AVX1-NEXT: vmovd %esi, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0] ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-NEXT: vpcmpeqd {{.*}}(%rip), %xmm1, %xmm2 ; AVX1-NEXT: vpcmpeqd {{.*}}(%rip), %xmm1, %xmm2
; AVX1-NEXT: vpcmpeqd {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vpcmpeqd {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: vbroadcastss (%rdi), %ymm2 ; AVX1-NEXT: vbroadcastss (%rdi), %ymm2
; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0 ; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0
@ -1706,7 +1706,7 @@ define <4 x double> @load_f64_v4f64(<4 x double> %v, double* %p, i32 %y) nounwin
; AVX1-NEXT: vmovq %rax, %xmm1 ; AVX1-NEXT: vmovq %rax, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1] ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; AVX1-NEXT: vpcmpeqq {{.*}}(%rip), %xmm1, %xmm2 ; AVX1-NEXT: vpcmpeqq {{.*}}(%rip), %xmm1, %xmm2
; AVX1-NEXT: vpcmpeqq {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vpcmpeqq {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: vbroadcastsd (%rdi), %ymm2 ; AVX1-NEXT: vbroadcastsd (%rdi), %ymm2
; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0 ; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0

View File

@ -74,7 +74,7 @@ define <4 x float> @knownbits_insert_uitofp(<4 x i32> %a0, i16 %a1, i16 %a2) nou
define <4 x i32> @knownbits_mask_shuffle_sext(<8 x i16> %a0) nounwind { define <4 x i32> @knownbits_mask_shuffle_sext(<8 x i16> %a0) nounwind {
; X32-LABEL: knownbits_mask_shuffle_sext: ; X32-LABEL: knownbits_mask_shuffle_sext:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 ; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] ; X32-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X32-NEXT: retl ; X32-NEXT: retl
@ -94,7 +94,7 @@ define <4 x i32> @knownbits_mask_shuffle_sext(<8 x i16> %a0) nounwind {
define <4 x i32> @knownbits_mask_shuffle_shuffle_sext(<8 x i16> %a0) nounwind { define <4 x i32> @knownbits_mask_shuffle_shuffle_sext(<8 x i16> %a0) nounwind {
; X32-LABEL: knownbits_mask_shuffle_shuffle_sext: ; X32-LABEL: knownbits_mask_shuffle_shuffle_sext:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 ; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] ; X32-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X32-NEXT: retl ; X32-NEXT: retl
@ -115,7 +115,7 @@ define <4 x i32> @knownbits_mask_shuffle_shuffle_sext(<8 x i16> %a0) nounwind {
define <4 x i32> @knownbits_mask_shuffle_shuffle_undef_sext(<8 x i16> %a0) nounwind { define <4 x i32> @knownbits_mask_shuffle_shuffle_undef_sext(<8 x i16> %a0) nounwind {
; X32-LABEL: knownbits_mask_shuffle_shuffle_undef_sext: ; X32-LABEL: knownbits_mask_shuffle_shuffle_undef_sext:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 ; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] ; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; X32-NEXT: vpmovsxwd %xmm0, %xmm0 ; X32-NEXT: vpmovsxwd %xmm0, %xmm0
; X32-NEXT: retl ; X32-NEXT: retl
@ -136,7 +136,7 @@ define <4 x i32> @knownbits_mask_shuffle_shuffle_undef_sext(<8 x i16> %a0) nounw
define <4 x float> @knownbits_mask_shuffle_uitofp(<4 x i32> %a0) nounwind { define <4 x float> @knownbits_mask_shuffle_uitofp(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_shuffle_uitofp: ; X32-LABEL: knownbits_mask_shuffle_uitofp:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 ; X32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3] ; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X32-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-NEXT: retl ; X32-NEXT: retl
@ -173,8 +173,8 @@ define <4 x float> @knownbits_mask_or_shuffle_uitofp(<4 x i32> %a0) nounwind {
define <4 x float> @knownbits_mask_xor_shuffle_uitofp(<4 x i32> %a0) nounwind { define <4 x float> @knownbits_mask_xor_shuffle_uitofp(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_xor_shuffle_uitofp: ; X32-LABEL: knownbits_mask_xor_shuffle_uitofp:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 ; X32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vxorps {{\.LCPI.*}}, %xmm0, %xmm0 ; X32-NEXT: vxorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3] ; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X32-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-NEXT: retl ; X32-NEXT: retl
@ -384,8 +384,8 @@ declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>)
define <8 x float> @knownbits_mask_concat_uitofp(<4 x i32> %a0, <4 x i32> %a1) nounwind { define <8 x float> @knownbits_mask_concat_uitofp(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; X32-LABEL: knownbits_mask_concat_uitofp: ; X32-LABEL: knownbits_mask_concat_uitofp:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 ; X32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vandps {{\.LCPI.*}}, %xmm1, %xmm1 ; X32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,0,2] ; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,0,2]
; X32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[1,3,1,3] ; X32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[1,3,1,3]
; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@ -432,8 +432,8 @@ define <4 x float> @knownbits_lshr_bitcast_shuffle_uitofp(<2 x i64> %a0, <4 x i3
define <4 x float> @knownbits_smax_smin_shuffle_uitofp(<4 x i32> %a0) { define <4 x float> @knownbits_smax_smin_shuffle_uitofp(<4 x i32> %a0) {
; X32-LABEL: knownbits_smax_smin_shuffle_uitofp: ; X32-LABEL: knownbits_smax_smin_shuffle_uitofp:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: vpminsd {{\.LCPI.*}}, %xmm0, %xmm0 ; X32-NEXT: vpminsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vpmaxsd {{\.LCPI.*}}, %xmm0, %xmm0 ; X32-NEXT: vpmaxsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3] ; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
; X32-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-NEXT: retl ; X32-NEXT: retl
@ -457,7 +457,7 @@ declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
define <4 x float> @knownbits_umin_shuffle_uitofp(<4 x i32> %a0) { define <4 x float> @knownbits_umin_shuffle_uitofp(<4 x i32> %a0) {
; X32-LABEL: knownbits_umin_shuffle_uitofp: ; X32-LABEL: knownbits_umin_shuffle_uitofp:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: vpminud {{\.LCPI.*}}, %xmm0, %xmm0 ; X32-NEXT: vpminud {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3] ; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
; X32-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-NEXT: retl ; X32-NEXT: retl
@ -495,8 +495,8 @@ declare <4 x i32> @llvm.umax.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
define <4 x float> @knownbits_mask_umax_shuffle_uitofp(<4 x i32> %a0) { define <4 x float> @knownbits_mask_umax_shuffle_uitofp(<4 x i32> %a0) {
; X32-LABEL: knownbits_mask_umax_shuffle_uitofp: ; X32-LABEL: knownbits_mask_umax_shuffle_uitofp:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 ; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vpmaxud {{\.LCPI.*}}, %xmm0, %xmm0 ; X32-NEXT: vpmaxud {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3] ; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
; X32-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-NEXT: retl ; X32-NEXT: retl
@ -540,7 +540,7 @@ define <4 x float> @knownbits_abs_uitofp(<4 x i32> %a0) {
; X32-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7] ; X32-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; X32-NEXT: vpsrld $16, %xmm0, %xmm0 ; X32-NEXT: vpsrld $16, %xmm0, %xmm0
; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7] ; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; X32-NEXT: vsubps {{\.LCPI.*}}, %xmm0, %xmm0 ; X32-NEXT: vsubps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vaddps %xmm0, %xmm1, %xmm0 ; X32-NEXT: vaddps %xmm0, %xmm1, %xmm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
@ -563,7 +563,7 @@ define <4 x float> @knownbits_abs_uitofp(<4 x i32> %a0) {
define <4 x float> @knownbits_or_abs_uitofp(<4 x i32> %a0) { define <4 x float> @knownbits_or_abs_uitofp(<4 x i32> %a0) {
; X32-LABEL: knownbits_or_abs_uitofp: ; X32-LABEL: knownbits_or_abs_uitofp:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: vpor {{\.LCPI.*}}, %xmm0, %xmm0 ; X32-NEXT: vpor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,2] ; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,2]
; X32-NEXT: vpabsd %xmm0, %xmm0 ; X32-NEXT: vpabsd %xmm0, %xmm0
; X32-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
@ -593,8 +593,8 @@ define <4 x float> @knownbits_and_select_shuffle_uitofp(<4 x i32> %a0, <4 x i32>
; X32-NEXT: andl $-16, %esp ; X32-NEXT: andl $-16, %esp
; X32-NEXT: subl $16, %esp ; X32-NEXT: subl $16, %esp
; X32-NEXT: vmovaps 8(%ebp), %xmm3 ; X32-NEXT: vmovaps 8(%ebp), %xmm3
; X32-NEXT: vandps {{\.LCPI.*}}, %xmm2, %xmm2 ; X32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm2, %xmm2
; X32-NEXT: vandps {{\.LCPI.*}}, %xmm3, %xmm3 ; X32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm3, %xmm3
; X32-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; X32-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; X32-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0 ; X32-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,2] ; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,2]
@ -630,7 +630,7 @@ define <4 x float> @knownbits_lshr_and_select_shuffle_uitofp(<4 x i32> %a0, <4 x
; X32-NEXT: subl $16, %esp ; X32-NEXT: subl $16, %esp
; X32-NEXT: vmovaps 8(%ebp), %xmm3 ; X32-NEXT: vmovaps 8(%ebp), %xmm3
; X32-NEXT: vpsrld $5, %xmm2, %xmm2 ; X32-NEXT: vpsrld $5, %xmm2, %xmm2
; X32-NEXT: vandps {{\.LCPI.*}}, %xmm3, %xmm3 ; X32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm3, %xmm3
; X32-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; X32-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; X32-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0 ; X32-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,2] ; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,2]

View File

@ -515,7 +515,7 @@ define <4 x i32> @signbits_mask_ashr_smax(<4 x i32> %a0, <4 x i32> %a1) {
; X86-NEXT: vpsrad $25, %xmm1, %xmm1 ; X86-NEXT: vpsrad $25, %xmm1, %xmm1
; X86-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 ; X86-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] ; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X86-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-AVX1-LABEL: signbits_mask_ashr_smax: ; X64-AVX1-LABEL: signbits_mask_ashr_smax:
@ -553,7 +553,7 @@ define <4 x i32> @signbits_mask_ashr_smin(<4 x i32> %a0, <4 x i32> %a1) {
; X86-NEXT: vpsrad $25, %xmm1, %xmm1 ; X86-NEXT: vpsrad $25, %xmm1, %xmm1
; X86-NEXT: vpminsd %xmm1, %xmm0, %xmm0 ; X86-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] ; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X86-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-AVX1-LABEL: signbits_mask_ashr_smin: ; X64-AVX1-LABEL: signbits_mask_ashr_smin:
@ -591,7 +591,7 @@ define <4 x i32> @signbits_mask_ashr_umax(<4 x i32> %a0, <4 x i32> %a1) {
; X86-NEXT: vpsrad $25, %xmm1, %xmm1 ; X86-NEXT: vpsrad $25, %xmm1, %xmm1
; X86-NEXT: vpmaxud %xmm1, %xmm0, %xmm0 ; X86-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] ; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X86-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-AVX1-LABEL: signbits_mask_ashr_umax: ; X64-AVX1-LABEL: signbits_mask_ashr_umax:
@ -629,7 +629,7 @@ define <4 x i32> @signbits_mask_ashr_umin(<4 x i32> %a0, <4 x i32> %a1) {
; X86-NEXT: vpsrad $25, %xmm1, %xmm1 ; X86-NEXT: vpsrad $25, %xmm1, %xmm1
; X86-NEXT: vpminud %xmm1, %xmm0, %xmm0 ; X86-NEXT: vpminud %xmm1, %xmm0, %xmm0
; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] ; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X86-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-AVX1-LABEL: signbits_mask_ashr_umin: ; X64-AVX1-LABEL: signbits_mask_ashr_umin:
@ -674,7 +674,7 @@ define void @cross_bb_signbits_insert_subvec(<32 x i8>* %ptr, <32 x i8> %x, <32
; X86-NEXT: vpcmpeqb %xmm3, %xmm0, %xmm0 ; X86-NEXT: vpcmpeqb %xmm3, %xmm0, %xmm0
; X86-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; X86-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X86-NEXT: vandnps %ymm1, %ymm0, %ymm1 ; X86-NEXT: vandnps %ymm1, %ymm0, %ymm1
; X86-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 ; X86-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-NEXT: vorps %ymm1, %ymm0, %ymm0 ; X86-NEXT: vorps %ymm1, %ymm0, %ymm0
; X86-NEXT: vmovaps %ymm0, (%eax) ; X86-NEXT: vmovaps %ymm0, (%eax)
; X86-NEXT: vzeroupper ; X86-NEXT: vzeroupper

View File

@ -8,7 +8,7 @@ define float @f1(float %x) nounwind noinline {
; precision6: # %bb.0: # %entry ; precision6: # %bb.0: # %entry
; precision6-NEXT: subl $20, %esp ; precision6-NEXT: subl $20, %esp
; precision6-NEXT: flds {{[0-9]+}}(%esp) ; precision6-NEXT: flds {{[0-9]+}}(%esp)
; precision6-NEXT: fmuls {{\.LCPI.*}} ; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fnstcw (%esp) ; precision6-NEXT: fnstcw (%esp)
; precision6-NEXT: movzwl (%esp), %eax ; precision6-NEXT: movzwl (%esp), %eax
; precision6-NEXT: orl $3072, %eax # imm = 0xC00 ; precision6-NEXT: orl $3072, %eax # imm = 0xC00
@ -20,10 +20,10 @@ define float @f1(float %x) nounwind noinline {
; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision6-NEXT: fisubl {{[0-9]+}}(%esp) ; precision6-NEXT: fisubl {{[0-9]+}}(%esp)
; precision6-NEXT: fld %st(0) ; precision6-NEXT: fld %st(0)
; precision6-NEXT: fmuls {{\.LCPI.*}} ; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fadds {{\.LCPI.*}} ; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fmulp %st, %st(1) ; precision6-NEXT: fmulp %st, %st(1)
; precision6-NEXT: fadds {{\.LCPI.*}} ; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fstps {{[0-9]+}}(%esp) ; precision6-NEXT: fstps {{[0-9]+}}(%esp)
; precision6-NEXT: shll $23, %eax ; precision6-NEXT: shll $23, %eax
; precision6-NEXT: addl {{[0-9]+}}(%esp), %eax ; precision6-NEXT: addl {{[0-9]+}}(%esp), %eax
@ -36,7 +36,7 @@ define float @f1(float %x) nounwind noinline {
; precision12: # %bb.0: # %entry ; precision12: # %bb.0: # %entry
; precision12-NEXT: subl $20, %esp ; precision12-NEXT: subl $20, %esp
; precision12-NEXT: flds {{[0-9]+}}(%esp) ; precision12-NEXT: flds {{[0-9]+}}(%esp)
; precision12-NEXT: fmuls {{\.LCPI.*}} ; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fnstcw (%esp) ; precision12-NEXT: fnstcw (%esp)
; precision12-NEXT: movzwl (%esp), %eax ; precision12-NEXT: movzwl (%esp), %eax
; precision12-NEXT: orl $3072, %eax # imm = 0xC00 ; precision12-NEXT: orl $3072, %eax # imm = 0xC00
@ -48,12 +48,12 @@ define float @f1(float %x) nounwind noinline {
; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision12-NEXT: fisubl {{[0-9]+}}(%esp) ; precision12-NEXT: fisubl {{[0-9]+}}(%esp)
; precision12-NEXT: fld %st(0) ; precision12-NEXT: fld %st(0)
; precision12-NEXT: fmuls {{\.LCPI.*}} ; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fadds {{\.LCPI.*}} ; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmul %st(1), %st ; precision12-NEXT: fmul %st(1), %st
; precision12-NEXT: fadds {{\.LCPI.*}} ; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmulp %st, %st(1) ; precision12-NEXT: fmulp %st, %st(1)
; precision12-NEXT: fadds {{\.LCPI.*}} ; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fstps {{[0-9]+}}(%esp) ; precision12-NEXT: fstps {{[0-9]+}}(%esp)
; precision12-NEXT: shll $23, %eax ; precision12-NEXT: shll $23, %eax
; precision12-NEXT: addl {{[0-9]+}}(%esp), %eax ; precision12-NEXT: addl {{[0-9]+}}(%esp), %eax
@ -66,7 +66,7 @@ define float @f1(float %x) nounwind noinline {
; precision18: # %bb.0: # %entry ; precision18: # %bb.0: # %entry
; precision18-NEXT: subl $20, %esp ; precision18-NEXT: subl $20, %esp
; precision18-NEXT: flds {{[0-9]+}}(%esp) ; precision18-NEXT: flds {{[0-9]+}}(%esp)
; precision18-NEXT: fmuls {{\.LCPI.*}} ; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fnstcw (%esp) ; precision18-NEXT: fnstcw (%esp)
; precision18-NEXT: movzwl (%esp), %eax ; precision18-NEXT: movzwl (%esp), %eax
; precision18-NEXT: orl $3072, %eax # imm = 0xC00 ; precision18-NEXT: orl $3072, %eax # imm = 0xC00
@ -78,16 +78,16 @@ define float @f1(float %x) nounwind noinline {
; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision18-NEXT: fisubl {{[0-9]+}}(%esp) ; precision18-NEXT: fisubl {{[0-9]+}}(%esp)
; precision18-NEXT: fld %st(0) ; precision18-NEXT: fld %st(0)
; precision18-NEXT: fmuls {{\.LCPI.*}} ; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmulp %st, %st(1) ; precision18-NEXT: fmulp %st, %st(1)
; precision18-NEXT: fld1 ; precision18-NEXT: fld1
; precision18-NEXT: faddp %st, %st(1) ; precision18-NEXT: faddp %st, %st(1)
@ -122,10 +122,10 @@ define float @f2(float %x) nounwind noinline {
; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision6-NEXT: fisubl {{[0-9]+}}(%esp) ; precision6-NEXT: fisubl {{[0-9]+}}(%esp)
; precision6-NEXT: fld %st(0) ; precision6-NEXT: fld %st(0)
; precision6-NEXT: fmuls {{\.LCPI.*}} ; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fadds {{\.LCPI.*}} ; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fmulp %st, %st(1) ; precision6-NEXT: fmulp %st, %st(1)
; precision6-NEXT: fadds {{\.LCPI.*}} ; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fstps {{[0-9]+}}(%esp) ; precision6-NEXT: fstps {{[0-9]+}}(%esp)
; precision6-NEXT: shll $23, %eax ; precision6-NEXT: shll $23, %eax
; precision6-NEXT: addl {{[0-9]+}}(%esp), %eax ; precision6-NEXT: addl {{[0-9]+}}(%esp), %eax
@ -149,12 +149,12 @@ define float @f2(float %x) nounwind noinline {
; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision12-NEXT: fisubl {{[0-9]+}}(%esp) ; precision12-NEXT: fisubl {{[0-9]+}}(%esp)
; precision12-NEXT: fld %st(0) ; precision12-NEXT: fld %st(0)
; precision12-NEXT: fmuls {{\.LCPI.*}} ; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fadds {{\.LCPI.*}} ; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmul %st(1), %st ; precision12-NEXT: fmul %st(1), %st
; precision12-NEXT: fadds {{\.LCPI.*}} ; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmulp %st, %st(1) ; precision12-NEXT: fmulp %st, %st(1)
; precision12-NEXT: fadds {{\.LCPI.*}} ; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fstps {{[0-9]+}}(%esp) ; precision12-NEXT: fstps {{[0-9]+}}(%esp)
; precision12-NEXT: shll $23, %eax ; precision12-NEXT: shll $23, %eax
; precision12-NEXT: addl {{[0-9]+}}(%esp), %eax ; precision12-NEXT: addl {{[0-9]+}}(%esp), %eax
@ -178,16 +178,16 @@ define float @f2(float %x) nounwind noinline {
; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision18-NEXT: fisubl {{[0-9]+}}(%esp) ; precision18-NEXT: fisubl {{[0-9]+}}(%esp)
; precision18-NEXT: fld %st(0) ; precision18-NEXT: fld %st(0)
; precision18-NEXT: fmuls {{\.LCPI.*}} ; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmulp %st, %st(1) ; precision18-NEXT: fmulp %st, %st(1)
; precision18-NEXT: fld1 ; precision18-NEXT: fld1
; precision18-NEXT: faddp %st, %st(1) ; precision18-NEXT: faddp %st, %st(1)
@ -211,7 +211,7 @@ define float @f3(float %x) nounwind noinline {
; precision6: # %bb.0: # %entry ; precision6: # %bb.0: # %entry
; precision6-NEXT: subl $20, %esp ; precision6-NEXT: subl $20, %esp
; precision6-NEXT: flds {{[0-9]+}}(%esp) ; precision6-NEXT: flds {{[0-9]+}}(%esp)
; precision6-NEXT: fmuls {{\.LCPI.*}} ; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fnstcw (%esp) ; precision6-NEXT: fnstcw (%esp)
; precision6-NEXT: movzwl (%esp), %eax ; precision6-NEXT: movzwl (%esp), %eax
; precision6-NEXT: orl $3072, %eax # imm = 0xC00 ; precision6-NEXT: orl $3072, %eax # imm = 0xC00
@ -223,10 +223,10 @@ define float @f3(float %x) nounwind noinline {
; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision6-NEXT: fisubl {{[0-9]+}}(%esp) ; precision6-NEXT: fisubl {{[0-9]+}}(%esp)
; precision6-NEXT: fld %st(0) ; precision6-NEXT: fld %st(0)
; precision6-NEXT: fmuls {{\.LCPI.*}} ; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fadds {{\.LCPI.*}} ; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fmulp %st, %st(1) ; precision6-NEXT: fmulp %st, %st(1)
; precision6-NEXT: fadds {{\.LCPI.*}} ; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fstps {{[0-9]+}}(%esp) ; precision6-NEXT: fstps {{[0-9]+}}(%esp)
; precision6-NEXT: shll $23, %eax ; precision6-NEXT: shll $23, %eax
; precision6-NEXT: addl {{[0-9]+}}(%esp), %eax ; precision6-NEXT: addl {{[0-9]+}}(%esp), %eax
@ -239,7 +239,7 @@ define float @f3(float %x) nounwind noinline {
; precision12: # %bb.0: # %entry ; precision12: # %bb.0: # %entry
; precision12-NEXT: subl $20, %esp ; precision12-NEXT: subl $20, %esp
; precision12-NEXT: flds {{[0-9]+}}(%esp) ; precision12-NEXT: flds {{[0-9]+}}(%esp)
; precision12-NEXT: fmuls {{\.LCPI.*}} ; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fnstcw (%esp) ; precision12-NEXT: fnstcw (%esp)
; precision12-NEXT: movzwl (%esp), %eax ; precision12-NEXT: movzwl (%esp), %eax
; precision12-NEXT: orl $3072, %eax # imm = 0xC00 ; precision12-NEXT: orl $3072, %eax # imm = 0xC00
@ -251,12 +251,12 @@ define float @f3(float %x) nounwind noinline {
; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision12-NEXT: fisubl {{[0-9]+}}(%esp) ; precision12-NEXT: fisubl {{[0-9]+}}(%esp)
; precision12-NEXT: fld %st(0) ; precision12-NEXT: fld %st(0)
; precision12-NEXT: fmuls {{\.LCPI.*}} ; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fadds {{\.LCPI.*}} ; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmul %st(1), %st ; precision12-NEXT: fmul %st(1), %st
; precision12-NEXT: fadds {{\.LCPI.*}} ; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmulp %st, %st(1) ; precision12-NEXT: fmulp %st, %st(1)
; precision12-NEXT: fadds {{\.LCPI.*}} ; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fstps {{[0-9]+}}(%esp) ; precision12-NEXT: fstps {{[0-9]+}}(%esp)
; precision12-NEXT: shll $23, %eax ; precision12-NEXT: shll $23, %eax
; precision12-NEXT: addl {{[0-9]+}}(%esp), %eax ; precision12-NEXT: addl {{[0-9]+}}(%esp), %eax
@ -269,7 +269,7 @@ define float @f3(float %x) nounwind noinline {
; precision18: # %bb.0: # %entry ; precision18: # %bb.0: # %entry
; precision18-NEXT: subl $20, %esp ; precision18-NEXT: subl $20, %esp
; precision18-NEXT: flds {{[0-9]+}}(%esp) ; precision18-NEXT: flds {{[0-9]+}}(%esp)
; precision18-NEXT: fmuls {{\.LCPI.*}} ; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fnstcw (%esp) ; precision18-NEXT: fnstcw (%esp)
; precision18-NEXT: movzwl (%esp), %eax ; precision18-NEXT: movzwl (%esp), %eax
; precision18-NEXT: orl $3072, %eax # imm = 0xC00 ; precision18-NEXT: orl $3072, %eax # imm = 0xC00
@ -281,16 +281,16 @@ define float @f3(float %x) nounwind noinline {
; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision18-NEXT: fisubl {{[0-9]+}}(%esp) ; precision18-NEXT: fisubl {{[0-9]+}}(%esp)
; precision18-NEXT: fld %st(0) ; precision18-NEXT: fld %st(0)
; precision18-NEXT: fmuls {{\.LCPI.*}} ; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmulp %st, %st(1) ; precision18-NEXT: fmulp %st, %st(1)
; precision18-NEXT: fld1 ; precision18-NEXT: fld1
; precision18-NEXT: faddp %st, %st(1) ; precision18-NEXT: faddp %st, %st(1)
@ -324,12 +324,12 @@ define float @f4(float %x) nounwind noinline {
; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision6-NEXT: flds (%esp) ; precision6-NEXT: flds (%esp)
; precision6-NEXT: fld %st(0) ; precision6-NEXT: fld %st(0)
; precision6-NEXT: fmuls {{\.LCPI.*}} ; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fadds {{\.LCPI.*}} ; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fmulp %st, %st(1) ; precision6-NEXT: fmulp %st, %st(1)
; precision6-NEXT: fadds {{\.LCPI.*}} ; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fildl {{[0-9]+}}(%esp) ; precision6-NEXT: fildl {{[0-9]+}}(%esp)
; precision6-NEXT: fmuls {{\.LCPI.*}} ; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: faddp %st, %st(1) ; precision6-NEXT: faddp %st, %st(1)
; precision6-NEXT: addl $8, %esp ; precision6-NEXT: addl $8, %esp
; precision6-NEXT: retl ; precision6-NEXT: retl
@ -348,16 +348,16 @@ define float @f4(float %x) nounwind noinline {
; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision12-NEXT: flds (%esp) ; precision12-NEXT: flds (%esp)
; precision12-NEXT: fld %st(0) ; precision12-NEXT: fld %st(0)
; precision12-NEXT: fmuls {{\.LCPI.*}} ; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fadds {{\.LCPI.*}} ; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmul %st(1), %st ; precision12-NEXT: fmul %st(1), %st
; precision12-NEXT: fadds {{\.LCPI.*}} ; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmul %st(1), %st ; precision12-NEXT: fmul %st(1), %st
; precision12-NEXT: fadds {{\.LCPI.*}} ; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmulp %st, %st(1) ; precision12-NEXT: fmulp %st, %st(1)
; precision12-NEXT: fadds {{\.LCPI.*}} ; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fildl {{[0-9]+}}(%esp) ; precision12-NEXT: fildl {{[0-9]+}}(%esp)
; precision12-NEXT: fmuls {{\.LCPI.*}} ; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: faddp %st, %st(1) ; precision12-NEXT: faddp %st, %st(1)
; precision12-NEXT: addl $8, %esp ; precision12-NEXT: addl $8, %esp
; precision12-NEXT: retl ; precision12-NEXT: retl
@ -376,20 +376,20 @@ define float @f4(float %x) nounwind noinline {
; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision18-NEXT: flds (%esp) ; precision18-NEXT: flds (%esp)
; precision18-NEXT: fld %st(0) ; precision18-NEXT: fld %st(0)
; precision18-NEXT: fmuls {{\.LCPI.*}} ; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmulp %st, %st(1) ; precision18-NEXT: fmulp %st, %st(1)
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fildl {{[0-9]+}}(%esp) ; precision18-NEXT: fildl {{[0-9]+}}(%esp)
; precision18-NEXT: fmuls {{\.LCPI.*}} ; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: faddp %st, %st(1) ; precision18-NEXT: faddp %st, %st(1)
; precision18-NEXT: addl $8, %esp ; precision18-NEXT: addl $8, %esp
; precision18-NEXT: retl ; precision18-NEXT: retl
@ -416,10 +416,10 @@ define float @f5(float %x) nounwind noinline {
; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision6-NEXT: flds (%esp) ; precision6-NEXT: flds (%esp)
; precision6-NEXT: fld %st(0) ; precision6-NEXT: fld %st(0)
; precision6-NEXT: fmuls {{\.LCPI.*}} ; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fadds {{\.LCPI.*}} ; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fmulp %st, %st(1) ; precision6-NEXT: fmulp %st, %st(1)
; precision6-NEXT: fadds {{\.LCPI.*}} ; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fiaddl {{[0-9]+}}(%esp) ; precision6-NEXT: fiaddl {{[0-9]+}}(%esp)
; precision6-NEXT: addl $8, %esp ; precision6-NEXT: addl $8, %esp
; precision6-NEXT: retl ; precision6-NEXT: retl
@ -438,14 +438,14 @@ define float @f5(float %x) nounwind noinline {
; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision12-NEXT: flds (%esp) ; precision12-NEXT: flds (%esp)
; precision12-NEXT: fld %st(0) ; precision12-NEXT: fld %st(0)
; precision12-NEXT: fmuls {{\.LCPI.*}} ; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fadds {{\.LCPI.*}} ; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmul %st(1), %st ; precision12-NEXT: fmul %st(1), %st
; precision12-NEXT: fadds {{\.LCPI.*}} ; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmul %st(1), %st ; precision12-NEXT: fmul %st(1), %st
; precision12-NEXT: fadds {{\.LCPI.*}} ; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmulp %st, %st(1) ; precision12-NEXT: fmulp %st, %st(1)
; precision12-NEXT: fadds {{\.LCPI.*}} ; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fiaddl {{[0-9]+}}(%esp) ; precision12-NEXT: fiaddl {{[0-9]+}}(%esp)
; precision12-NEXT: addl $8, %esp ; precision12-NEXT: addl $8, %esp
; precision12-NEXT: retl ; precision12-NEXT: retl
@ -464,18 +464,18 @@ define float @f5(float %x) nounwind noinline {
; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision18-NEXT: flds (%esp) ; precision18-NEXT: flds (%esp)
; precision18-NEXT: fld %st(0) ; precision18-NEXT: fld %st(0)
; precision18-NEXT: fmuls {{\.LCPI.*}} ; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmulp %st, %st(1) ; precision18-NEXT: fmulp %st, %st(1)
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fiaddl {{[0-9]+}}(%esp) ; precision18-NEXT: fiaddl {{[0-9]+}}(%esp)
; precision18-NEXT: addl $8, %esp ; precision18-NEXT: addl $8, %esp
; precision18-NEXT: retl ; precision18-NEXT: retl
@ -502,12 +502,12 @@ define float @f6(float %x) nounwind noinline {
; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision6-NEXT: flds (%esp) ; precision6-NEXT: flds (%esp)
; precision6-NEXT: fld %st(0) ; precision6-NEXT: fld %st(0)
; precision6-NEXT: fmuls {{\.LCPI.*}} ; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fadds {{\.LCPI.*}} ; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fmulp %st, %st(1) ; precision6-NEXT: fmulp %st, %st(1)
; precision6-NEXT: fadds {{\.LCPI.*}} ; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fildl {{[0-9]+}}(%esp) ; precision6-NEXT: fildl {{[0-9]+}}(%esp)
; precision6-NEXT: fmuls {{\.LCPI.*}} ; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: faddp %st, %st(1) ; precision6-NEXT: faddp %st, %st(1)
; precision6-NEXT: addl $8, %esp ; precision6-NEXT: addl $8, %esp
; precision6-NEXT: retl ; precision6-NEXT: retl
@ -526,14 +526,14 @@ define float @f6(float %x) nounwind noinline {
; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision12-NEXT: flds (%esp) ; precision12-NEXT: flds (%esp)
; precision12-NEXT: fld %st(0) ; precision12-NEXT: fld %st(0)
; precision12-NEXT: fmuls {{\.LCPI.*}} ; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fadds {{\.LCPI.*}} ; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmul %st(1), %st ; precision12-NEXT: fmul %st(1), %st
; precision12-NEXT: fadds {{\.LCPI.*}} ; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmulp %st, %st(1) ; precision12-NEXT: fmulp %st, %st(1)
; precision12-NEXT: fadds {{\.LCPI.*}} ; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fildl {{[0-9]+}}(%esp) ; precision12-NEXT: fildl {{[0-9]+}}(%esp)
; precision12-NEXT: fmuls {{\.LCPI.*}} ; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: faddp %st, %st(1) ; precision12-NEXT: faddp %st, %st(1)
; precision12-NEXT: addl $8, %esp ; precision12-NEXT: addl $8, %esp
; precision12-NEXT: retl ; precision12-NEXT: retl
@ -552,18 +552,18 @@ define float @f6(float %x) nounwind noinline {
; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision18-NEXT: flds (%esp) ; precision18-NEXT: flds (%esp)
; precision18-NEXT: fld %st(0) ; precision18-NEXT: fld %st(0)
; precision18-NEXT: fmuls {{\.LCPI.*}} ; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st ; precision18-NEXT: fmul %st(1), %st
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmulp %st, %st(1) ; precision18-NEXT: fmulp %st, %st(1)
; precision18-NEXT: fadds {{\.LCPI.*}} ; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fildl {{[0-9]+}}(%esp) ; precision18-NEXT: fildl {{[0-9]+}}(%esp)
; precision18-NEXT: fmuls {{\.LCPI.*}} ; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: faddp %st, %st(1) ; precision18-NEXT: faddp %st, %st(1)
; precision18-NEXT: addl $8, %esp ; precision18-NEXT: addl $8, %esp
; precision18-NEXT: retl ; precision18-NEXT: retl

View File

@ -517,13 +517,13 @@ define <8 x i32> @test9(%struct.ST* %base, <8 x i64> %ind1, <8 x i32>%ind5) {
; SKX_LARGE: # %bb.0: # %entry ; SKX_LARGE: # %bb.0: # %entry
; SKX_LARGE-NEXT: vpbroadcastq %rdi, %zmm2 ; SKX_LARGE-NEXT: vpbroadcastq %rdi, %zmm2
; SKX_LARGE-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero ; SKX_LARGE-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax ; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vpmuldq (%rax){1to8}, %zmm1, %zmm1 ; SKX_LARGE-NEXT: vpmuldq (%rax){1to8}, %zmm1, %zmm1
; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax ; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vpmullq (%rax){1to8}, %zmm0, %zmm0 ; SKX_LARGE-NEXT: vpmullq (%rax){1to8}, %zmm0, %zmm0
; SKX_LARGE-NEXT: vpaddq %zmm1, %zmm0, %zmm0 ; SKX_LARGE-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; SKX_LARGE-NEXT: vpaddq %zmm0, %zmm2, %zmm0 ; SKX_LARGE-NEXT: vpaddq %zmm0, %zmm2, %zmm0
; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax ; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vpaddq (%rax){1to8}, %zmm0, %zmm1 ; SKX_LARGE-NEXT: vpaddq (%rax){1to8}, %zmm0, %zmm1
; SKX_LARGE-NEXT: kxnorw %k0, %k0, %k1 ; SKX_LARGE-NEXT: kxnorw %k0, %k0, %k1
; SKX_LARGE-NEXT: vpgatherqd (,%zmm1), %ymm0 {%k1} ; SKX_LARGE-NEXT: vpgatherqd (,%zmm1), %ymm0 {%k1}
@ -531,12 +531,12 @@ define <8 x i32> @test9(%struct.ST* %base, <8 x i64> %ind1, <8 x i32>%ind5) {
; ;
; SKX_32-LABEL: test9: ; SKX_32-LABEL: test9:
; SKX_32: # %bb.0: # %entry ; SKX_32: # %bb.0: # %entry
; SKX_32-NEXT: vpmulld {{\.LCPI.*}}{1to8}, %ymm1, %ymm1 ; SKX_32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm1, %ymm1
; SKX_32-NEXT: vpmovqd %zmm0, %ymm0 ; SKX_32-NEXT: vpmovqd %zmm0, %ymm0
; SKX_32-NEXT: vpmulld {{\.LCPI.*}}{1to8}, %ymm0, %ymm0 ; SKX_32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm0
; SKX_32-NEXT: vpaddd {{[0-9]+}}(%esp){1to8}, %ymm0, %ymm0 ; SKX_32-NEXT: vpaddd {{[0-9]+}}(%esp){1to8}, %ymm0, %ymm0
; SKX_32-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ; SKX_32-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; SKX_32-NEXT: vpaddd {{\.LCPI.*}}{1to8}, %ymm0, %ymm1 ; SKX_32-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm1
; SKX_32-NEXT: kxnorw %k0, %k0, %k1 ; SKX_32-NEXT: kxnorw %k0, %k0, %k1
; SKX_32-NEXT: vpgatherdd (,%ymm1), %ymm0 {%k1} ; SKX_32-NEXT: vpgatherdd (,%ymm1), %ymm0 {%k1}
; SKX_32-NEXT: retl ; SKX_32-NEXT: retl
@ -603,13 +603,13 @@ define <8 x i32> @test10(%struct.ST* %base, <8 x i64> %i1, <8 x i32>%ind5) {
; SKX_LARGE: # %bb.0: # %entry ; SKX_LARGE: # %bb.0: # %entry
; SKX_LARGE-NEXT: vpbroadcastq %rdi, %zmm2 ; SKX_LARGE-NEXT: vpbroadcastq %rdi, %zmm2
; SKX_LARGE-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero ; SKX_LARGE-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax ; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vpmuldq (%rax){1to8}, %zmm1, %zmm1 ; SKX_LARGE-NEXT: vpmuldq (%rax){1to8}, %zmm1, %zmm1
; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax ; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vpmullq (%rax){1to8}, %zmm0, %zmm0 ; SKX_LARGE-NEXT: vpmullq (%rax){1to8}, %zmm0, %zmm0
; SKX_LARGE-NEXT: vpaddq %zmm1, %zmm0, %zmm0 ; SKX_LARGE-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; SKX_LARGE-NEXT: vpaddq %zmm0, %zmm2, %zmm0 ; SKX_LARGE-NEXT: vpaddq %zmm0, %zmm2, %zmm0
; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax ; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vpaddq (%rax){1to8}, %zmm0, %zmm1 ; SKX_LARGE-NEXT: vpaddq (%rax){1to8}, %zmm0, %zmm1
; SKX_LARGE-NEXT: kxnorw %k0, %k0, %k1 ; SKX_LARGE-NEXT: kxnorw %k0, %k0, %k1
; SKX_LARGE-NEXT: vpgatherqd (,%zmm1), %ymm0 {%k1} ; SKX_LARGE-NEXT: vpgatherqd (,%zmm1), %ymm0 {%k1}
@ -617,12 +617,12 @@ define <8 x i32> @test10(%struct.ST* %base, <8 x i64> %i1, <8 x i32>%ind5) {
; ;
; SKX_32-LABEL: test10: ; SKX_32-LABEL: test10:
; SKX_32: # %bb.0: # %entry ; SKX_32: # %bb.0: # %entry
; SKX_32-NEXT: vpmulld {{\.LCPI.*}}{1to8}, %ymm1, %ymm1 ; SKX_32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm1, %ymm1
; SKX_32-NEXT: vpmovqd %zmm0, %ymm0 ; SKX_32-NEXT: vpmovqd %zmm0, %ymm0
; SKX_32-NEXT: vpmulld {{\.LCPI.*}}{1to8}, %ymm0, %ymm0 ; SKX_32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm0
; SKX_32-NEXT: vpaddd {{[0-9]+}}(%esp){1to8}, %ymm0, %ymm0 ; SKX_32-NEXT: vpaddd {{[0-9]+}}(%esp){1to8}, %ymm0, %ymm0
; SKX_32-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ; SKX_32-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; SKX_32-NEXT: vpaddd {{\.LCPI.*}}{1to8}, %ymm0, %ymm1 ; SKX_32-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm1
; SKX_32-NEXT: kxnorw %k0, %k0, %k1 ; SKX_32-NEXT: kxnorw %k0, %k0, %k1
; SKX_32-NEXT: vpgatherdd (,%ymm1), %ymm0 {%k1} ; SKX_32-NEXT: vpgatherdd (,%ymm1), %ymm0 {%k1}
; SKX_32-NEXT: retl ; SKX_32-NEXT: retl
@ -2893,7 +2893,7 @@ define <16 x float> @zext_index(float* %base, <16 x i32> %ind) {
; KNL_32-LABEL: zext_index: ; KNL_32-LABEL: zext_index:
; KNL_32: # %bb.0: ; KNL_32: # %bb.0:
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax ; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: vpandd {{\.LCPI.*}}{1to16}, %zmm0, %zmm1 ; KNL_32-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm1
; KNL_32-NEXT: kxnorw %k0, %k0, %k1 ; KNL_32-NEXT: kxnorw %k0, %k0, %k1
; KNL_32-NEXT: vgatherdps (%eax,%zmm1,4), %zmm0 {%k1} ; KNL_32-NEXT: vgatherdps (%eax,%zmm1,4), %zmm0 {%k1}
; KNL_32-NEXT: retl ; KNL_32-NEXT: retl
@ -2907,7 +2907,7 @@ define <16 x float> @zext_index(float* %base, <16 x i32> %ind) {
; ;
; SKX_LARGE-LABEL: zext_index: ; SKX_LARGE-LABEL: zext_index:
; SKX_LARGE: # %bb.0: ; SKX_LARGE: # %bb.0:
; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax ; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vandps (%rax){1to16}, %zmm0, %zmm1 ; SKX_LARGE-NEXT: vandps (%rax){1to16}, %zmm0, %zmm1
; SKX_LARGE-NEXT: kxnorw %k0, %k0, %k1 ; SKX_LARGE-NEXT: kxnorw %k0, %k0, %k1
; SKX_LARGE-NEXT: vgatherdps (%rdi,%zmm1,4), %zmm0 {%k1} ; SKX_LARGE-NEXT: vgatherdps (%rdi,%zmm1,4), %zmm0 {%k1}
@ -2916,7 +2916,7 @@ define <16 x float> @zext_index(float* %base, <16 x i32> %ind) {
; SKX_32-LABEL: zext_index: ; SKX_32-LABEL: zext_index:
; SKX_32: # %bb.0: ; SKX_32: # %bb.0:
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax ; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SKX_32-NEXT: vandps {{\.LCPI.*}}{1to16}, %zmm0, %zmm1 ; SKX_32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm1
; SKX_32-NEXT: kxnorw %k0, %k0, %k1 ; SKX_32-NEXT: kxnorw %k0, %k0, %k1
; SKX_32-NEXT: vgatherdps (%eax,%zmm1,4), %zmm0 {%k1} ; SKX_32-NEXT: vgatherdps (%eax,%zmm1,4), %zmm0 {%k1}
; SKX_32-NEXT: retl ; SKX_32-NEXT: retl
@ -3184,7 +3184,7 @@ define <2 x i64> @gather_2i64_constant_indices(i64* %ptr, <2 x i1> %mask) {
; SKX_LARGE: # %bb.0: ; SKX_LARGE: # %bb.0:
; SKX_LARGE-NEXT: vpsllq $63, %xmm0, %xmm0 ; SKX_LARGE-NEXT: vpsllq $63, %xmm0, %xmm0
; SKX_LARGE-NEXT: vpmovq2m %xmm0, %k1 ; SKX_LARGE-NEXT: vpmovq2m %xmm0, %k1
; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax ; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vmovdqa (%rax), %xmm1 ; SKX_LARGE-NEXT: vmovdqa (%rax), %xmm1
; SKX_LARGE-NEXT: vpxor %xmm0, %xmm0, %xmm0 ; SKX_LARGE-NEXT: vpxor %xmm0, %xmm0, %xmm0
; SKX_LARGE-NEXT: vpgatherdq (%rdi,%xmm1,8), %xmm0 {%k1} ; SKX_LARGE-NEXT: vpgatherdq (%rdi,%xmm1,8), %xmm0 {%k1}
@ -3241,7 +3241,7 @@ define <16 x i32> @gather_16i64_constant_indices(i32* %ptr, <16 x i1> %mask) {
; SKX_LARGE-NEXT: vpmovsxbd %xmm0, %zmm0 ; SKX_LARGE-NEXT: vpmovsxbd %xmm0, %zmm0
; SKX_LARGE-NEXT: vpslld $31, %zmm0, %zmm0 ; SKX_LARGE-NEXT: vpslld $31, %zmm0, %zmm0
; SKX_LARGE-NEXT: vpmovd2m %zmm0, %k1 ; SKX_LARGE-NEXT: vpmovd2m %zmm0, %k1
; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax ; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vmovdqa64 (%rax), %zmm1 ; SKX_LARGE-NEXT: vmovdqa64 (%rax), %zmm1
; SKX_LARGE-NEXT: vpxor %xmm0, %xmm0, %xmm0 ; SKX_LARGE-NEXT: vpxor %xmm0, %xmm0, %xmm0
; SKX_LARGE-NEXT: vpgatherdd (%rdi,%zmm1,4), %zmm0 {%k1} ; SKX_LARGE-NEXT: vpgatherdd (%rdi,%zmm1,4), %zmm0 {%k1}
@ -3300,7 +3300,7 @@ define void @scatter_2i64_constant_indices(i32* %ptr, <2 x i1> %mask, <2 x i32>
; SKX_LARGE: # %bb.0: ; SKX_LARGE: # %bb.0:
; SKX_LARGE-NEXT: vpsllq $63, %xmm0, %xmm0 ; SKX_LARGE-NEXT: vpsllq $63, %xmm0, %xmm0
; SKX_LARGE-NEXT: vpmovq2m %xmm0, %k1 ; SKX_LARGE-NEXT: vpmovq2m %xmm0, %k1
; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax ; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vmovdqa (%rax), %xmm0 ; SKX_LARGE-NEXT: vmovdqa (%rax), %xmm0
; SKX_LARGE-NEXT: vpscatterdd %xmm1, (%rdi,%xmm0,4) {%k1} ; SKX_LARGE-NEXT: vpscatterdd %xmm1, (%rdi,%xmm0,4) {%k1}
; SKX_LARGE-NEXT: retq ; SKX_LARGE-NEXT: retq
@ -3355,7 +3355,7 @@ define void @scatter_16i64_constant_indices(i32* %ptr, <16 x i1> %mask, <16 x i3
; SKX_LARGE-NEXT: vpmovsxbd %xmm0, %zmm0 ; SKX_LARGE-NEXT: vpmovsxbd %xmm0, %zmm0
; SKX_LARGE-NEXT: vpslld $31, %zmm0, %zmm0 ; SKX_LARGE-NEXT: vpslld $31, %zmm0, %zmm0
; SKX_LARGE-NEXT: vpmovd2m %zmm0, %k1 ; SKX_LARGE-NEXT: vpmovd2m %zmm0, %k1
; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax ; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vmovdqa64 (%rax), %zmm0 ; SKX_LARGE-NEXT: vmovdqa64 (%rax), %zmm0
; SKX_LARGE-NEXT: vpscatterdd %zmm1, (%rdi,%zmm0,4) {%k1} ; SKX_LARGE-NEXT: vpscatterdd %zmm1, (%rdi,%zmm0,4) {%k1}
; SKX_LARGE-NEXT: vzeroupper ; SKX_LARGE-NEXT: vzeroupper
@ -3506,7 +3506,7 @@ define <8 x i64> @pr45906(<8 x %struct.foo*> %ptr) {
; ;
; SKX_LARGE-LABEL: pr45906: ; SKX_LARGE-LABEL: pr45906:
; SKX_LARGE: # %bb.0: # %bb ; SKX_LARGE: # %bb.0: # %bb
; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax ; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vpaddq (%rax){1to8}, %zmm0, %zmm1 ; SKX_LARGE-NEXT: vpaddq (%rax){1to8}, %zmm0, %zmm1
; SKX_LARGE-NEXT: kxnorw %k0, %k0, %k1 ; SKX_LARGE-NEXT: kxnorw %k0, %k0, %k1
; SKX_LARGE-NEXT: vpgatherqq (,%zmm1), %zmm0 {%k1} ; SKX_LARGE-NEXT: vpgatherqq (,%zmm1), %zmm0 {%k1}
@ -3514,7 +3514,7 @@ define <8 x i64> @pr45906(<8 x %struct.foo*> %ptr) {
; ;
; SKX_32-LABEL: pr45906: ; SKX_32-LABEL: pr45906:
; SKX_32: # %bb.0: # %bb ; SKX_32: # %bb.0: # %bb
; SKX_32-NEXT: vpaddd {{\.LCPI.*}}{1to8}, %ymm0, %ymm1 ; SKX_32-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm1
; SKX_32-NEXT: kxnorw %k0, %k0, %k1 ; SKX_32-NEXT: kxnorw %k0, %k0, %k1
; SKX_32-NEXT: vpgatherdq (,%ymm1), %zmm0 {%k1} ; SKX_32-NEXT: vpgatherdq (,%ymm1), %zmm0 {%k1}
; SKX_32-NEXT: retl ; SKX_32-NEXT: retl

View File

@ -456,7 +456,7 @@ define i1 @length16_eq_const(i8* %X) nounwind minsize {
; X86-SSE2: # %bb.0: ; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X86-SSE2-NEXT: sete %al ; X86-SSE2-NEXT: sete %al

View File

@ -1480,7 +1480,7 @@ define i1 @length16_eq_const(i8* %X) nounwind {
; X86-SSE2: # %bb.0: ; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X86-SSE2-NEXT: sete %al ; X86-SSE2-NEXT: sete %al
@ -1490,7 +1490,7 @@ define i1 @length16_eq_const(i8* %X) nounwind {
; X86-SSE41: # %bb.0: ; X86-SSE41: # %bb.0:
; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE41-NEXT: movdqu (%eax), %xmm0 ; X86-SSE41-NEXT: movdqu (%eax), %xmm0
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE41-NEXT: ptest %xmm0, %xmm0 ; X86-SSE41-NEXT: ptest %xmm0, %xmm0
; X86-SSE41-NEXT: sete %al ; X86-SSE41-NEXT: sete %al
; X86-SSE41-NEXT: retl ; X86-SSE41-NEXT: retl
@ -1823,8 +1823,8 @@ define i1 @length24_eq_const(i8* %X) nounwind {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: movdqu 8(%eax), %xmm1 ; X86-SSE2-NEXT: movdqu 8(%eax), %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
@ -1836,8 +1836,8 @@ define i1 @length24_eq_const(i8* %X) nounwind {
; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE41-NEXT: movdqu (%eax), %xmm0 ; X86-SSE41-NEXT: movdqu (%eax), %xmm0
; X86-SSE41-NEXT: movdqu 8(%eax), %xmm1 ; X86-SSE41-NEXT: movdqu 8(%eax), %xmm1
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE41-NEXT: por %xmm1, %xmm0 ; X86-SSE41-NEXT: por %xmm1, %xmm0
; X86-SSE41-NEXT: ptest %xmm0, %xmm0 ; X86-SSE41-NEXT: ptest %xmm0, %xmm0
; X86-SSE41-NEXT: setne %al ; X86-SSE41-NEXT: setne %al
@ -2312,8 +2312,8 @@ define i1 @length31_eq_const(i8* %X) nounwind {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: movdqu 15(%eax), %xmm1 ; X86-SSE2-NEXT: movdqu 15(%eax), %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
@ -2325,8 +2325,8 @@ define i1 @length31_eq_const(i8* %X) nounwind {
; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE41-NEXT: movdqu (%eax), %xmm0 ; X86-SSE41-NEXT: movdqu (%eax), %xmm0
; X86-SSE41-NEXT: movdqu 15(%eax), %xmm1 ; X86-SSE41-NEXT: movdqu 15(%eax), %xmm1
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE41-NEXT: por %xmm1, %xmm0 ; X86-SSE41-NEXT: por %xmm1, %xmm0
; X86-SSE41-NEXT: ptest %xmm0, %xmm0 ; X86-SSE41-NEXT: ptest %xmm0, %xmm0
; X86-SSE41-NEXT: setne %al ; X86-SSE41-NEXT: setne %al
@ -2816,8 +2816,8 @@ define i1 @length32_eq_const(i8* %X) nounwind {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1 ; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
@ -2829,8 +2829,8 @@ define i1 @length32_eq_const(i8* %X) nounwind {
; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE41-NEXT: movdqu (%eax), %xmm0 ; X86-SSE41-NEXT: movdqu (%eax), %xmm0
; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1 ; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE41-NEXT: por %xmm1, %xmm0 ; X86-SSE41-NEXT: por %xmm1, %xmm0
; X86-SSE41-NEXT: ptest %xmm0, %xmm0 ; X86-SSE41-NEXT: ptest %xmm0, %xmm0
; X86-SSE41-NEXT: setne %al ; X86-SSE41-NEXT: setne %al
@ -3293,9 +3293,9 @@ define i1 @length48_eq_const(i8* %X) nounwind {
; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1 ; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1
; X86-SSE2-NEXT: movdqu 32(%eax), %xmm2 ; X86-SSE2-NEXT: movdqu 32(%eax), %xmm2
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm2 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: pand %xmm1, %xmm2 ; X86-SSE2-NEXT: pand %xmm1, %xmm2
; X86-SSE2-NEXT: pand %xmm0, %xmm2 ; X86-SSE2-NEXT: pand %xmm0, %xmm2
; X86-SSE2-NEXT: pmovmskb %xmm2, %eax ; X86-SSE2-NEXT: pmovmskb %xmm2, %eax
@ -3309,9 +3309,9 @@ define i1 @length48_eq_const(i8* %X) nounwind {
; X86-SSE41-NEXT: movdqu (%eax), %xmm0 ; X86-SSE41-NEXT: movdqu (%eax), %xmm0
; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1 ; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1
; X86-SSE41-NEXT: movdqu 32(%eax), %xmm2 ; X86-SSE41-NEXT: movdqu 32(%eax), %xmm2
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm2 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE41-NEXT: por %xmm1, %xmm2 ; X86-SSE41-NEXT: por %xmm1, %xmm2
; X86-SSE41-NEXT: por %xmm0, %xmm2 ; X86-SSE41-NEXT: por %xmm0, %xmm2
; X86-SSE41-NEXT: ptest %xmm2, %xmm2 ; X86-SSE41-NEXT: ptest %xmm2, %xmm2
@ -3673,12 +3673,12 @@ define i1 @length63_eq_const(i8* %X) nounwind {
; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1 ; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1
; X86-SSE2-NEXT: movdqu 32(%eax), %xmm2 ; X86-SSE2-NEXT: movdqu 32(%eax), %xmm2
; X86-SSE2-NEXT: movdqu 47(%eax), %xmm3 ; X86-SSE2-NEXT: movdqu 47(%eax), %xmm3
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm3 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm2 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: pand %xmm3, %xmm2 ; X86-SSE2-NEXT: pand %xmm3, %xmm2
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pand %xmm2, %xmm1 ; X86-SSE2-NEXT: pand %xmm2, %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
@ -3692,12 +3692,12 @@ define i1 @length63_eq_const(i8* %X) nounwind {
; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1 ; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1
; X86-SSE41-NEXT: movdqu 32(%eax), %xmm2 ; X86-SSE41-NEXT: movdqu 32(%eax), %xmm2
; X86-SSE41-NEXT: movdqu 47(%eax), %xmm3 ; X86-SSE41-NEXT: movdqu 47(%eax), %xmm3
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm3 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm2 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE41-NEXT: por %xmm3, %xmm2 ; X86-SSE41-NEXT: por %xmm3, %xmm2
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE41-NEXT: por %xmm2, %xmm1 ; X86-SSE41-NEXT: por %xmm2, %xmm1
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE41-NEXT: por %xmm1, %xmm0 ; X86-SSE41-NEXT: por %xmm1, %xmm0
; X86-SSE41-NEXT: ptest %xmm0, %xmm0 ; X86-SSE41-NEXT: ptest %xmm0, %xmm0
; X86-SSE41-NEXT: sete %al ; X86-SSE41-NEXT: sete %al
@ -4079,12 +4079,12 @@ define i1 @length64_eq_const(i8* %X) nounwind {
; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1 ; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1
; X86-SSE2-NEXT: movdqu 32(%eax), %xmm2 ; X86-SSE2-NEXT: movdqu 32(%eax), %xmm2
; X86-SSE2-NEXT: movdqu 48(%eax), %xmm3 ; X86-SSE2-NEXT: movdqu 48(%eax), %xmm3
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm3 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm2 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: pand %xmm3, %xmm2 ; X86-SSE2-NEXT: pand %xmm3, %xmm2
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pand %xmm2, %xmm1 ; X86-SSE2-NEXT: pand %xmm2, %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
@ -4098,12 +4098,12 @@ define i1 @length64_eq_const(i8* %X) nounwind {
; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1 ; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1
; X86-SSE41-NEXT: movdqu 32(%eax), %xmm2 ; X86-SSE41-NEXT: movdqu 32(%eax), %xmm2
; X86-SSE41-NEXT: movdqu 48(%eax), %xmm3 ; X86-SSE41-NEXT: movdqu 48(%eax), %xmm3
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm3 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm2 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE41-NEXT: por %xmm3, %xmm2 ; X86-SSE41-NEXT: por %xmm3, %xmm2
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE41-NEXT: por %xmm2, %xmm1 ; X86-SSE41-NEXT: por %xmm2, %xmm1
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE41-NEXT: por %xmm1, %xmm0 ; X86-SSE41-NEXT: por %xmm1, %xmm0
; X86-SSE41-NEXT: ptest %xmm0, %xmm0 ; X86-SSE41-NEXT: ptest %xmm0, %xmm0
; X86-SSE41-NEXT: sete %al ; X86-SSE41-NEXT: sete %al

View File

@ -590,7 +590,7 @@ define i1 @length16_eq_const(i8* %X) nounwind optsize {
; X86-SSE2: # %bb.0: ; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X86-SSE2-NEXT: sete %al ; X86-SSE2-NEXT: sete %al
@ -715,8 +715,8 @@ define i1 @length24_eq_const(i8* %X) nounwind optsize {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: movdqu 8(%eax), %xmm1 ; X86-SSE2-NEXT: movdqu 8(%eax), %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
@ -854,8 +854,8 @@ define i1 @length32_eq_const(i8* %X) nounwind optsize {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1 ; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF

View File

@ -590,7 +590,7 @@ define i1 @length16_eq_const(i8* %X) nounwind !prof !14 {
; X86-SSE2: # %bb.0: ; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X86-SSE2-NEXT: sete %al ; X86-SSE2-NEXT: sete %al
@ -715,8 +715,8 @@ define i1 @length24_eq_const(i8* %X) nounwind !prof !14 {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: movdqu 8(%eax), %xmm1 ; X86-SSE2-NEXT: movdqu 8(%eax), %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
@ -854,8 +854,8 @@ define i1 @length32_eq_const(i8* %X) nounwind !prof !14 {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1 ; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF

View File

@ -1485,7 +1485,7 @@ define i1 @length16_eq_const(i8* %X) nounwind {
; X86-SSE2: # %bb.0: ; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X86-SSE2-NEXT: sete %al ; X86-SSE2-NEXT: sete %al
@ -1495,7 +1495,7 @@ define i1 @length16_eq_const(i8* %X) nounwind {
; X86-SSE41: # %bb.0: ; X86-SSE41: # %bb.0:
; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE41-NEXT: movdqu (%eax), %xmm0 ; X86-SSE41-NEXT: movdqu (%eax), %xmm0
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE41-NEXT: ptest %xmm0, %xmm0 ; X86-SSE41-NEXT: ptest %xmm0, %xmm0
; X86-SSE41-NEXT: sete %al ; X86-SSE41-NEXT: sete %al
; X86-SSE41-NEXT: retl ; X86-SSE41-NEXT: retl
@ -1756,8 +1756,8 @@ define i1 @length24_eq_const(i8* %X) nounwind {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: movdqu 8(%eax), %xmm1 ; X86-SSE2-NEXT: movdqu 8(%eax), %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
@ -1769,8 +1769,8 @@ define i1 @length24_eq_const(i8* %X) nounwind {
; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE41-NEXT: movdqu (%eax), %xmm0 ; X86-SSE41-NEXT: movdqu (%eax), %xmm0
; X86-SSE41-NEXT: movdqu 8(%eax), %xmm1 ; X86-SSE41-NEXT: movdqu 8(%eax), %xmm1
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE41-NEXT: por %xmm1, %xmm0 ; X86-SSE41-NEXT: por %xmm1, %xmm0
; X86-SSE41-NEXT: ptest %xmm0, %xmm0 ; X86-SSE41-NEXT: ptest %xmm0, %xmm0
; X86-SSE41-NEXT: setne %al ; X86-SSE41-NEXT: setne %al
@ -2152,8 +2152,8 @@ define i1 @length31_eq_const(i8* %X) nounwind {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: movdqu 15(%eax), %xmm1 ; X86-SSE2-NEXT: movdqu 15(%eax), %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
@ -2165,8 +2165,8 @@ define i1 @length31_eq_const(i8* %X) nounwind {
; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE41-NEXT: movdqu (%eax), %xmm0 ; X86-SSE41-NEXT: movdqu (%eax), %xmm0
; X86-SSE41-NEXT: movdqu 15(%eax), %xmm1 ; X86-SSE41-NEXT: movdqu 15(%eax), %xmm1
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE41-NEXT: por %xmm1, %xmm0 ; X86-SSE41-NEXT: por %xmm1, %xmm0
; X86-SSE41-NEXT: ptest %xmm0, %xmm0 ; X86-SSE41-NEXT: ptest %xmm0, %xmm0
; X86-SSE41-NEXT: setne %al ; X86-SSE41-NEXT: setne %al
@ -2563,8 +2563,8 @@ define i1 @length32_eq_const(i8* %X) nounwind {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1 ; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 ; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
@ -2576,8 +2576,8 @@ define i1 @length32_eq_const(i8* %X) nounwind {
; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE41-NEXT: movdqu (%eax), %xmm0 ; X86-SSE41-NEXT: movdqu (%eax), %xmm0
; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1 ; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 ; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE41-NEXT: por %xmm1, %xmm0 ; X86-SSE41-NEXT: por %xmm1, %xmm0
; X86-SSE41-NEXT: ptest %xmm0, %xmm0 ; X86-SSE41-NEXT: ptest %xmm0, %xmm0
; X86-SSE41-NEXT: setne %al ; X86-SSE41-NEXT: setne %al

View File

@ -501,7 +501,7 @@ define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF(i16* %ptr) nounwind uwtable
; X86-AVX: # %bb.0: ; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovups (%eax), %ymm0 ; X86-AVX-NEXT: vmovups (%eax), %ymm0
; X86-AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 ; X86-AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-AVX-NEXT: retl ; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds i16, i16* %ptr, i64 0 %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 0
%ptr3 = getelementptr inbounds i16, i16* %ptr, i64 3 %ptr3 = getelementptr inbounds i16, i16* %ptr, i64 3

View File

@ -138,7 +138,7 @@ define <8 x double> @merge_8f64_f64_1u3u5zu8(double* %ptr) nounwind uwtable noin
; X86-AVX512F: # %bb.0: ; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovdqu64 8(%eax), %zmm0 ; X86-AVX512F-NEXT: vmovdqu64 8(%eax), %zmm0
; X86-AVX512F-NEXT: vpandq {{\.LCPI.*}}, %zmm0, %zmm0 ; X86-AVX512F-NEXT: vpandq {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0
; X86-AVX512F-NEXT: retl ; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds double, double* %ptr, i64 1 %ptr0 = getelementptr inbounds double, double* %ptr, i64 1
%ptr2 = getelementptr inbounds double, double* %ptr, i64 3 %ptr2 = getelementptr inbounds double, double* %ptr, i64 3
@ -217,7 +217,7 @@ define <8 x i64> @merge_8i64_i64_1u3u5zu8(i64* %ptr) nounwind uwtable noinline s
; X86-AVX512F: # %bb.0: ; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovdqu64 8(%eax), %zmm0 ; X86-AVX512F-NEXT: vmovdqu64 8(%eax), %zmm0
; X86-AVX512F-NEXT: vpandd {{\.LCPI.*}}, %zmm0, %zmm0 ; X86-AVX512F-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0
; X86-AVX512F-NEXT: retl ; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds i64, i64* %ptr, i64 1 %ptr0 = getelementptr inbounds i64, i64* %ptr, i64 1
%ptr2 = getelementptr inbounds i64, i64* %ptr, i64 3 %ptr2 = getelementptr inbounds i64, i64* %ptr, i64 3
@ -436,7 +436,7 @@ define <16 x i32> @merge_16i32_i32_0uu3zzuuuuuzCuEF(i32* %ptr) nounwind uwtable
; X86-AVX512F: # %bb.0: ; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovdqu64 (%eax), %zmm0 ; X86-AVX512F-NEXT: vmovdqu64 (%eax), %zmm0
; X86-AVX512F-NEXT: vpandd {{\.LCPI.*}}, %zmm0, %zmm0 ; X86-AVX512F-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0
; X86-AVX512F-NEXT: retl ; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds i32, i32* %ptr, i64 0 %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 0
%ptr3 = getelementptr inbounds i32, i32* %ptr, i64 3 %ptr3 = getelementptr inbounds i32, i32* %ptr, i64 3

View File

@ -33,7 +33,7 @@ define void @test0(x86_mmx* %A, x86_mmx* %B) {
; X32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; X32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; X32-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-NEXT: pmullw %xmm0, %xmm1 ; X32-NEXT: pmullw %xmm0, %xmm1
; X32-NEXT: pand {{\.LCPI.*}}, %xmm1 ; X32-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-NEXT: packuswb %xmm1, %xmm1 ; X32-NEXT: packuswb %xmm1, %xmm1
; X32-NEXT: movq %xmm1, (%eax) ; X32-NEXT: movq %xmm1, (%eax)
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero ; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
@ -658,7 +658,7 @@ define i64 @pr43922() {
; X32-NEXT: .cfi_def_cfa_register %ebp ; X32-NEXT: .cfi_def_cfa_register %ebp
; X32-NEXT: andl $-8, %esp ; X32-NEXT: andl $-8, %esp
; X32-NEXT: subl $8, %esp ; X32-NEXT: subl $8, %esp
; X32-NEXT: movq {{\.LCPI.*}}, %mm0 # mm0 = 0x7AAAAAAA7AAAAAAA ; X32-NEXT: movq {{\.LCPI[0-9]+_[0-9]+}}, %mm0 # mm0 = 0x7AAAAAAA7AAAAAAA
; X32-NEXT: psrad $255, %mm0 ; X32-NEXT: psrad $255, %mm0
; X32-NEXT: movq %mm0, (%esp) ; X32-NEXT: movq %mm0, (%esp)
; X32-NEXT: movl (%esp), %eax ; X32-NEXT: movl (%esp), %eax

View File

@ -32,7 +32,7 @@ define double @mmx_zero(double, double, double, double) nounwind {
; X86-NEXT: paddw %mm2, %mm0 ; X86-NEXT: paddw %mm2, %mm0
; X86-NEXT: paddw %mm6, %mm0 ; X86-NEXT: paddw %mm6, %mm0
; X86-NEXT: pmuludq %mm3, %mm0 ; X86-NEXT: pmuludq %mm3, %mm0
; X86-NEXT: paddw {{\.LCPI.*}}, %mm0 ; X86-NEXT: paddw {{\.LCPI[0-9]+_[0-9]+}}, %mm0
; X86-NEXT: paddw %mm1, %mm0 ; X86-NEXT: paddw %mm1, %mm0
; X86-NEXT: pmuludq %mm7, %mm0 ; X86-NEXT: pmuludq %mm7, %mm0
; X86-NEXT: pmuludq (%esp), %mm0 # 8-byte Folded Reload ; X86-NEXT: pmuludq (%esp), %mm0 # 8-byte Folded Reload
@ -70,7 +70,7 @@ define double @mmx_zero(double, double, double, double) nounwind {
; X64-NEXT: paddw %mm2, %mm0 ; X64-NEXT: paddw %mm2, %mm0
; X64-NEXT: paddw %mm6, %mm0 ; X64-NEXT: paddw %mm6, %mm0
; X64-NEXT: pmuludq %mm3, %mm0 ; X64-NEXT: pmuludq %mm3, %mm0
; X64-NEXT: paddw {{\.LCPI.*}}, %mm0 ; X64-NEXT: paddw {{\.LCPI[0-9]+_[0-9]+}}, %mm0
; X64-NEXT: paddw %mm1, %mm0 ; X64-NEXT: paddw %mm1, %mm0
; X64-NEXT: pmuludq %mm7, %mm0 ; X64-NEXT: pmuludq %mm7, %mm0
; X64-NEXT: pmuludq {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload ; X64-NEXT: pmuludq {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload

View File

@ -10,7 +10,7 @@ define float @negfp(float %a, float %b) nounwind {
; CHECK-NEXT: pushl %eax ; CHECK-NEXT: pushl %eax
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: subss {{[0-9]+}}(%esp), %xmm0 ; CHECK-NEXT: subss {{[0-9]+}}(%esp), %xmm0
; CHECK-NEXT: xorps {{\.LCPI.*}}, %xmm0 ; CHECK-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK-NEXT: movss %xmm0, (%esp) ; CHECK-NEXT: movss %xmm0, (%esp)
; CHECK-NEXT: flds (%esp) ; CHECK-NEXT: flds (%esp)
; CHECK-NEXT: popl %eax ; CHECK-NEXT: popl %eax

View File

@ -20,21 +20,21 @@ define i32 @f(<4 x float> %A, i8* %B, <2 x double> %C, i32 %D, <2 x i64> %E, <4
; X86-SSE-NEXT: movl 8(%ebp), %esi ; X86-SSE-NEXT: movl 8(%ebp), %esi
; X86-SSE-NEXT: movl 80(%ebp), %edx ; X86-SSE-NEXT: movl 80(%ebp), %edx
; X86-SSE-NEXT: movl (%edx), %eax ; X86-SSE-NEXT: movl (%edx), %eax
; X86-SSE-NEXT: addps {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: addps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movntps %xmm0, (%esi) ; X86-SSE-NEXT: movntps %xmm0, (%esi)
; X86-SSE-NEXT: paddq {{\.LCPI.*}}, %xmm2 ; X86-SSE-NEXT: paddq {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE-NEXT: addl (%edx), %eax ; X86-SSE-NEXT: addl (%edx), %eax
; X86-SSE-NEXT: movntdq %xmm2, (%esi) ; X86-SSE-NEXT: movntdq %xmm2, (%esi)
; X86-SSE-NEXT: addpd {{\.LCPI.*}}, %xmm1 ; X86-SSE-NEXT: addpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE-NEXT: addl (%edx), %eax ; X86-SSE-NEXT: addl (%edx), %eax
; X86-SSE-NEXT: movntpd %xmm1, (%esi) ; X86-SSE-NEXT: movntpd %xmm1, (%esi)
; X86-SSE-NEXT: paddd {{\.LCPI.*}}, %xmm6 ; X86-SSE-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm6
; X86-SSE-NEXT: addl (%edx), %eax ; X86-SSE-NEXT: addl (%edx), %eax
; X86-SSE-NEXT: movntdq %xmm6, (%esi) ; X86-SSE-NEXT: movntdq %xmm6, (%esi)
; X86-SSE-NEXT: paddw {{\.LCPI.*}}, %xmm5 ; X86-SSE-NEXT: paddw {{\.LCPI[0-9]+_[0-9]+}}, %xmm5
; X86-SSE-NEXT: addl (%edx), %eax ; X86-SSE-NEXT: addl (%edx), %eax
; X86-SSE-NEXT: movntdq %xmm5, (%esi) ; X86-SSE-NEXT: movntdq %xmm5, (%esi)
; X86-SSE-NEXT: paddb {{\.LCPI.*}}, %xmm4 ; X86-SSE-NEXT: paddb {{\.LCPI[0-9]+_[0-9]+}}, %xmm4
; X86-SSE-NEXT: addl (%edx), %eax ; X86-SSE-NEXT: addl (%edx), %eax
; X86-SSE-NEXT: movntdq %xmm4, (%esi) ; X86-SSE-NEXT: movntdq %xmm4, (%esi)
; X86-SSE-NEXT: addl (%edx), %eax ; X86-SSE-NEXT: addl (%edx), %eax
@ -62,21 +62,21 @@ define i32 @f(<4 x float> %A, i8* %B, <2 x double> %C, i32 %D, <2 x i64> %E, <4
; X86-AVX-NEXT: movl 8(%ebp), %edx ; X86-AVX-NEXT: movl 8(%ebp), %edx
; X86-AVX-NEXT: movl 80(%ebp), %esi ; X86-AVX-NEXT: movl 80(%ebp), %esi
; X86-AVX-NEXT: movl (%esi), %eax ; X86-AVX-NEXT: movl (%esi), %eax
; X86-AVX-NEXT: vaddps {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vaddps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovntps %xmm0, (%edx) ; X86-AVX-NEXT: vmovntps %xmm0, (%edx)
; X86-AVX-NEXT: vpaddq {{\.LCPI.*}}, %xmm2, %xmm0 ; X86-AVX-NEXT: vpaddq {{\.LCPI[0-9]+_[0-9]+}}, %xmm2, %xmm0
; X86-AVX-NEXT: addl (%esi), %eax ; X86-AVX-NEXT: addl (%esi), %eax
; X86-AVX-NEXT: vmovntdq %xmm0, (%edx) ; X86-AVX-NEXT: vmovntdq %xmm0, (%edx)
; X86-AVX-NEXT: vaddpd {{\.LCPI.*}}, %xmm1, %xmm0 ; X86-AVX-NEXT: vaddpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm0
; X86-AVX-NEXT: addl (%esi), %eax ; X86-AVX-NEXT: addl (%esi), %eax
; X86-AVX-NEXT: vmovntpd %xmm0, (%edx) ; X86-AVX-NEXT: vmovntpd %xmm0, (%edx)
; X86-AVX-NEXT: vpaddd {{\.LCPI.*}}, %xmm6, %xmm0 ; X86-AVX-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm6, %xmm0
; X86-AVX-NEXT: addl (%esi), %eax ; X86-AVX-NEXT: addl (%esi), %eax
; X86-AVX-NEXT: vmovntdq %xmm0, (%edx) ; X86-AVX-NEXT: vmovntdq %xmm0, (%edx)
; X86-AVX-NEXT: vpaddw {{\.LCPI.*}}, %xmm5, %xmm0 ; X86-AVX-NEXT: vpaddw {{\.LCPI[0-9]+_[0-9]+}}, %xmm5, %xmm0
; X86-AVX-NEXT: addl (%esi), %eax ; X86-AVX-NEXT: addl (%esi), %eax
; X86-AVX-NEXT: vmovntdq %xmm0, (%edx) ; X86-AVX-NEXT: vmovntdq %xmm0, (%edx)
; X86-AVX-NEXT: vpaddb {{\.LCPI.*}}, %xmm4, %xmm0 ; X86-AVX-NEXT: vpaddb {{\.LCPI[0-9]+_[0-9]+}}, %xmm4, %xmm0
; X86-AVX-NEXT: addl (%esi), %eax ; X86-AVX-NEXT: addl (%esi), %eax
; X86-AVX-NEXT: vmovntdq %xmm0, (%edx) ; X86-AVX-NEXT: vmovntdq %xmm0, (%edx)
; X86-AVX-NEXT: addl (%esi), %eax ; X86-AVX-NEXT: addl (%esi), %eax

View File

@ -121,14 +121,14 @@ define <8 x i16> @trunc_ashr_v4i32_icmp_v4i32(<4 x i32> %a, <4 x i32> %b) nounwi
; X86-SSE-LABEL: trunc_ashr_v4i32_icmp_v4i32: ; X86-SSE-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X86-SSE: # %bb.0: ; X86-SSE: # %bb.0:
; X86-SSE-NEXT: psrad $31, %xmm0 ; X86-SSE-NEXT: psrad $31, %xmm0
; X86-SSE-NEXT: pcmpgtd {{\.LCPI.*}}, %xmm1 ; X86-SSE-NEXT: pcmpgtd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE-NEXT: packssdw %xmm1, %xmm0 ; X86-SSE-NEXT: packssdw %xmm1, %xmm0
; X86-SSE-NEXT: retl ; X86-SSE-NEXT: retl
; ;
; X86-AVX-LABEL: trunc_ashr_v4i32_icmp_v4i32: ; X86-AVX-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X86-AVX: # %bb.0: ; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsrad $31, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; X86-AVX-NEXT: vpcmpgtd {{\.LCPI.*}}, %xmm1, %xmm1 ; X86-AVX-NEXT: vpcmpgtd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
; X86-AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 ; X86-AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: retl ; X86-AVX-NEXT: retl
; ;

View File

@ -10,7 +10,7 @@ define void @foo(i32 %n, double* nocapture %p) nounwind {
; CHECK-NEXT: .LBB0_1: # %bb ; CHECK-NEXT: .LBB0_1: # %bb
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: fldl (%eax,%ecx,8) ; CHECK-NEXT: fldl (%eax,%ecx,8)
; CHECK-NEXT: fmull {{\.LCPI.*}} ; CHECK-NEXT: fmull {{\.LCPI[0-9]+_[0-9]+}}
; CHECK-NEXT: fstpl (%eax,%ecx,8) ; CHECK-NEXT: fstpl (%eax,%ecx,8)
; CHECK-NEXT: decl %ecx ; CHECK-NEXT: decl %ecx
; CHECK-NEXT: js .LBB0_1 ; CHECK-NEXT: js .LBB0_1

View File

@ -133,7 +133,7 @@ define <4 x i32> @ICMP0(<4 x i8*>* %p0, <4 x i8*>* %p1) nounwind {
; CHECK-NEXT: movdqa (%ecx), %xmm0 ; CHECK-NEXT: movdqa (%ecx), %xmm0
; CHECK-NEXT: pcmpgtd (%eax), %xmm0 ; CHECK-NEXT: pcmpgtd (%eax), %xmm0
; CHECK-NEXT: movaps {{.*#+}} xmm1 = [9,8,7,6] ; CHECK-NEXT: movaps {{.*#+}} xmm1 = [9,8,7,6]
; CHECK-NEXT: blendvps %xmm0, {{\.LCPI.*}}, %xmm1 ; CHECK-NEXT: blendvps %xmm0, {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; CHECK-NEXT: movaps %xmm1, %xmm0 ; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: retl ; CHECK-NEXT: retl
entry: entry:
@ -152,7 +152,7 @@ define <4 x i32> @ICMP1(<4 x i8*>* %p0, <4 x i8*>* %p1) nounwind {
; CHECK-NEXT: movdqa (%ecx), %xmm0 ; CHECK-NEXT: movdqa (%ecx), %xmm0
; CHECK-NEXT: pcmpeqd (%eax), %xmm0 ; CHECK-NEXT: pcmpeqd (%eax), %xmm0
; CHECK-NEXT: movaps {{.*#+}} xmm1 = [9,8,7,6] ; CHECK-NEXT: movaps {{.*#+}} xmm1 = [9,8,7,6]
; CHECK-NEXT: blendvps %xmm0, {{\.LCPI.*}}, %xmm1 ; CHECK-NEXT: blendvps %xmm0, {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; CHECK-NEXT: movaps %xmm1, %xmm0 ; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: retl ; CHECK-NEXT: retl
entry: entry:

View File

@ -253,7 +253,7 @@ define i64 @cnt64(i64 %x) nounwind readnone {
; X86-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero ; X86-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: psrlw $1, %xmm1 ; X86-SSE2-NEXT: psrlw $1, %xmm1
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: psubb %xmm1, %xmm0 ; X86-SSE2-NEXT: psubb %xmm1, %xmm0
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
@ -264,7 +264,7 @@ define i64 @cnt64(i64 %x) nounwind readnone {
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: psrlw $4, %xmm1 ; X86-SSE2-NEXT: psrlw $4, %xmm1
; X86-SSE2-NEXT: paddb %xmm0, %xmm1 ; X86-SSE2-NEXT: paddb %xmm0, %xmm1
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pxor %xmm0, %xmm0 ; X86-SSE2-NEXT: pxor %xmm0, %xmm0
; X86-SSE2-NEXT: psadbw %xmm1, %xmm0 ; X86-SSE2-NEXT: psadbw %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax ; X86-SSE2-NEXT: movd %xmm0, %eax
@ -749,7 +749,7 @@ define i64 @cnt64_optsize(i64 %x) nounwind readnone optsize {
; X86-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero ; X86-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: psrlw $1, %xmm1 ; X86-SSE2-NEXT: psrlw $1, %xmm1
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: psubb %xmm1, %xmm0 ; X86-SSE2-NEXT: psubb %xmm1, %xmm0
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
@ -760,7 +760,7 @@ define i64 @cnt64_optsize(i64 %x) nounwind readnone optsize {
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: psrlw $4, %xmm1 ; X86-SSE2-NEXT: psrlw $4, %xmm1
; X86-SSE2-NEXT: paddb %xmm0, %xmm1 ; X86-SSE2-NEXT: paddb %xmm0, %xmm1
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pxor %xmm0, %xmm0 ; X86-SSE2-NEXT: pxor %xmm0, %xmm0
; X86-SSE2-NEXT: psadbw %xmm1, %xmm0 ; X86-SSE2-NEXT: psadbw %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax ; X86-SSE2-NEXT: movd %xmm0, %eax
@ -1178,7 +1178,7 @@ define i64 @cnt64_pgso(i64 %x) nounwind readnone !prof !14 {
; X86-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero ; X86-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: psrlw $1, %xmm1 ; X86-SSE2-NEXT: psrlw $1, %xmm1
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: psubb %xmm1, %xmm0 ; X86-SSE2-NEXT: psubb %xmm1, %xmm0
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
@ -1189,7 +1189,7 @@ define i64 @cnt64_pgso(i64 %x) nounwind readnone !prof !14 {
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: psrlw $4, %xmm1 ; X86-SSE2-NEXT: psrlw $4, %xmm1
; X86-SSE2-NEXT: paddb %xmm0, %xmm1 ; X86-SSE2-NEXT: paddb %xmm0, %xmm1
; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1 ; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pxor %xmm0, %xmm0 ; X86-SSE2-NEXT: pxor %xmm0, %xmm0
; X86-SSE2-NEXT: psadbw %xmm1, %xmm0 ; X86-SSE2-NEXT: psadbw %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax ; X86-SSE2-NEXT: movd %xmm0, %eax

View File

@ -19,10 +19,10 @@ define void @test_convert_float2_ulong2(<2 x i64>* nocapture %src, <2 x float>*
; CHECK-NEXT: movl %edx, {{[0-9]+}}(%esp) ; CHECK-NEXT: movl %edx, {{[0-9]+}}(%esp)
; CHECK-NEXT: shrl $31, %ecx ; CHECK-NEXT: shrl $31, %ecx
; CHECK-NEXT: fildll (%esp) ; CHECK-NEXT: fildll (%esp)
; CHECK-NEXT: fadds {{\.LCPI.*}}(,%ecx,4) ; CHECK-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; CHECK-NEXT: shrl $31, %esi ; CHECK-NEXT: shrl $31, %esi
; CHECK-NEXT: fildll {{[0-9]+}}(%esp) ; CHECK-NEXT: fildll {{[0-9]+}}(%esp)
; CHECK-NEXT: fadds {{\.LCPI.*}}(,%esi,4) ; CHECK-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%esi,4)
; CHECK-NEXT: fstps 84(%eax) ; CHECK-NEXT: fstps 84(%eax)
; CHECK-NEXT: fstps 80(%eax) ; CHECK-NEXT: fstps 80(%eax)
; CHECK-NEXT: addl $20, %esp ; CHECK-NEXT: addl $20, %esp

View File

@ -48,8 +48,8 @@ define void @computeJD(%struct.DateTime*) nounwind {
; CHECK-NEXT: leal 257(%ecx,%edx), %eax ; CHECK-NEXT: leal 257(%ecx,%edx), %eax
; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp) ; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp)
; CHECK-NEXT: fildl {{[0-9]+}}(%esp) ; CHECK-NEXT: fildl {{[0-9]+}}(%esp)
; CHECK-NEXT: fadds {{\.LCPI.*}} ; CHECK-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; CHECK-NEXT: fmuls {{\.LCPI.*}} ; CHECK-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; CHECK-NEXT: fnstcw {{[0-9]+}}(%esp) ; CHECK-NEXT: fnstcw {{[0-9]+}}(%esp)
; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: orl $3072, %eax # imm = 0xC00 ; CHECK-NEXT: orl $3072, %eax # imm = 0xC00
@ -62,7 +62,7 @@ define void @computeJD(%struct.DateTime*) nounwind {
; CHECK-NEXT: imull $60000, 24(%ebx), %ecx # imm = 0xEA60 ; CHECK-NEXT: imull $60000, 24(%ebx), %ecx # imm = 0xEA60
; CHECK-NEXT: addl %eax, %ecx ; CHECK-NEXT: addl %eax, %ecx
; CHECK-NEXT: fldl 28(%ebx) ; CHECK-NEXT: fldl 28(%ebx)
; CHECK-NEXT: fmuls {{\.LCPI.*}} ; CHECK-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; CHECK-NEXT: fnstcw {{[0-9]+}}(%esp) ; CHECK-NEXT: fnstcw {{[0-9]+}}(%esp)
; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: orl $3072, %eax # imm = 0xC00 ; CHECK-NEXT: orl $3072, %eax # imm = 0xC00

View File

@ -6,18 +6,18 @@ define void @pr34605(i8* nocapture %s, i32 %p) {
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: vpbroadcastd {{[0-9]+}}(%esp), %zmm0 ; CHECK-NEXT: vpbroadcastd {{[0-9]+}}(%esp), %zmm0
; CHECK-NEXT: vpcmpeqd {{\.LCPI.*}}, %zmm0, %k0 ; CHECK-NEXT: vpcmpeqd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %k0
; CHECK-NEXT: vpcmpeqd {{\.LCPI.*}}, %zmm0, %k1 ; CHECK-NEXT: vpcmpeqd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %k1
; CHECK-NEXT: kunpckwd %k0, %k1, %k0 ; CHECK-NEXT: kunpckwd %k0, %k1, %k0
; CHECK-NEXT: vpcmpeqd {{\.LCPI.*}}, %zmm0, %k1 ; CHECK-NEXT: vpcmpeqd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %k1
; CHECK-NEXT: vpcmpeqd {{\.LCPI.*}}, %zmm0, %k2 ; CHECK-NEXT: vpcmpeqd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %k2
; CHECK-NEXT: kunpckwd %k1, %k2, %k1 ; CHECK-NEXT: kunpckwd %k1, %k2, %k1
; CHECK-NEXT: kunpckdq %k0, %k1, %k0 ; CHECK-NEXT: kunpckdq %k0, %k1, %k0
; CHECK-NEXT: movl $1, %ecx ; CHECK-NEXT: movl $1, %ecx
; CHECK-NEXT: kmovd %ecx, %k1 ; CHECK-NEXT: kmovd %ecx, %k1
; CHECK-NEXT: kmovd %k1, %k1 ; CHECK-NEXT: kmovd %k1, %k1
; CHECK-NEXT: kandq %k1, %k0, %k1 ; CHECK-NEXT: kandq %k1, %k0, %k1
; CHECK-NEXT: vmovdqu8 {{\.LCPI.*}}, %zmm0 {%k1} {z} ; CHECK-NEXT: vmovdqu8 {{\.LCPI[0-9]+_[0-9]+}}, %zmm0 {%k1} {z}
; CHECK-NEXT: vmovdqu64 %zmm0, (%eax) ; CHECK-NEXT: vmovdqu64 %zmm0, (%eax)
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0 ; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovdqu64 %zmm0, 64(%eax) ; CHECK-NEXT: vmovdqu64 %zmm0, 64(%eax)

View File

@ -41,7 +41,7 @@ define zeroext i1 @_Z8test_cosv() {
; CHECK-NEXT: .cfi_def_cfa_offset 12 ; CHECK-NEXT: .cfi_def_cfa_offset 12
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero ; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: divss {{\.LCPI.*}}, %xmm0 ; CHECK-NEXT: divss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK-NEXT: movss %xmm0, {{[0-9]+}}(%esp) ; CHECK-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
; CHECK-NEXT: flds {{[0-9]+}}(%esp) ; CHECK-NEXT: flds {{[0-9]+}}(%esp)
; CHECK-NEXT: #APP ; CHECK-NEXT: #APP
@ -51,7 +51,7 @@ define zeroext i1 @_Z8test_cosv() {
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: ucomiss %xmm0, %xmm1 ; CHECK-NEXT: ucomiss %xmm0, %xmm1
; CHECK-NEXT: setae %cl ; CHECK-NEXT: setae %cl
; CHECK-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 ; CHECK-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK-NEXT: setae %al ; CHECK-NEXT: setae %al
; CHECK-NEXT: andb %cl, %al ; CHECK-NEXT: andb %cl, %al
; CHECK-NEXT: addl $8, %esp ; CHECK-NEXT: addl $8, %esp

View File

@ -7,7 +7,7 @@ define <8 x i32> @foo(<8 x i64> %x, <4 x i64> %y) {
; CHECK-LABEL: foo: ; CHECK-LABEL: foo:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vandps %ymm2, %ymm0, %ymm0 ; CHECK-NEXT: vandps %ymm2, %ymm0, %ymm0
; CHECK-NEXT: vandps {{\.LCPI.*}}, %ymm1, %ymm1 ; CHECK-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1
; CHECK-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3] ; CHECK-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6] ; CHECK-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]

View File

@ -22,7 +22,7 @@ define void @f(<16 x i8>* %out, <16 x i8> %in, i1 %flag) {
; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0] ; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; CHECK-NEXT: paddb %xmm1, %xmm1 ; CHECK-NEXT: paddb %xmm1, %xmm1
; CHECK-NEXT: pxor %xmm0, %xmm1 ; CHECK-NEXT: pxor %xmm0, %xmm1
; CHECK-NEXT: pxor {{\.LCPI.*}}@GOTOFF(%eax), %xmm1 ; CHECK-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}@GOTOFF(%eax), %xmm1
; CHECK-NEXT: movdqa %xmm1, (%ecx) ; CHECK-NEXT: movdqa %xmm1, (%ecx)
; CHECK-NEXT: retl ; CHECK-NEXT: retl
entry: entry:

View File

@ -13,7 +13,7 @@ define <7 x i1> @create_mask7(i64 %0) {
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: mov rax, rdi ; CHECK-NEXT: mov rax, rdi
; CHECK-NEXT: vpbroadcastq zmm0, rsi ; CHECK-NEXT: vpbroadcastq zmm0, rsi
; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] ; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: kshiftrb k1, k0, 6 ; CHECK-NEXT: kshiftrb k1, k0, 6
; CHECK-NEXT: kmovd r8d, k1 ; CHECK-NEXT: kmovd r8d, k1
; CHECK-NEXT: kshiftrb k1, k0, 5 ; CHECK-NEXT: kshiftrb k1, k0, 5
@ -57,8 +57,8 @@ define <16 x i1> @create_mask16(i64 %0) {
; CHECK-LABEL: create_mask16: ; CHECK-LABEL: create_mask16:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastq zmm0, rdi ; CHECK-NEXT: vpbroadcastq zmm0, rdi
; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] ; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] ; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: kunpckbw k0, k1, k0 ; CHECK-NEXT: kunpckbw k0, k1, k0
; CHECK-NEXT: vpmovm2b xmm0, k0 ; CHECK-NEXT: vpmovm2b xmm0, k0
; CHECK-NEXT: vzeroupper ; CHECK-NEXT: vzeroupper
@ -71,11 +71,11 @@ define <32 x i1> @create_mask32(i64 %0) {
; CHECK-LABEL: create_mask32: ; CHECK-LABEL: create_mask32:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastq zmm0, rdi ; CHECK-NEXT: vpbroadcastq zmm0, rdi
; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] ; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] ; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] ; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: kunpckbw k0, k1, k0 ; CHECK-NEXT: kunpckbw k0, k1, k0
; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] ; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: kunpckbw k1, k1, k2 ; CHECK-NEXT: kunpckbw k1, k1, k2
; CHECK-NEXT: kunpckwd k0, k1, k0 ; CHECK-NEXT: kunpckwd k0, k1, k0
; CHECK-NEXT: vpmovm2b ymm0, k0 ; CHECK-NEXT: vpmovm2b ymm0, k0
@ -88,18 +88,18 @@ define <64 x i1> @create_mask64(i64 %0) {
; CHECK-LABEL: create_mask64: ; CHECK-LABEL: create_mask64:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastq zmm0, rdi ; CHECK-NEXT: vpbroadcastq zmm0, rdi
; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] ; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] ; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] ; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: kunpckbw k0, k1, k0 ; CHECK-NEXT: kunpckbw k0, k1, k0
; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] ; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: kunpckbw k1, k1, k2 ; CHECK-NEXT: kunpckbw k1, k1, k2
; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] ; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: kunpckwd k0, k1, k0 ; CHECK-NEXT: kunpckwd k0, k1, k0
; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] ; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: kunpckbw k1, k1, k2 ; CHECK-NEXT: kunpckbw k1, k1, k2
; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] ; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: vpcmpnleuq k3, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] ; CHECK-NEXT: vpcmpnleuq k3, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: kunpckbw k2, k3, k2 ; CHECK-NEXT: kunpckbw k2, k3, k2
; CHECK-NEXT: kunpckwd k1, k2, k1 ; CHECK-NEXT: kunpckwd k1, k2, k1
; CHECK-NEXT: kunpckdq k0, k1, k0 ; CHECK-NEXT: kunpckdq k0, k1, k0
@ -113,7 +113,7 @@ define <16 x i1> @create_mask16_i32(i32 %0) {
; CHECK-LABEL: create_mask16_i32: ; CHECK-LABEL: create_mask16_i32:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastd zmm0, edi ; CHECK-NEXT: vpbroadcastd zmm0, edi
; CHECK-NEXT: vpcmpnleud k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] ; CHECK-NEXT: vpcmpnleud k0, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: vpmovm2b xmm0, k0 ; CHECK-NEXT: vpmovm2b xmm0, k0
; CHECK-NEXT: vzeroupper ; CHECK-NEXT: vzeroupper
; CHECK-NEXT: ret ; CHECK-NEXT: ret
@ -125,11 +125,11 @@ define <64 x i1> @create_mask64_i32(i32 %0) {
; CHECK-LABEL: create_mask64_i32: ; CHECK-LABEL: create_mask64_i32:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastd zmm0, edi ; CHECK-NEXT: vpbroadcastd zmm0, edi
; CHECK-NEXT: vpcmpnleud k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] ; CHECK-NEXT: vpcmpnleud k0, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: vpcmpnleud k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] ; CHECK-NEXT: vpcmpnleud k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: vpcmpnleud k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] ; CHECK-NEXT: vpcmpnleud k2, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: kunpckwd k0, k1, k0 ; CHECK-NEXT: kunpckwd k0, k1, k0
; CHECK-NEXT: vpcmpnleud k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] ; CHECK-NEXT: vpcmpnleud k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: kunpckwd k1, k1, k2 ; CHECK-NEXT: kunpckwd k1, k1, k2
; CHECK-NEXT: kunpckdq k0, k1, k0 ; CHECK-NEXT: kunpckdq k0, k1, k0
; CHECK-NEXT: vpmovm2b zmm0, k0 ; CHECK-NEXT: vpmovm2b zmm0, k0

View File

@ -109,7 +109,7 @@ define <4 x i32> @vrolw_extract_mul_with_mask(<4 x i32> %i) nounwind {
; X86-NEXT: vpbroadcastd {{.*#+}} xmm1 = [9,9,9,9] ; X86-NEXT: vpbroadcastd {{.*#+}} xmm1 = [9,9,9,9]
; X86-NEXT: vpmulld %xmm1, %xmm0, %xmm0 ; X86-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X86-NEXT: vprold $7, %zmm0, %zmm0 ; X86-NEXT: vprold $7, %zmm0, %zmm0
; X86-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-NEXT: vzeroupper ; X86-NEXT: vzeroupper
; X86-NEXT: retl ; X86-NEXT: retl
; ;
@ -132,8 +132,8 @@ define <4 x i32> @vrolw_extract_mul_with_mask(<4 x i32> %i) nounwind {
define <32 x i16> @illegal_no_extract_mul(<32 x i16> %i) nounwind { define <32 x i16> @illegal_no_extract_mul(<32 x i16> %i) nounwind {
; X86-LABEL: illegal_no_extract_mul: ; X86-LABEL: illegal_no_extract_mul:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: vpmullw {{\.LCPI.*}}, %zmm0, %zmm1 ; X86-NEXT: vpmullw {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm1
; X86-NEXT: vpmullw {{\.LCPI.*}}, %zmm0, %zmm0 ; X86-NEXT: vpmullw {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0
; X86-NEXT: vpsrlw $10, %zmm0, %zmm0 ; X86-NEXT: vpsrlw $10, %zmm0, %zmm0
; X86-NEXT: vporq %zmm0, %zmm1, %zmm0 ; X86-NEXT: vporq %zmm0, %zmm1, %zmm0
; X86-NEXT: retl ; X86-NEXT: retl

View File

@ -267,7 +267,7 @@ define i64 @f_to_u64(float %a) nounwind {
; X87-LIN: # %bb.0: ; X87-LIN: # %bb.0:
; X87-LIN-NEXT: subl $20, %esp ; X87-LIN-NEXT: subl $20, %esp
; X87-LIN-NEXT: flds {{[0-9]+}}(%esp) ; X87-LIN-NEXT: flds {{[0-9]+}}(%esp)
; X87-LIN-NEXT: flds {{\.LCPI.*}} ; X87-LIN-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-LIN-NEXT: fucom %st(1) ; X87-LIN-NEXT: fucom %st(1)
; X87-LIN-NEXT: fnstsw %ax ; X87-LIN-NEXT: fnstsw %ax
; X87-LIN-NEXT: xorl %edx, %edx ; X87-LIN-NEXT: xorl %edx, %edx
@ -691,7 +691,7 @@ define i64 @d_to_u64(double %a) nounwind {
; X87-LIN: # %bb.0: ; X87-LIN: # %bb.0:
; X87-LIN-NEXT: subl $20, %esp ; X87-LIN-NEXT: subl $20, %esp
; X87-LIN-NEXT: fldl {{[0-9]+}}(%esp) ; X87-LIN-NEXT: fldl {{[0-9]+}}(%esp)
; X87-LIN-NEXT: flds {{\.LCPI.*}} ; X87-LIN-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-LIN-NEXT: fucom %st(1) ; X87-LIN-NEXT: fucom %st(1)
; X87-LIN-NEXT: fnstsw %ax ; X87-LIN-NEXT: fnstsw %ax
; X87-LIN-NEXT: xorl %edx, %edx ; X87-LIN-NEXT: xorl %edx, %edx
@ -914,7 +914,7 @@ define i64 @x_to_u64(x86_fp80 %a) nounwind {
; X86-AVX512-LIN: # %bb.0: ; X86-AVX512-LIN: # %bb.0:
; X86-AVX512-LIN-NEXT: subl $12, %esp ; X86-AVX512-LIN-NEXT: subl $12, %esp
; X86-AVX512-LIN-NEXT: fldt {{[0-9]+}}(%esp) ; X86-AVX512-LIN-NEXT: fldt {{[0-9]+}}(%esp)
; X86-AVX512-LIN-NEXT: flds {{\.LCPI.*}} ; X86-AVX512-LIN-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-AVX512-LIN-NEXT: xorl %edx, %edx ; X86-AVX512-LIN-NEXT: xorl %edx, %edx
; X86-AVX512-LIN-NEXT: fucomi %st(1), %st ; X86-AVX512-LIN-NEXT: fucomi %st(1), %st
; X86-AVX512-LIN-NEXT: fldz ; X86-AVX512-LIN-NEXT: fldz
@ -990,7 +990,7 @@ define i64 @x_to_u64(x86_fp80 %a) nounwind {
; X86-SSE3-LIN: # %bb.0: ; X86-SSE3-LIN: # %bb.0:
; X86-SSE3-LIN-NEXT: subl $12, %esp ; X86-SSE3-LIN-NEXT: subl $12, %esp
; X86-SSE3-LIN-NEXT: fldt {{[0-9]+}}(%esp) ; X86-SSE3-LIN-NEXT: fldt {{[0-9]+}}(%esp)
; X86-SSE3-LIN-NEXT: flds {{\.LCPI.*}} ; X86-SSE3-LIN-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE3-LIN-NEXT: xorl %edx, %edx ; X86-SSE3-LIN-NEXT: xorl %edx, %edx
; X86-SSE3-LIN-NEXT: fucomi %st(1), %st ; X86-SSE3-LIN-NEXT: fucomi %st(1), %st
; X86-SSE3-LIN-NEXT: fldz ; X86-SSE3-LIN-NEXT: fldz
@ -1072,7 +1072,7 @@ define i64 @x_to_u64(x86_fp80 %a) nounwind {
; X86-SSE2-LIN: # %bb.0: ; X86-SSE2-LIN: # %bb.0:
; X86-SSE2-LIN-NEXT: subl $20, %esp ; X86-SSE2-LIN-NEXT: subl $20, %esp
; X86-SSE2-LIN-NEXT: fldt {{[0-9]+}}(%esp) ; X86-SSE2-LIN-NEXT: fldt {{[0-9]+}}(%esp)
; X86-SSE2-LIN-NEXT: flds {{\.LCPI.*}} ; X86-SSE2-LIN-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE2-LIN-NEXT: xorl %edx, %edx ; X86-SSE2-LIN-NEXT: xorl %edx, %edx
; X86-SSE2-LIN-NEXT: fucomi %st(1), %st ; X86-SSE2-LIN-NEXT: fucomi %st(1), %st
; X86-SSE2-LIN-NEXT: setbe %dl ; X86-SSE2-LIN-NEXT: setbe %dl
@ -1180,7 +1180,7 @@ define i64 @x_to_u64(x86_fp80 %a) nounwind {
; X87-LIN: # %bb.0: ; X87-LIN: # %bb.0:
; X87-LIN-NEXT: subl $20, %esp ; X87-LIN-NEXT: subl $20, %esp
; X87-LIN-NEXT: fldt {{[0-9]+}}(%esp) ; X87-LIN-NEXT: fldt {{[0-9]+}}(%esp)
; X87-LIN-NEXT: flds {{\.LCPI.*}} ; X87-LIN-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-LIN-NEXT: fucom %st(1) ; X87-LIN-NEXT: fucom %st(1)
; X87-LIN-NEXT: fnstsw %ax ; X87-LIN-NEXT: fnstsw %ax
; X87-LIN-NEXT: xorl %edx, %edx ; X87-LIN-NEXT: xorl %edx, %edx

View File

@ -33,8 +33,8 @@ define float @u32_to_f(i32 %a) nounwind {
; SSE2_32: # %bb.0: ; SSE2_32: # %bb.0:
; SSE2_32-NEXT: pushl %eax ; SSE2_32-NEXT: pushl %eax
; SSE2_32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSE2_32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2_32-NEXT: orpd {{\.LCPI.*}}, %xmm0 ; SSE2_32-NEXT: orpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE2_32-NEXT: subsd {{\.LCPI.*}}, %xmm0 ; SSE2_32-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE2_32-NEXT: cvtsd2ss %xmm0, %xmm0 ; SSE2_32-NEXT: cvtsd2ss %xmm0, %xmm0
; SSE2_32-NEXT: movss %xmm0, (%esp) ; SSE2_32-NEXT: movss %xmm0, (%esp)
; SSE2_32-NEXT: flds (%esp) ; SSE2_32-NEXT: flds (%esp)
@ -147,8 +147,8 @@ define double @u32_to_d(i32 %a) nounwind {
; SSE2_32-NEXT: andl $-8, %esp ; SSE2_32-NEXT: andl $-8, %esp
; SSE2_32-NEXT: subl $8, %esp ; SSE2_32-NEXT: subl $8, %esp
; SSE2_32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSE2_32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2_32-NEXT: orpd {{\.LCPI.*}}, %xmm0 ; SSE2_32-NEXT: orpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE2_32-NEXT: subsd {{\.LCPI.*}}, %xmm0 ; SSE2_32-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE2_32-NEXT: movsd %xmm0, (%esp) ; SSE2_32-NEXT: movsd %xmm0, (%esp)
; SSE2_32-NEXT: fldl (%esp) ; SSE2_32-NEXT: fldl (%esp)
; SSE2_32-NEXT: movl %ebp, %esp ; SSE2_32-NEXT: movl %ebp, %esp
@ -333,7 +333,7 @@ define float @u64_to_f(i64 %a) nounwind {
; AVX512F_32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp) ; AVX512F_32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
; AVX512F_32-NEXT: shrl $31, %eax ; AVX512F_32-NEXT: shrl $31, %eax
; AVX512F_32-NEXT: fildll {{[0-9]+}}(%esp) ; AVX512F_32-NEXT: fildll {{[0-9]+}}(%esp)
; AVX512F_32-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; AVX512F_32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX512F_32-NEXT: fstps {{[0-9]+}}(%esp) ; AVX512F_32-NEXT: fstps {{[0-9]+}}(%esp)
; AVX512F_32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; AVX512F_32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512F_32-NEXT: vmovss %xmm0, (%esp) ; AVX512F_32-NEXT: vmovss %xmm0, (%esp)
@ -353,7 +353,7 @@ define float @u64_to_f(i64 %a) nounwind {
; SSE2_32-NEXT: movlps %xmm0, {{[0-9]+}}(%esp) ; SSE2_32-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
; SSE2_32-NEXT: shrl $31, %eax ; SSE2_32-NEXT: shrl $31, %eax
; SSE2_32-NEXT: fildll {{[0-9]+}}(%esp) ; SSE2_32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE2_32-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; SSE2_32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE2_32-NEXT: fstps {{[0-9]+}}(%esp) ; SSE2_32-NEXT: fstps {{[0-9]+}}(%esp)
; SSE2_32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSE2_32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2_32-NEXT: movss %xmm0, (%esp) ; SSE2_32-NEXT: movss %xmm0, (%esp)
@ -392,7 +392,7 @@ define float @u64_to_f(i64 %a) nounwind {
; SSE1_32-NEXT: fstpl {{[0-9]+}}(%esp) ; SSE1_32-NEXT: fstpl {{[0-9]+}}(%esp)
; SSE1_32-NEXT: shrl $31, %ecx ; SSE1_32-NEXT: shrl $31, %ecx
; SSE1_32-NEXT: fildll {{[0-9]+}}(%esp) ; SSE1_32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE1_32-NEXT: fadds {{\.LCPI.*}}(,%ecx,4) ; SSE1_32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; SSE1_32-NEXT: fstps {{[0-9]+}}(%esp) ; SSE1_32-NEXT: fstps {{[0-9]+}}(%esp)
; SSE1_32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSE1_32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE1_32-NEXT: movss %xmm0, (%esp) ; SSE1_32-NEXT: movss %xmm0, (%esp)
@ -413,7 +413,7 @@ define float @u64_to_f(i64 %a) nounwind {
; X87-NEXT: movl %eax, {{[0-9]+}}(%esp) ; X87-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X87-NEXT: shrl $31, %ecx ; X87-NEXT: shrl $31, %ecx
; X87-NEXT: fildll {{[0-9]+}}(%esp) ; X87-NEXT: fildll {{[0-9]+}}(%esp)
; X87-NEXT: fadds {{\.LCPI.*}}(,%ecx,4) ; X87-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X87-NEXT: fstps {{[0-9]+}}(%esp) ; X87-NEXT: fstps {{[0-9]+}}(%esp)
; X87-NEXT: flds {{[0-9]+}}(%esp) ; X87-NEXT: flds {{[0-9]+}}(%esp)
; X87-NEXT: movl %ebp, %esp ; X87-NEXT: movl %ebp, %esp
@ -652,7 +652,7 @@ define double @u64_to_d(i64 %a) nounwind {
; AVX512F_32-NEXT: subl $8, %esp ; AVX512F_32-NEXT: subl $8, %esp
; AVX512F_32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; AVX512F_32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512F_32-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; AVX512F_32-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX512F_32-NEXT: vsubpd {{\.LCPI.*}}, %xmm0, %xmm0 ; AVX512F_32-NEXT: vsubpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX512F_32-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; AVX512F_32-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512F_32-NEXT: vaddsd %xmm0, %xmm1, %xmm0 ; AVX512F_32-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX512F_32-NEXT: vmovsd %xmm0, (%esp) ; AVX512F_32-NEXT: vmovsd %xmm0, (%esp)
@ -669,7 +669,7 @@ define double @u64_to_d(i64 %a) nounwind {
; SSE2_32-NEXT: subl $8, %esp ; SSE2_32-NEXT: subl $8, %esp
; SSE2_32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; SSE2_32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2_32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; SSE2_32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE2_32-NEXT: subpd {{\.LCPI.*}}, %xmm0 ; SSE2_32-NEXT: subpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE2_32-NEXT: movapd %xmm0, %xmm1 ; SSE2_32-NEXT: movapd %xmm0, %xmm1
; SSE2_32-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] ; SSE2_32-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2_32-NEXT: addsd %xmm0, %xmm1 ; SSE2_32-NEXT: addsd %xmm0, %xmm1
@ -701,7 +701,7 @@ define double @u64_to_d(i64 %a) nounwind {
; SSE1_32-NEXT: movl %eax, (%esp) ; SSE1_32-NEXT: movl %eax, (%esp)
; SSE1_32-NEXT: shrl $31, %ecx ; SSE1_32-NEXT: shrl $31, %ecx
; SSE1_32-NEXT: fildll (%esp) ; SSE1_32-NEXT: fildll (%esp)
; SSE1_32-NEXT: fadds {{\.LCPI.*}}(,%ecx,4) ; SSE1_32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; SSE1_32-NEXT: fstpl {{[0-9]+}}(%esp) ; SSE1_32-NEXT: fstpl {{[0-9]+}}(%esp)
; SSE1_32-NEXT: fldl {{[0-9]+}}(%esp) ; SSE1_32-NEXT: fldl {{[0-9]+}}(%esp)
; SSE1_32-NEXT: movl %ebp, %esp ; SSE1_32-NEXT: movl %ebp, %esp
@ -720,7 +720,7 @@ define double @u64_to_d(i64 %a) nounwind {
; X87-NEXT: movl %eax, (%esp) ; X87-NEXT: movl %eax, (%esp)
; X87-NEXT: shrl $31, %ecx ; X87-NEXT: shrl $31, %ecx
; X87-NEXT: fildll (%esp) ; X87-NEXT: fildll (%esp)
; X87-NEXT: fadds {{\.LCPI.*}}(,%ecx,4) ; X87-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X87-NEXT: fstpl {{[0-9]+}}(%esp) ; X87-NEXT: fstpl {{[0-9]+}}(%esp)
; X87-NEXT: fldl {{[0-9]+}}(%esp) ; X87-NEXT: fldl {{[0-9]+}}(%esp)
; X87-NEXT: movl %ebp, %esp ; X87-NEXT: movl %ebp, %esp
@ -774,7 +774,7 @@ define double @u64_to_d_optsize(i64 %a) nounwind optsize {
; AVX512F_32-NEXT: subl $8, %esp ; AVX512F_32-NEXT: subl $8, %esp
; AVX512F_32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; AVX512F_32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512F_32-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; AVX512F_32-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX512F_32-NEXT: vsubpd {{\.LCPI.*}}, %xmm0, %xmm0 ; AVX512F_32-NEXT: vsubpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX512F_32-NEXT: vhaddpd %xmm0, %xmm0, %xmm0 ; AVX512F_32-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX512F_32-NEXT: vmovlpd %xmm0, (%esp) ; AVX512F_32-NEXT: vmovlpd %xmm0, (%esp)
; AVX512F_32-NEXT: fldl (%esp) ; AVX512F_32-NEXT: fldl (%esp)
@ -790,7 +790,7 @@ define double @u64_to_d_optsize(i64 %a) nounwind optsize {
; SSE2_32-NEXT: subl $8, %esp ; SSE2_32-NEXT: subl $8, %esp
; SSE2_32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; SSE2_32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2_32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; SSE2_32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE2_32-NEXT: subpd {{\.LCPI.*}}, %xmm0 ; SSE2_32-NEXT: subpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE2_32-NEXT: movapd %xmm0, %xmm1 ; SSE2_32-NEXT: movapd %xmm0, %xmm1
; SSE2_32-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] ; SSE2_32-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2_32-NEXT: addsd %xmm0, %xmm1 ; SSE2_32-NEXT: addsd %xmm0, %xmm1
@ -822,7 +822,7 @@ define double @u64_to_d_optsize(i64 %a) nounwind optsize {
; SSE1_32-NEXT: movl %eax, (%esp) ; SSE1_32-NEXT: movl %eax, (%esp)
; SSE1_32-NEXT: shrl $31, %ecx ; SSE1_32-NEXT: shrl $31, %ecx
; SSE1_32-NEXT: fildll (%esp) ; SSE1_32-NEXT: fildll (%esp)
; SSE1_32-NEXT: fadds {{\.LCPI.*}}(,%ecx,4) ; SSE1_32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; SSE1_32-NEXT: fstpl {{[0-9]+}}(%esp) ; SSE1_32-NEXT: fstpl {{[0-9]+}}(%esp)
; SSE1_32-NEXT: fldl {{[0-9]+}}(%esp) ; SSE1_32-NEXT: fldl {{[0-9]+}}(%esp)
; SSE1_32-NEXT: movl %ebp, %esp ; SSE1_32-NEXT: movl %ebp, %esp
@ -841,7 +841,7 @@ define double @u64_to_d_optsize(i64 %a) nounwind optsize {
; X87-NEXT: movl %eax, (%esp) ; X87-NEXT: movl %eax, (%esp)
; X87-NEXT: shrl $31, %ecx ; X87-NEXT: shrl $31, %ecx
; X87-NEXT: fildll (%esp) ; X87-NEXT: fildll (%esp)
; X87-NEXT: fadds {{\.LCPI.*}}(,%ecx,4) ; X87-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X87-NEXT: fstpl {{[0-9]+}}(%esp) ; X87-NEXT: fstpl {{[0-9]+}}(%esp)
; X87-NEXT: fldl {{[0-9]+}}(%esp) ; X87-NEXT: fldl {{[0-9]+}}(%esp)
; X87-NEXT: movl %ebp, %esp ; X87-NEXT: movl %ebp, %esp
@ -1076,7 +1076,7 @@ define x86_fp80 @u64_to_x(i64 %a) nounwind {
; CHECK32-NEXT: movl %eax, (%esp) ; CHECK32-NEXT: movl %eax, (%esp)
; CHECK32-NEXT: shrl $31, %ecx ; CHECK32-NEXT: shrl $31, %ecx
; CHECK32-NEXT: fildll (%esp) ; CHECK32-NEXT: fildll (%esp)
; CHECK32-NEXT: fadds {{\.LCPI.*}}(,%ecx,4) ; CHECK32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; CHECK32-NEXT: movl %ebp, %esp ; CHECK32-NEXT: movl %ebp, %esp
; CHECK32-NEXT: popl %ebp ; CHECK32-NEXT: popl %ebp
; CHECK32-NEXT: retl ; CHECK32-NEXT: retl
@ -1088,7 +1088,7 @@ define x86_fp80 @u64_to_x(i64 %a) nounwind {
; CHECK64-NEXT: testq %rdi, %rdi ; CHECK64-NEXT: testq %rdi, %rdi
; CHECK64-NEXT: sets %al ; CHECK64-NEXT: sets %al
; CHECK64-NEXT: fildll -{{[0-9]+}}(%rsp) ; CHECK64-NEXT: fildll -{{[0-9]+}}(%rsp)
; CHECK64-NEXT: fadds {{\.LCPI.*}}(,%rax,4) ; CHECK64-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%rax,4)
; CHECK64-NEXT: retq ; CHECK64-NEXT: retq
%r = uitofp i64 %a to x86_fp80 %r = uitofp i64 %a to x86_fp80
ret x86_fp80 %r ret x86_fp80 %r

View File

@ -16,7 +16,7 @@ define float @icmp_select_fp_constants(i32 %x) nounwind readnone {
; X86-NEXT: xorl %eax, %eax ; X86-NEXT: xorl %eax, %eax
; X86-NEXT: cmpl $0, {{[0-9]+}}(%esp) ; X86-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; X86-NEXT: sete %al ; X86-NEXT: sete %al
; X86-NEXT: flds {{\.LCPI.*}}(,%eax,4) ; X86-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-SSE-LABEL: icmp_select_fp_constants: ; X64-SSE-LABEL: icmp_select_fp_constants:
@ -46,7 +46,7 @@ define float @fcmp_select_fp_constants(float %x) nounwind readnone {
; X86-SSE-NEXT: cmpneqss {{[0-9]+}}(%esp), %xmm0 ; X86-SSE-NEXT: cmpneqss {{[0-9]+}}(%esp), %xmm0
; X86-SSE-NEXT: movd %xmm0, %eax ; X86-SSE-NEXT: movd %xmm0, %eax
; X86-SSE-NEXT: andl $1, %eax ; X86-SSE-NEXT: andl $1, %eax
; X86-SSE-NEXT: flds {{\.LCPI.*}}(,%eax,4) ; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; X86-SSE-NEXT: retl ; X86-SSE-NEXT: retl
; ;
; X86-AVX2-LABEL: fcmp_select_fp_constants: ; X86-AVX2-LABEL: fcmp_select_fp_constants:
@ -55,15 +55,15 @@ define float @fcmp_select_fp_constants(float %x) nounwind readnone {
; X86-AVX2-NEXT: vcmpneqss {{[0-9]+}}(%esp), %xmm0, %xmm0 ; X86-AVX2-NEXT: vcmpneqss {{[0-9]+}}(%esp), %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax ; X86-AVX2-NEXT: vmovd %xmm0, %eax
; X86-AVX2-NEXT: andl $1, %eax ; X86-AVX2-NEXT: andl $1, %eax
; X86-AVX2-NEXT: flds {{\.LCPI.*}}(,%eax,4) ; X86-AVX2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; X86-AVX2-NEXT: retl ; X86-AVX2-NEXT: retl
; ;
; X86-AVX512F-LABEL: fcmp_select_fp_constants: ; X86-AVX512F-LABEL: fcmp_select_fp_constants:
; X86-AVX512F: # %bb.0: ; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX512F-NEXT: vcmpneqss {{\.LCPI.*}}, %xmm0, %k0 ; X86-AVX512F-NEXT: vcmpneqss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %k0
; X86-AVX512F-NEXT: kmovw %k0, %eax ; X86-AVX512F-NEXT: kmovw %k0, %eax
; X86-AVX512F-NEXT: flds {{\.LCPI.*}}(,%eax,4) ; X86-AVX512F-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; X86-AVX512F-NEXT: retl ; X86-AVX512F-NEXT: retl
; ;
; X64-SSE-LABEL: fcmp_select_fp_constants: ; X64-SSE-LABEL: fcmp_select_fp_constants:

View File

@ -163,7 +163,7 @@ define float @test3(i32 %x) nounwind readnone {
; MCU-NEXT: xorl %ecx, %ecx ; MCU-NEXT: xorl %ecx, %ecx
; MCU-NEXT: testl %eax, %eax ; MCU-NEXT: testl %eax, %eax
; MCU-NEXT: sete %cl ; MCU-NEXT: sete %cl
; MCU-NEXT: flds {{\.LCPI.*}}(,%ecx,4) ; MCU-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; MCU-NEXT: retl ; MCU-NEXT: retl
entry: entry:
%0 = icmp eq i32 %x, 0 %0 = icmp eq i32 %x, 0
@ -197,7 +197,7 @@ define signext i8 @test4(i8* nocapture %P, double %F) nounwind readonly {
; MCU: # %bb.0: # %entry ; MCU: # %bb.0: # %entry
; MCU-NEXT: movl %eax, %ecx ; MCU-NEXT: movl %eax, %ecx
; MCU-NEXT: fldl {{[0-9]+}}(%esp) ; MCU-NEXT: fldl {{[0-9]+}}(%esp)
; MCU-NEXT: flds {{\.LCPI.*}} ; MCU-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; MCU-NEXT: fucompp ; MCU-NEXT: fucompp
; MCU-NEXT: fnstsw %ax ; MCU-NEXT: fnstsw %ax
; MCU-NEXT: xorl %edx, %edx ; MCU-NEXT: xorl %edx, %edx
@ -422,7 +422,7 @@ define x86_fp80 @test7(i32 %tmp8) nounwind {
; MCU-NEXT: notl %eax ; MCU-NEXT: notl %eax
; MCU-NEXT: shrl $27, %eax ; MCU-NEXT: shrl $27, %eax
; MCU-NEXT: andl $-16, %eax ; MCU-NEXT: andl $-16, %eax
; MCU-NEXT: fldt {{\.LCPI.*}}(%eax) ; MCU-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}(%eax)
; MCU-NEXT: retl ; MCU-NEXT: retl
%tmp9 = icmp sgt i32 %tmp8, -1 %tmp9 = icmp sgt i32 %tmp8, -1
%retval = select i1 %tmp9, x86_fp80 0xK4005B400000000000000, x86_fp80 0xK40078700000000000000 %retval = select i1 %tmp9, x86_fp80 0xK4005B400000000000000, x86_fp80 0xK40078700000000000000

View File

@ -22,7 +22,7 @@ define <8 x i16> @pr25080(<8 x i32> %a) {
; KNL-32-LABEL: pr25080: ; KNL-32-LABEL: pr25080:
; KNL-32: # %bb.0: # %entry ; KNL-32: # %bb.0: # %entry
; KNL-32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; KNL-32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-32-NEXT: vptestnmd {{\.LCPI.*}}{1to16}, %zmm0, %k0 ; KNL-32-NEXT: vptestnmd {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %k0
; KNL-32-NEXT: movb $15, %al ; KNL-32-NEXT: movb $15, %al
; KNL-32-NEXT: kmovw %eax, %k1 ; KNL-32-NEXT: kmovw %eax, %k1
; KNL-32-NEXT: korw %k1, %k0, %k1 ; KNL-32-NEXT: korw %k1, %k0, %k1

View File

@ -4,7 +4,7 @@
define x86_fp80 @test2() nounwind { define x86_fp80 @test2() nounwind {
; CHECK-LABEL: test2: ; CHECK-LABEL: test2:
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: flds {{\.LCPI.*}} ; CHECK-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; CHECK-NEXT: retl ; CHECK-NEXT: retl
entry: entry:
ret x86_fp80 0xK3FFFC000000000000000 ret x86_fp80 0xK3FFFC000000000000000

View File

@ -1406,7 +1406,7 @@ define void @mul_2xi8_varconst1(i8* nocapture readonly %a, i64 %index) {
; X86-SSE-NEXT: pxor %xmm1, %xmm1 ; X86-SSE-NEXT: pxor %xmm1, %xmm1
; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; X86-SSE-NEXT: pmaddwd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: pmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4) ; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
; X86-SSE-NEXT: retl ; X86-SSE-NEXT: retl
; ;
@ -1418,7 +1418,7 @@ define void @mul_2xi8_varconst1(i8* nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx ; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx
; X86-AVX-NEXT: vmovd %ecx, %xmm0 ; X86-AVX-NEXT: vmovd %ecx, %xmm0
; X86-AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; X86-AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; X86-AVX-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4) ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl ; X86-AVX-NEXT: retl
; ;
@ -1471,7 +1471,7 @@ define void @mul_2xi8_varconst2(i8* nocapture readonly %a, i64 %index) {
; X86-SSE-NEXT: movd %ecx, %xmm0 ; X86-SSE-NEXT: movd %ecx, %xmm0
; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X86-SSE-NEXT: psraw $8, %xmm0 ; X86-SSE-NEXT: psraw $8, %xmm0
; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7] ; X86-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
; X86-SSE-NEXT: psrad $16, %xmm0 ; X86-SSE-NEXT: psrad $16, %xmm0
; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4) ; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
@ -1485,7 +1485,7 @@ define void @mul_2xi8_varconst2(i8* nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx ; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx
; X86-AVX-NEXT: vmovd %ecx, %xmm0 ; X86-AVX-NEXT: vmovd %ecx, %xmm0
; X86-AVX-NEXT: vpmovsxbd %xmm0, %xmm0 ; X86-AVX-NEXT: vpmovsxbd %xmm0, %xmm0
; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4) ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl ; X86-AVX-NEXT: retl
; ;
@ -1540,7 +1540,7 @@ define void @mul_2xi8_varconst3(i8* nocapture readonly %a, i64 %index) {
; X86-SSE-NEXT: pxor %xmm1, %xmm1 ; X86-SSE-NEXT: pxor %xmm1, %xmm1
; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; X86-SSE-NEXT: pmaddwd {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: pmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4) ; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
; X86-SSE-NEXT: retl ; X86-SSE-NEXT: retl
; ;
@ -1552,7 +1552,7 @@ define void @mul_2xi8_varconst3(i8* nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx ; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx
; X86-AVX-NEXT: vmovd %ecx, %xmm0 ; X86-AVX-NEXT: vmovd %ecx, %xmm0
; X86-AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; X86-AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; X86-AVX-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4) ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl ; X86-AVX-NEXT: retl
; ;
@ -1621,7 +1621,7 @@ define void @mul_2xi8_varconst4(i8* nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx ; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx
; X86-AVX-NEXT: vmovd %ecx, %xmm0 ; X86-AVX-NEXT: vmovd %ecx, %xmm0
; X86-AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; X86-AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4) ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl ; X86-AVX-NEXT: retl
; ;
@ -1693,7 +1693,7 @@ define void @mul_2xi8_varconst5(i8* nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx ; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx
; X86-AVX-NEXT: vmovd %ecx, %xmm0 ; X86-AVX-NEXT: vmovd %ecx, %xmm0
; X86-AVX-NEXT: vpmovsxbd %xmm0, %xmm0 ; X86-AVX-NEXT: vpmovsxbd %xmm0, %xmm0
; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4) ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl ; X86-AVX-NEXT: retl
; ;
@ -1765,7 +1765,7 @@ define void @mul_2xi8_varconst6(i8* nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx ; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx
; X86-AVX-NEXT: vmovd %ecx, %xmm0 ; X86-AVX-NEXT: vmovd %ecx, %xmm0
; X86-AVX-NEXT: vpmovsxbd %xmm0, %xmm0 ; X86-AVX-NEXT: vpmovsxbd %xmm0, %xmm0
; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4) ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl ; X86-AVX-NEXT: retl
; ;
@ -1833,7 +1833,7 @@ define void @mul_2xi16_varconst1(i8* nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movl c, %edx ; X86-AVX-NEXT: movl c, %edx
; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; X86-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4) ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl ; X86-AVX-NEXT: retl
; ;
@ -1897,7 +1897,7 @@ define void @mul_2xi16_varconst2(i8* nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movl c, %edx ; X86-AVX-NEXT: movl c, %edx
; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX-NEXT: vpmovsxwd %xmm0, %xmm0 ; X86-AVX-NEXT: vpmovsxwd %xmm0, %xmm0
; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4) ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl ; X86-AVX-NEXT: retl
; ;
@ -1947,7 +1947,7 @@ define void @mul_2xi16_varconst3(i8* nocapture readonly %a, i64 %index) {
; X86-SSE-NEXT: movl c, %edx ; X86-SSE-NEXT: movl c, %edx
; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: psrld $16, %xmm0 ; X86-SSE-NEXT: psrld $16, %xmm0
; X86-SSE-NEXT: pmuludq {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: pmuludq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: psllq $32, %xmm0 ; X86-SSE-NEXT: psllq $32, %xmm0
; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4) ; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
; X86-SSE-NEXT: retl ; X86-SSE-NEXT: retl
@ -1959,7 +1959,7 @@ define void @mul_2xi16_varconst3(i8* nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movl c, %edx ; X86-AVX-NEXT: movl c, %edx
; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; X86-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4) ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl ; X86-AVX-NEXT: retl
; ;
@ -2009,7 +2009,7 @@ define void @mul_2xi16_varconst4(i8* nocapture readonly %a, i64 %index) {
; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] ; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X86-SSE-NEXT: psrad $16, %xmm0 ; X86-SSE-NEXT: psrad $16, %xmm0
; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] ; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X86-SSE-NEXT: pmuludq {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: pmuludq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: psllq $32, %xmm0 ; X86-SSE-NEXT: psllq $32, %xmm0
; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4) ; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
; X86-SSE-NEXT: retl ; X86-SSE-NEXT: retl
@ -2021,7 +2021,7 @@ define void @mul_2xi16_varconst4(i8* nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movl c, %edx ; X86-AVX-NEXT: movl c, %edx
; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX-NEXT: vpmovsxwd %xmm0, %xmm0 ; X86-AVX-NEXT: vpmovsxwd %xmm0, %xmm0
; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4) ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl ; X86-AVX-NEXT: retl
; ;

View File

@ -261,7 +261,7 @@ define <4 x i32> @vec_sink_add_of_const_to_add0(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: vec_sink_add_of_const_to_add0: ; X32-LABEL: vec_sink_add_of_const_to_add0:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: paddd %xmm1, %xmm0 ; X32-NEXT: paddd %xmm1, %xmm0
; X32-NEXT: paddd {{\.LCPI.*}}, %xmm0 ; X32-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: vec_sink_add_of_const_to_add0: ; X64-LABEL: vec_sink_add_of_const_to_add0:
@ -277,7 +277,7 @@ define <4 x i32> @vec_sink_add_of_const_to_add1(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: vec_sink_add_of_const_to_add1: ; X32-LABEL: vec_sink_add_of_const_to_add1:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: paddd %xmm1, %xmm0 ; X32-NEXT: paddd %xmm1, %xmm0
; X32-NEXT: paddd {{\.LCPI.*}}, %xmm0 ; X32-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: vec_sink_add_of_const_to_add1: ; X64-LABEL: vec_sink_add_of_const_to_add1:
@ -297,7 +297,7 @@ define <4 x i32> @vec_sink_sub_of_const_to_add0(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: vec_sink_sub_of_const_to_add0: ; X32-LABEL: vec_sink_sub_of_const_to_add0:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: paddd %xmm1, %xmm0 ; X32-NEXT: paddd %xmm1, %xmm0
; X32-NEXT: psubd {{\.LCPI.*}}, %xmm0 ; X32-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: vec_sink_sub_of_const_to_add0: ; X64-LABEL: vec_sink_sub_of_const_to_add0:
@ -313,7 +313,7 @@ define <4 x i32> @vec_sink_sub_of_const_to_add1(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: vec_sink_sub_of_const_to_add1: ; X32-LABEL: vec_sink_sub_of_const_to_add1:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: paddd %xmm1, %xmm0 ; X32-NEXT: paddd %xmm1, %xmm0
; X32-NEXT: psubd {{\.LCPI.*}}, %xmm0 ; X32-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: vec_sink_sub_of_const_to_add1: ; X64-LABEL: vec_sink_sub_of_const_to_add1:
@ -333,7 +333,7 @@ define <4 x i32> @vec_sink_sub_from_const_to_add0(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: vec_sink_sub_from_const_to_add0: ; X32-LABEL: vec_sink_sub_from_const_to_add0:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: psubd %xmm0, %xmm1 ; X32-NEXT: psubd %xmm0, %xmm1
; X32-NEXT: paddd {{\.LCPI.*}}, %xmm1 ; X32-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-NEXT: movdqa %xmm1, %xmm0 ; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
@ -351,7 +351,7 @@ define <4 x i32> @vec_sink_sub_from_const_to_add1(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: vec_sink_sub_from_const_to_add1: ; X32-LABEL: vec_sink_sub_from_const_to_add1:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: psubd %xmm0, %xmm1 ; X32-NEXT: psubd %xmm0, %xmm1
; X32-NEXT: paddd {{\.LCPI.*}}, %xmm1 ; X32-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-NEXT: movdqa %xmm1, %xmm0 ; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
@ -373,7 +373,7 @@ define <4 x i32> @vec_sink_add_of_const_to_sub(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: vec_sink_add_of_const_to_sub: ; X32-LABEL: vec_sink_add_of_const_to_sub:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: psubd %xmm1, %xmm0 ; X32-NEXT: psubd %xmm1, %xmm0
; X32-NEXT: paddd {{\.LCPI.*}}, %xmm0 ; X32-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: vec_sink_add_of_const_to_sub: ; X64-LABEL: vec_sink_add_of_const_to_sub:
@ -389,7 +389,7 @@ define <4 x i32> @vec_sink_add_of_const_to_sub2(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: vec_sink_add_of_const_to_sub2: ; X32-LABEL: vec_sink_add_of_const_to_sub2:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: psubd %xmm0, %xmm1 ; X32-NEXT: psubd %xmm0, %xmm1
; X32-NEXT: psubd {{\.LCPI.*}}, %xmm1 ; X32-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-NEXT: movdqa %xmm1, %xmm0 ; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
@ -411,7 +411,7 @@ define <4 x i32> @vec_sink_sub_of_const_to_sub(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: vec_sink_sub_of_const_to_sub: ; X32-LABEL: vec_sink_sub_of_const_to_sub:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: psubd %xmm1, %xmm0 ; X32-NEXT: psubd %xmm1, %xmm0
; X32-NEXT: psubd {{\.LCPI.*}}, %xmm0 ; X32-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: vec_sink_sub_of_const_to_sub: ; X64-LABEL: vec_sink_sub_of_const_to_sub:
@ -427,7 +427,7 @@ define <4 x i32> @vec_sink_sub_of_const_to_sub2(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: vec_sink_sub_of_const_to_sub2: ; X32-LABEL: vec_sink_sub_of_const_to_sub2:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: psubd %xmm0, %xmm1 ; X32-NEXT: psubd %xmm0, %xmm1
; X32-NEXT: paddd {{\.LCPI.*}}, %xmm1 ; X32-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-NEXT: movdqa %xmm1, %xmm0 ; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
@ -461,7 +461,7 @@ define <4 x i32> @vec_sink_sub_from_const_to_sub2(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: vec_sink_sub_from_const_to_sub2: ; X32-LABEL: vec_sink_sub_from_const_to_sub2:
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: paddd %xmm1, %xmm0 ; X32-NEXT: paddd %xmm1, %xmm0
; X32-NEXT: psubd {{\.LCPI.*}}, %xmm0 ; X32-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: vec_sink_sub_from_const_to_sub2: ; X64-LABEL: vec_sink_sub_from_const_to_sub2:

View File

@ -21,7 +21,7 @@ define <4 x i32> @test_mul_v4i32_v4i8(<4 x i8> %A) {
; CHECK32-LABEL: test_mul_v4i32_v4i8: ; CHECK32-LABEL: test_mul_v4i32_v4i8:
; CHECK32: # %bb.0: ; CHECK32: # %bb.0:
; CHECK32-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; CHECK32-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; CHECK32-NEXT: pmaddwd {{\.LCPI.*}}, %xmm0 ; CHECK32-NEXT: pmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK32-NEXT: retl ; CHECK32-NEXT: retl
; ;
; CHECK64-LABEL: test_mul_v4i32_v4i8: ; CHECK64-LABEL: test_mul_v4i32_v4i8:
@ -33,7 +33,7 @@ define <4 x i32> @test_mul_v4i32_v4i8(<4 x i8> %A) {
; SSE4-32-LABEL: test_mul_v4i32_v4i8: ; SSE4-32-LABEL: test_mul_v4i32_v4i8:
; SSE4-32: # %bb.0: ; SSE4-32: # %bb.0:
; SSE4-32-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; SSE4-32-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE4-32-NEXT: pmaddwd {{\.LCPI.*}}, %xmm0 ; SSE4-32-NEXT: pmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE4-32-NEXT: retl ; SSE4-32-NEXT: retl
; ;
; SSE4-64-LABEL: test_mul_v4i32_v4i8: ; SSE4-64-LABEL: test_mul_v4i32_v4i8:
@ -45,7 +45,7 @@ define <4 x i32> @test_mul_v4i32_v4i8(<4 x i8> %A) {
; AVX2-32-LABEL: test_mul_v4i32_v4i8: ; AVX2-32-LABEL: test_mul_v4i32_v4i8:
; AVX2-32: # %bb.0: ; AVX2-32: # %bb.0:
; AVX2-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; AVX2-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX2-32-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0 ; AVX2-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX2-32-NEXT: retl ; AVX2-32-NEXT: retl
; ;
; AVX2-64-LABEL: test_mul_v4i32_v4i8: ; AVX2-64-LABEL: test_mul_v4i32_v4i8:
@ -57,7 +57,7 @@ define <4 x i32> @test_mul_v4i32_v4i8(<4 x i8> %A) {
; AVX512DQ-32-LABEL: test_mul_v4i32_v4i8: ; AVX512DQ-32-LABEL: test_mul_v4i32_v4i8:
; AVX512DQ-32: # %bb.0: ; AVX512DQ-32: # %bb.0:
; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0 ; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX512DQ-32-NEXT: retl ; AVX512DQ-32-NEXT: retl
; ;
; AVX512DQ-64-LABEL: test_mul_v4i32_v4i8: ; AVX512DQ-64-LABEL: test_mul_v4i32_v4i8:
@ -69,7 +69,7 @@ define <4 x i32> @test_mul_v4i32_v4i8(<4 x i8> %A) {
; AVX512BW-32-LABEL: test_mul_v4i32_v4i8: ; AVX512BW-32-LABEL: test_mul_v4i32_v4i8:
; AVX512BW-32: # %bb.0: ; AVX512BW-32: # %bb.0:
; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0 ; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX512BW-32-NEXT: retl ; AVX512BW-32-NEXT: retl
; ;
; AVX512BW-64-LABEL: test_mul_v4i32_v4i8: ; AVX512BW-64-LABEL: test_mul_v4i32_v4i8:
@ -168,7 +168,7 @@ define <8 x i32> @test_mul_v8i32_v8i8(<8 x i8> %A) {
; AVX2-32-LABEL: test_mul_v8i32_v8i8: ; AVX2-32-LABEL: test_mul_v8i32_v8i8:
; AVX2-32: # %bb.0: ; AVX2-32: # %bb.0:
; AVX2-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero ; AVX2-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-32-NEXT: vpmaddwd {{\.LCPI.*}}, %ymm0, %ymm0 ; AVX2-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX2-32-NEXT: retl ; AVX2-32-NEXT: retl
; ;
; AVX2-64-LABEL: test_mul_v8i32_v8i8: ; AVX2-64-LABEL: test_mul_v8i32_v8i8:
@ -180,7 +180,7 @@ define <8 x i32> @test_mul_v8i32_v8i8(<8 x i8> %A) {
; AVX512DQ-32-LABEL: test_mul_v8i32_v8i8: ; AVX512DQ-32-LABEL: test_mul_v8i32_v8i8:
; AVX512DQ-32: # %bb.0: ; AVX512DQ-32: # %bb.0:
; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero ; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI.*}}, %ymm0, %ymm0 ; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX512DQ-32-NEXT: retl ; AVX512DQ-32-NEXT: retl
; ;
; AVX512DQ-64-LABEL: test_mul_v8i32_v8i8: ; AVX512DQ-64-LABEL: test_mul_v8i32_v8i8:
@ -192,7 +192,7 @@ define <8 x i32> @test_mul_v8i32_v8i8(<8 x i8> %A) {
; AVX512BW-32-LABEL: test_mul_v8i32_v8i8: ; AVX512BW-32-LABEL: test_mul_v8i32_v8i8:
; AVX512BW-32: # %bb.0: ; AVX512BW-32: # %bb.0:
; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero ; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI.*}}, %ymm0, %ymm0 ; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX512BW-32-NEXT: retl ; AVX512BW-32-NEXT: retl
; ;
; AVX512BW-64-LABEL: test_mul_v8i32_v8i8: ; AVX512BW-64-LABEL: test_mul_v8i32_v8i8:
@ -359,7 +359,7 @@ define <16 x i32> @test_mul_v16i32_v16i8(<16 x i8> %A) {
; AVX512DQ-32-LABEL: test_mul_v16i32_v16i8: ; AVX512DQ-32-LABEL: test_mul_v16i32_v16i8:
; AVX512DQ-32: # %bb.0: ; AVX512DQ-32: # %bb.0:
; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero ; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512DQ-32-NEXT: vpmulld {{\.LCPI.*}}{1to16}, %zmm0, %zmm0 ; AVX512DQ-32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
; AVX512DQ-32-NEXT: retl ; AVX512DQ-32-NEXT: retl
; ;
; AVX512DQ-64-LABEL: test_mul_v16i32_v16i8: ; AVX512DQ-64-LABEL: test_mul_v16i32_v16i8:
@ -371,7 +371,7 @@ define <16 x i32> @test_mul_v16i32_v16i8(<16 x i8> %A) {
; AVX512BW-32-LABEL: test_mul_v16i32_v16i8: ; AVX512BW-32-LABEL: test_mul_v16i32_v16i8:
; AVX512BW-32: # %bb.0: ; AVX512BW-32: # %bb.0:
; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero ; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI.*}}, %zmm0, %zmm0 ; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0
; AVX512BW-32-NEXT: retl ; AVX512BW-32-NEXT: retl
; ;
; AVX512BW-64-LABEL: test_mul_v16i32_v16i8: ; AVX512BW-64-LABEL: test_mul_v16i32_v16i8:
@ -383,7 +383,7 @@ define <16 x i32> @test_mul_v16i32_v16i8(<16 x i8> %A) {
; KNL-32-LABEL: test_mul_v16i32_v16i8: ; KNL-32-LABEL: test_mul_v16i32_v16i8:
; KNL-32: # %bb.0: ; KNL-32: # %bb.0:
; KNL-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero ; KNL-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; KNL-32-NEXT: vpmulld {{\.LCPI.*}}{1to16}, %zmm0, %zmm0 ; KNL-32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
; KNL-32-NEXT: retl ; KNL-32-NEXT: retl
; ;
; KNL-64-LABEL: test_mul_v16i32_v16i8: ; KNL-64-LABEL: test_mul_v16i32_v16i8:
@ -418,7 +418,7 @@ define <4 x i32> @test_mul_v4i32_v4i16(<4 x i16> %A) {
; SSE4-32-LABEL: test_mul_v4i32_v4i16: ; SSE4-32-LABEL: test_mul_v4i32_v4i16:
; SSE4-32: # %bb.0: ; SSE4-32: # %bb.0:
; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE4-32-NEXT: pmulld {{\.LCPI.*}}, %xmm0 ; SSE4-32-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE4-32-NEXT: retl ; SSE4-32-NEXT: retl
; ;
; SSE4-64-LABEL: test_mul_v4i32_v4i16: ; SSE4-64-LABEL: test_mul_v4i32_v4i16:
@ -666,7 +666,7 @@ define <16 x i32> @test_mul_v16i32_v16i16(<16 x i16> %A) {
; AVX512-32-LABEL: test_mul_v16i32_v16i16: ; AVX512-32-LABEL: test_mul_v16i32_v16i16:
; AVX512-32: # %bb.0: ; AVX512-32: # %bb.0:
; AVX512-32-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero ; AVX512-32-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512-32-NEXT: vpmulld {{\.LCPI.*}}{1to16}, %zmm0, %zmm0 ; AVX512-32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
; AVX512-32-NEXT: retl ; AVX512-32-NEXT: retl
; ;
; AVX512-64-LABEL: test_mul_v16i32_v16i16: ; AVX512-64-LABEL: test_mul_v16i32_v16i16:
@ -687,7 +687,7 @@ define <4 x i32> @test_mul_v4i32_v4i8_minsize(<4 x i8> %A) minsize {
; CHECK32-LABEL: test_mul_v4i32_v4i8_minsize: ; CHECK32-LABEL: test_mul_v4i32_v4i8_minsize:
; CHECK32: # %bb.0: ; CHECK32: # %bb.0:
; CHECK32-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; CHECK32-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; CHECK32-NEXT: pmaddwd {{\.LCPI.*}}, %xmm0 ; CHECK32-NEXT: pmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK32-NEXT: retl ; CHECK32-NEXT: retl
; ;
; CHECK64-LABEL: test_mul_v4i32_v4i8_minsize: ; CHECK64-LABEL: test_mul_v4i32_v4i8_minsize:
@ -699,7 +699,7 @@ define <4 x i32> @test_mul_v4i32_v4i8_minsize(<4 x i8> %A) minsize {
; SSE4-32-LABEL: test_mul_v4i32_v4i8_minsize: ; SSE4-32-LABEL: test_mul_v4i32_v4i8_minsize:
; SSE4-32: # %bb.0: ; SSE4-32: # %bb.0:
; SSE4-32-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; SSE4-32-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE4-32-NEXT: pmaddwd {{\.LCPI.*}}, %xmm0 ; SSE4-32-NEXT: pmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE4-32-NEXT: retl ; SSE4-32-NEXT: retl
; ;
; SSE4-64-LABEL: test_mul_v4i32_v4i8_minsize: ; SSE4-64-LABEL: test_mul_v4i32_v4i8_minsize:
@ -711,7 +711,7 @@ define <4 x i32> @test_mul_v4i32_v4i8_minsize(<4 x i8> %A) minsize {
; AVX2-32-LABEL: test_mul_v4i32_v4i8_minsize: ; AVX2-32-LABEL: test_mul_v4i32_v4i8_minsize:
; AVX2-32: # %bb.0: ; AVX2-32: # %bb.0:
; AVX2-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; AVX2-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX2-32-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0 ; AVX2-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX2-32-NEXT: retl ; AVX2-32-NEXT: retl
; ;
; AVX2-64-LABEL: test_mul_v4i32_v4i8_minsize: ; AVX2-64-LABEL: test_mul_v4i32_v4i8_minsize:
@ -723,7 +723,7 @@ define <4 x i32> @test_mul_v4i32_v4i8_minsize(<4 x i8> %A) minsize {
; AVX512DQ-32-LABEL: test_mul_v4i32_v4i8_minsize: ; AVX512DQ-32-LABEL: test_mul_v4i32_v4i8_minsize:
; AVX512DQ-32: # %bb.0: ; AVX512DQ-32: # %bb.0:
; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0 ; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX512DQ-32-NEXT: retl ; AVX512DQ-32-NEXT: retl
; ;
; AVX512DQ-64-LABEL: test_mul_v4i32_v4i8_minsize: ; AVX512DQ-64-LABEL: test_mul_v4i32_v4i8_minsize:
@ -735,7 +735,7 @@ define <4 x i32> @test_mul_v4i32_v4i8_minsize(<4 x i8> %A) minsize {
; AVX512BW-32-LABEL: test_mul_v4i32_v4i8_minsize: ; AVX512BW-32-LABEL: test_mul_v4i32_v4i8_minsize:
; AVX512BW-32: # %bb.0: ; AVX512BW-32: # %bb.0:
; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0 ; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX512BW-32-NEXT: retl ; AVX512BW-32-NEXT: retl
; ;
; AVX512BW-64-LABEL: test_mul_v4i32_v4i8_minsize: ; AVX512BW-64-LABEL: test_mul_v4i32_v4i8_minsize:
@ -826,7 +826,7 @@ define <8 x i32> @test_mul_v8i32_v8i8_minsize(<8 x i8> %A) minsize {
; AVX2-32-LABEL: test_mul_v8i32_v8i8_minsize: ; AVX2-32-LABEL: test_mul_v8i32_v8i8_minsize:
; AVX2-32: # %bb.0: ; AVX2-32: # %bb.0:
; AVX2-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero ; AVX2-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-32-NEXT: vpmaddwd {{\.LCPI.*}}, %ymm0, %ymm0 ; AVX2-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX2-32-NEXT: retl ; AVX2-32-NEXT: retl
; ;
; AVX2-64-LABEL: test_mul_v8i32_v8i8_minsize: ; AVX2-64-LABEL: test_mul_v8i32_v8i8_minsize:
@ -838,7 +838,7 @@ define <8 x i32> @test_mul_v8i32_v8i8_minsize(<8 x i8> %A) minsize {
; AVX512DQ-32-LABEL: test_mul_v8i32_v8i8_minsize: ; AVX512DQ-32-LABEL: test_mul_v8i32_v8i8_minsize:
; AVX512DQ-32: # %bb.0: ; AVX512DQ-32: # %bb.0:
; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero ; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI.*}}, %ymm0, %ymm0 ; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX512DQ-32-NEXT: retl ; AVX512DQ-32-NEXT: retl
; ;
; AVX512DQ-64-LABEL: test_mul_v8i32_v8i8_minsize: ; AVX512DQ-64-LABEL: test_mul_v8i32_v8i8_minsize:
@ -850,7 +850,7 @@ define <8 x i32> @test_mul_v8i32_v8i8_minsize(<8 x i8> %A) minsize {
; AVX512BW-32-LABEL: test_mul_v8i32_v8i8_minsize: ; AVX512BW-32-LABEL: test_mul_v8i32_v8i8_minsize:
; AVX512BW-32: # %bb.0: ; AVX512BW-32: # %bb.0:
; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero ; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI.*}}, %ymm0, %ymm0 ; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX512BW-32-NEXT: retl ; AVX512BW-32-NEXT: retl
; ;
; AVX512BW-64-LABEL: test_mul_v8i32_v8i8_minsize: ; AVX512BW-64-LABEL: test_mul_v8i32_v8i8_minsize:
@ -997,7 +997,7 @@ define <16 x i32> @test_mul_v16i32_v16i8_minsize(<16 x i8> %A) minsize {
; AVX512DQ-32-LABEL: test_mul_v16i32_v16i8_minsize: ; AVX512DQ-32-LABEL: test_mul_v16i32_v16i8_minsize:
; AVX512DQ-32: # %bb.0: ; AVX512DQ-32: # %bb.0:
; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero ; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512DQ-32-NEXT: vpmulld {{\.LCPI.*}}{1to16}, %zmm0, %zmm0 ; AVX512DQ-32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
; AVX512DQ-32-NEXT: retl ; AVX512DQ-32-NEXT: retl
; ;
; AVX512DQ-64-LABEL: test_mul_v16i32_v16i8_minsize: ; AVX512DQ-64-LABEL: test_mul_v16i32_v16i8_minsize:
@ -1009,7 +1009,7 @@ define <16 x i32> @test_mul_v16i32_v16i8_minsize(<16 x i8> %A) minsize {
; AVX512BW-32-LABEL: test_mul_v16i32_v16i8_minsize: ; AVX512BW-32-LABEL: test_mul_v16i32_v16i8_minsize:
; AVX512BW-32: # %bb.0: ; AVX512BW-32: # %bb.0:
; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero ; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI.*}}, %zmm0, %zmm0 ; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0
; AVX512BW-32-NEXT: retl ; AVX512BW-32-NEXT: retl
; ;
; AVX512BW-64-LABEL: test_mul_v16i32_v16i8_minsize: ; AVX512BW-64-LABEL: test_mul_v16i32_v16i8_minsize:
@ -1021,7 +1021,7 @@ define <16 x i32> @test_mul_v16i32_v16i8_minsize(<16 x i8> %A) minsize {
; KNL-32-LABEL: test_mul_v16i32_v16i8_minsize: ; KNL-32-LABEL: test_mul_v16i32_v16i8_minsize:
; KNL-32: # %bb.0: ; KNL-32: # %bb.0:
; KNL-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero ; KNL-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; KNL-32-NEXT: vpmulld {{\.LCPI.*}}{1to16}, %zmm0, %zmm0 ; KNL-32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
; KNL-32-NEXT: retl ; KNL-32-NEXT: retl
; ;
; KNL-64-LABEL: test_mul_v16i32_v16i8_minsize: ; KNL-64-LABEL: test_mul_v16i32_v16i8_minsize:
@ -1038,7 +1038,7 @@ define <4 x i32> @test_mul_v4i32_v4i16_minsize(<4 x i16> %A) minsize {
; CHECK32-LABEL: test_mul_v4i32_v4i16_minsize: ; CHECK32-LABEL: test_mul_v4i32_v4i16_minsize:
; CHECK32: # %bb.0: ; CHECK32: # %bb.0:
; CHECK32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; CHECK32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK32-NEXT: pmulld {{\.LCPI.*}}, %xmm0 ; CHECK32-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK32-NEXT: retl ; CHECK32-NEXT: retl
; ;
; CHECK64-LABEL: test_mul_v4i32_v4i16_minsize: ; CHECK64-LABEL: test_mul_v4i32_v4i16_minsize:
@ -1050,7 +1050,7 @@ define <4 x i32> @test_mul_v4i32_v4i16_minsize(<4 x i16> %A) minsize {
; SSE4-32-LABEL: test_mul_v4i32_v4i16_minsize: ; SSE4-32-LABEL: test_mul_v4i32_v4i16_minsize:
; SSE4-32: # %bb.0: ; SSE4-32: # %bb.0:
; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE4-32-NEXT: pmulld {{\.LCPI.*}}, %xmm0 ; SSE4-32-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE4-32-NEXT: retl ; SSE4-32-NEXT: retl
; ;
; SSE4-64-LABEL: test_mul_v4i32_v4i16_minsize: ; SSE4-64-LABEL: test_mul_v4i32_v4i16_minsize:
@ -1260,7 +1260,7 @@ define <16 x i32> @test_mul_v16i32_v16i16_minsize(<16 x i16> %A) minsize {
; AVX512-32-LABEL: test_mul_v16i32_v16i16_minsize: ; AVX512-32-LABEL: test_mul_v16i32_v16i16_minsize:
; AVX512-32: # %bb.0: ; AVX512-32: # %bb.0:
; AVX512-32-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero ; AVX512-32-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512-32-NEXT: vpmulld {{\.LCPI.*}}{1to16}, %zmm0, %zmm0 ; AVX512-32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
; AVX512-32-NEXT: retl ; AVX512-32-NEXT: retl
; ;
; AVX512-64-LABEL: test_mul_v16i32_v16i16_minsize: ; AVX512-64-LABEL: test_mul_v16i32_v16i16_minsize:

View File

@ -65,9 +65,9 @@ define float @int1(float %a, float %b) nounwind {
; X32: # %bb.0: ; X32: # %bb.0:
; X32-NEXT: pushl %eax ; X32-NEXT: pushl %eax
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: andps {{\.LCPI.*}}, %xmm0 ; X32-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero ; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: andps {{\.LCPI.*}}, %xmm1 ; X32-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-NEXT: orps %xmm0, %xmm1 ; X32-NEXT: orps %xmm0, %xmm1
; X32-NEXT: movss %xmm1, (%esp) ; X32-NEXT: movss %xmm1, (%esp)
; X32-NEXT: flds (%esp) ; X32-NEXT: flds (%esp)
@ -94,9 +94,9 @@ define double @int2(double %a, float %b, float %c) nounwind {
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: addss 20(%ebp), %xmm0 ; X32-NEXT: addss 20(%ebp), %xmm0
; X32-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero ; X32-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: andps {{\.LCPI.*}}, %xmm1 ; X32-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-NEXT: cvtss2sd %xmm0, %xmm0 ; X32-NEXT: cvtss2sd %xmm0, %xmm0
; X32-NEXT: andps {{\.LCPI.*}}, %xmm0 ; X32-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT: orps %xmm1, %xmm0 ; X32-NEXT: orps %xmm1, %xmm0
; X32-NEXT: movlps %xmm0, (%esp) ; X32-NEXT: movlps %xmm0, (%esp)
; X32-NEXT: fldl (%esp) ; X32-NEXT: fldl (%esp)

View File

@ -14,7 +14,7 @@ define double @test1(double* %P) {
define double @test2() { define double @test2() {
; CHECK-LABEL: test2: ; CHECK-LABEL: test2:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: fldl {{\.LCPI.*}} ; CHECK-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; CHECK-NEXT: retl ; CHECK-NEXT: retl
ret double 1.234560e+03 ret double 1.234560e+03
} }

View File

@ -7,7 +7,7 @@ define float @f32_pos(float %a, float %b) nounwind {
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: pushl %eax ; X86-NEXT: pushl %eax
; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: andps {{\.LCPI.*}}, %xmm0 ; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: movss %xmm0, (%esp) ; X86-NEXT: movss %xmm0, (%esp)
; X86-NEXT: flds (%esp) ; X86-NEXT: flds (%esp)
; X86-NEXT: popl %eax ; X86-NEXT: popl %eax
@ -26,7 +26,7 @@ define float @f32_neg(float %a, float %b) nounwind {
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: pushl %eax ; X86-NEXT: pushl %eax
; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: orps {{\.LCPI.*}}, %xmm0 ; X86-NEXT: orps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: movss %xmm0, (%esp) ; X86-NEXT: movss %xmm0, (%esp)
; X86-NEXT: flds (%esp) ; X86-NEXT: flds (%esp)
; X86-NEXT: popl %eax ; X86-NEXT: popl %eax
@ -43,7 +43,7 @@ define float @f32_neg(float %a, float %b) nounwind {
define <4 x float> @v4f32_pos(<4 x float> %a, <4 x float> %b) nounwind { define <4 x float> @v4f32_pos(<4 x float> %a, <4 x float> %b) nounwind {
; X86-LABEL: v4f32_pos: ; X86-LABEL: v4f32_pos:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: andps {{\.LCPI.*}}, %xmm0 ; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: v4f32_pos: ; X64-LABEL: v4f32_pos:
@ -57,7 +57,7 @@ define <4 x float> @v4f32_pos(<4 x float> %a, <4 x float> %b) nounwind {
define <4 x float> @v4f32_neg(<4 x float> %a, <4 x float> %b) nounwind { define <4 x float> @v4f32_neg(<4 x float> %a, <4 x float> %b) nounwind {
; X86-LABEL: v4f32_neg: ; X86-LABEL: v4f32_neg:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: orps {{\.LCPI.*}}, %xmm0 ; X86-NEXT: orps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: v4f32_neg: ; X64-LABEL: v4f32_neg:
@ -72,8 +72,8 @@ define <4 x float> @v4f32_const_mag(<4 x float> %a, <4 x float> %b) nounwind {
; X86-LABEL: v4f32_const_mag: ; X86-LABEL: v4f32_const_mag:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: movaps %xmm1, %xmm0 ; X86-NEXT: movaps %xmm1, %xmm0
; X86-NEXT: andps {{\.LCPI.*}}, %xmm0 ; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: orps {{\.LCPI.*}}, %xmm0 ; X86-NEXT: orps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: v4f32_const_mag: ; X64-LABEL: v4f32_const_mag:

View File

@ -180,7 +180,7 @@ define <4 x i32> @PR30512(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero ; X86-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X86-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] ; X86-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; X86-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0] ; X86-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; X86-NEXT: andps {{\.LCPI.*}}, %xmm2 ; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-NEXT: movaps %xmm2, (%eax) ; X86-NEXT: movaps %xmm2, (%eax)
; X86-NEXT: addl $16, %esp ; X86-NEXT: addl $16, %esp
; X86-NEXT: popl %esi ; X86-NEXT: popl %esi
@ -238,7 +238,7 @@ define <4 x i32> @PR30512(<4 x i32> %x, <4 x i32> %y) nounwind {
define <2 x float> @PR31672() #0 { define <2 x float> @PR31672() #0 {
; X86-LABEL: PR31672: ; X86-LABEL: PR31672:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: sqrtps {{\.LCPI.*}}, %xmm0 ; X86-NEXT: sqrtps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: PR31672: ; X64-LABEL: PR31672:

View File

@ -675,7 +675,7 @@ define <2 x i64> @test_insert_64_zext(<2 x i64> %i) {
define <4 x i32> @PR19721(<4 x i32> %i) { define <4 x i32> @PR19721(<4 x i32> %i) {
; X86-SSE-LABEL: PR19721: ; X86-SSE-LABEL: PR19721:
; X86-SSE: # %bb.0: ; X86-SSE: # %bb.0:
; X86-SSE-NEXT: andps {{\.LCPI.*}}, %xmm0 ; X86-SSE-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl ; X86-SSE-NEXT: retl
; ;
; AVX-LABEL: PR19721: ; AVX-LABEL: PR19721:

View File

@ -397,7 +397,7 @@ define <4 x i32> @t17() nounwind {
; X86-LABEL: t17: ; X86-LABEL: t17:
; X86: # %bb.0: # %entry ; X86: # %bb.0: # %entry
; X86-NEXT: pshufd {{.*#+}} xmm0 = mem[0,1,0,1] ; X86-NEXT: pshufd {{.*#+}} xmm0 = mem[0,1,0,1]
; X86-NEXT: pand {{\.LCPI.*}}, %xmm0 ; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: t17: ; X64-LABEL: t17:

View File

@ -18,7 +18,7 @@ define float @test(i64 %a) nounwind {
; X86-NEXT: movlps %xmm0, {{[0-9]+}}(%esp) ; X86-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
; X86-NEXT: shrl $31, %eax ; X86-NEXT: shrl $31, %eax
; X86-NEXT: fildll {{[0-9]+}}(%esp) ; X86-NEXT: fildll {{[0-9]+}}(%esp)
; X86-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; X86-NEXT: fstps {{[0-9]+}}(%esp) ; X86-NEXT: fstps {{[0-9]+}}(%esp)
; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: movss %xmm0, (%esp) ; X86-NEXT: movss %xmm0, (%esp)

View File

@ -7,8 +7,8 @@ define float @test1(i32 %x) nounwind readnone {
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %eax ; CHECK-NEXT: pushl %eax
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: orpd {{\.LCPI.*}}, %xmm0 ; CHECK-NEXT: orpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK-NEXT: subsd {{\.LCPI.*}}, %xmm0 ; CHECK-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK-NEXT: cvtsd2ss %xmm0, %xmm0 ; CHECK-NEXT: cvtsd2ss %xmm0, %xmm0
; CHECK-NEXT: movss %xmm0, (%esp) ; CHECK-NEXT: movss %xmm0, (%esp)
; CHECK-NEXT: flds (%esp) ; CHECK-NEXT: flds (%esp)
@ -26,8 +26,8 @@ define float @test2(<4 x i32> %x) nounwind readnone ssp {
; CHECK-NEXT: pushl %eax ; CHECK-NEXT: pushl %eax
; CHECK-NEXT: xorps %xmm1, %xmm1 ; CHECK-NEXT: xorps %xmm1, %xmm1
; CHECK-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3] ; CHECK-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; CHECK-NEXT: orps {{\.LCPI.*}}, %xmm1 ; CHECK-NEXT: orps {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; CHECK-NEXT: subsd {{\.LCPI.*}}, %xmm1 ; CHECK-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; CHECK-NEXT: xorps %xmm0, %xmm0 ; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: cvtsd2ss %xmm1, %xmm0 ; CHECK-NEXT: cvtsd2ss %xmm1, %xmm0
; CHECK-NEXT: movss %xmm0, (%esp) ; CHECK-NEXT: movss %xmm0, (%esp)

View File

@ -9,13 +9,13 @@
define <4 x float> @mask_ucvt_4i32_4f32(<4 x i32> %a) { define <4 x float> @mask_ucvt_4i32_4f32(<4 x i32> %a) {
; X32-SSE-LABEL: mask_ucvt_4i32_4f32: ; X32-SSE-LABEL: mask_ucvt_4i32_4f32:
; X32-SSE: # %bb.0: ; X32-SSE: # %bb.0:
; X32-SSE-NEXT: andps {{\.LCPI.*}}, %xmm0 ; X32-SSE-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-SSE-NEXT: cvtdq2ps %xmm0, %xmm0 ; X32-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X32-SSE-NEXT: retl ; X32-SSE-NEXT: retl
; ;
; X32-AVX-LABEL: mask_ucvt_4i32_4f32: ; X32-AVX-LABEL: mask_ucvt_4i32_4f32:
; X32-AVX: # %bb.0: ; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 ; X32-AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X32-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-AVX-NEXT: retl ; X32-AVX-NEXT: retl
; ;
@ -38,7 +38,7 @@ define <4 x float> @mask_ucvt_4i32_4f32(<4 x i32> %a) {
define <4 x double> @mask_ucvt_4i32_4f64(<4 x i32> %a) { define <4 x double> @mask_ucvt_4i32_4f64(<4 x i32> %a) {
; X32-SSE-LABEL: mask_ucvt_4i32_4f64: ; X32-SSE-LABEL: mask_ucvt_4i32_4f64:
; X32-SSE: # %bb.0: ; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 ; X32-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-SSE-NEXT: cvtdq2pd %xmm0, %xmm2 ; X32-SSE-NEXT: cvtdq2pd %xmm0, %xmm2
; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; X32-SSE-NEXT: cvtdq2pd %xmm0, %xmm1 ; X32-SSE-NEXT: cvtdq2pd %xmm0, %xmm1
@ -47,7 +47,7 @@ define <4 x double> @mask_ucvt_4i32_4f64(<4 x i32> %a) {
; ;
; X32-AVX-LABEL: mask_ucvt_4i32_4f64: ; X32-AVX-LABEL: mask_ucvt_4i32_4f64:
; X32-AVX: # %bb.0: ; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 ; X32-AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-AVX-NEXT: vcvtdq2pd %xmm0, %ymm0 ; X32-AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; X32-AVX-NEXT: retl ; X32-AVX-NEXT: retl
; ;
@ -80,7 +80,7 @@ define <4 x float> @lshr_truncate_mask_ucvt_4i64_4f32(<4 x i64> *%p0) {
; X32-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] ; X32-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-SSE-NEXT: psrld $16, %xmm0 ; X32-SSE-NEXT: psrld $16, %xmm0
; X32-SSE-NEXT: cvtdq2ps %xmm0, %xmm0 ; X32-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X32-SSE-NEXT: mulps {{\.LCPI.*}}, %xmm0 ; X32-SSE-NEXT: mulps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-SSE-NEXT: retl ; X32-SSE-NEXT: retl
; ;
; X32-AVX-LABEL: lshr_truncate_mask_ucvt_4i64_4f32: ; X32-AVX-LABEL: lshr_truncate_mask_ucvt_4i64_4f32:
@ -90,7 +90,7 @@ define <4 x float> @lshr_truncate_mask_ucvt_4i64_4f32(<4 x i64> *%p0) {
; X32-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],mem[0,2] ; X32-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],mem[0,2]
; X32-AVX-NEXT: vpsrld $16, %xmm0, %xmm0 ; X32-AVX-NEXT: vpsrld $16, %xmm0, %xmm0
; X32-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X32-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-AVX-NEXT: vmulps {{\.LCPI.*}}, %xmm0, %xmm0 ; X32-AVX-NEXT: vmulps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-AVX-NEXT: retl ; X32-AVX-NEXT: retl
; ;
; X64-SSE-LABEL: lshr_truncate_mask_ucvt_4i64_4f32: ; X64-SSE-LABEL: lshr_truncate_mask_ucvt_4i64_4f32:

View File

@ -106,7 +106,7 @@ define i8 @and_pow_2(i8 %x, i8 %y) {
define <4 x i32> @vec_const_uniform_pow_2(<4 x i32> %x) { define <4 x i32> @vec_const_uniform_pow_2(<4 x i32> %x) {
; X86-LABEL: vec_const_uniform_pow_2: ; X86-LABEL: vec_const_uniform_pow_2:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: andps {{\.LCPI.*}}, %xmm0 ; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: vec_const_uniform_pow_2: ; X64-LABEL: vec_const_uniform_pow_2:
@ -120,7 +120,7 @@ define <4 x i32> @vec_const_uniform_pow_2(<4 x i32> %x) {
define <4 x i32> @vec_const_nonuniform_pow_2(<4 x i32> %x) { define <4 x i32> @vec_const_nonuniform_pow_2(<4 x i32> %x) {
; X86-LABEL: vec_const_nonuniform_pow_2: ; X86-LABEL: vec_const_nonuniform_pow_2:
; X86: # %bb.0: ; X86: # %bb.0:
; X86-NEXT: andps {{\.LCPI.*}}, %xmm0 ; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl ; X86-NEXT: retl
; ;
; X64-LABEL: vec_const_nonuniform_pow_2: ; X64-LABEL: vec_const_nonuniform_pow_2:

View File

@ -34,7 +34,7 @@ define <4 x i64> @var_shuffle_v4i64(<4 x i64> %v, <4 x i64> %indices) nounwind {
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vpermilpd %ymm4, %ymm0, %ymm0 ; AVX1-NEXT: vpermilpd %ymm4, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtq {{.*}}(%rip), %xmm3, %xmm3 ; AVX1-NEXT: vpcmpgtq {{.*}}(%rip), %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtq {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vpcmpgtq {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0 ; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq ; AVX1-NEXT: retq
@ -94,7 +94,7 @@ define <8 x i32> @var_shuffle_v8i32(<8 x i32> %v, <8 x i32> %indices) nounwind {
; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0 ; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtd {{.*}}(%rip), %xmm1, %xmm3 ; AVX1-NEXT: vpcmpgtd {{.*}}(%rip), %xmm1, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpcmpgtd {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vpcmpgtd {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0 ; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq ; AVX1-NEXT: retq
@ -454,7 +454,7 @@ define <4 x double> @var_shuffle_v4f64(<4 x double> %v, <4 x i64> %indices) noun
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vpermilpd %ymm4, %ymm0, %ymm0 ; AVX1-NEXT: vpermilpd %ymm4, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtq {{.*}}(%rip), %xmm3, %xmm3 ; AVX1-NEXT: vpcmpgtq {{.*}}(%rip), %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtq {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vpcmpgtq {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0 ; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq ; AVX1-NEXT: retq
@ -514,7 +514,7 @@ define <8 x float> @var_shuffle_v8f32(<8 x float> %v, <8 x i32> %indices) nounwi
; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0 ; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtd {{.*}}(%rip), %xmm1, %xmm3 ; AVX1-NEXT: vpcmpgtd {{.*}}(%rip), %xmm1, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpcmpgtd {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vpcmpgtd {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0 ; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq ; AVX1-NEXT: retq
@ -576,7 +576,7 @@ define <4 x i64> @var_shuffle_v4i64_from_v2i64(<2 x i64> %v, <4 x i64> %indices)
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm3 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm3
; AVX1-NEXT: vpermilpd %ymm3, %ymm0, %ymm0 ; AVX1-NEXT: vpermilpd %ymm3, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtq {{.*}}(%rip), %xmm2, %xmm2 ; AVX1-NEXT: vpcmpgtq {{.*}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtq {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vpcmpgtq {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: vpermilpd %ymm3, %ymm0, %ymm2 ; AVX1-NEXT: vpermilpd %ymm3, %ymm0, %ymm2
; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0 ; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0
@ -638,7 +638,7 @@ define <8 x i32> @var_shuffle_v8i32_from_v4i32(<4 x i32> %v, <8 x i32> %indices)
; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0 ; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtd {{.*}}(%rip), %xmm1, %xmm3 ; AVX1-NEXT: vpcmpgtd {{.*}}(%rip), %xmm1, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpcmpgtd {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vpcmpgtd {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0 ; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq ; AVX1-NEXT: retq
@ -997,7 +997,7 @@ define <4 x double> @var_shuffle_v4f64_from_v2f64(<2 x double> %v, <4 x i64> %in
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm3 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm3
; AVX1-NEXT: vpermilpd %ymm3, %ymm0, %ymm0 ; AVX1-NEXT: vpermilpd %ymm3, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtq {{.*}}(%rip), %xmm2, %xmm2 ; AVX1-NEXT: vpcmpgtq {{.*}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtq {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vpcmpgtq {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: vpermilpd %ymm3, %ymm0, %ymm2 ; AVX1-NEXT: vpermilpd %ymm3, %ymm0, %ymm2
; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0 ; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0
@ -1059,7 +1059,7 @@ define <8 x float> @var_shuffle_v8f32_from_v4f32(<4 x float> %v, <8 x i32> %indi
; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0 ; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtd {{.*}}(%rip), %xmm1, %xmm3 ; AVX1-NEXT: vpcmpgtd {{.*}}(%rip), %xmm1, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpcmpgtd {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vpcmpgtd {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0 ; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq ; AVX1-NEXT: retq

View File

@ -2031,7 +2031,7 @@ define <2 x i8> @strict_vector_fptosi_v2f64_to_v2i8(<2 x double> %a) #0 {
; SSE-32-LABEL: strict_vector_fptosi_v2f64_to_v2i8: ; SSE-32-LABEL: strict_vector_fptosi_v2f64_to_v2i8:
; SSE-32: # %bb.0: ; SSE-32: # %bb.0:
; SSE-32-NEXT: cvttpd2dq %xmm0, %xmm0 ; SSE-32-NEXT: cvttpd2dq %xmm0, %xmm0
; SSE-32-NEXT: andpd {{\.LCPI.*}}, %xmm0 ; SSE-32-NEXT: andpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-32-NEXT: packuswb %xmm0, %xmm0 ; SSE-32-NEXT: packuswb %xmm0, %xmm0
; SSE-32-NEXT: packuswb %xmm0, %xmm0 ; SSE-32-NEXT: packuswb %xmm0, %xmm0
; SSE-32-NEXT: retl ; SSE-32-NEXT: retl
@ -2082,7 +2082,7 @@ define <2 x i8> @strict_vector_fptoui_v2f64_to_v2i8(<2 x double> %a) #0 {
; SSE-32-LABEL: strict_vector_fptoui_v2f64_to_v2i8: ; SSE-32-LABEL: strict_vector_fptoui_v2f64_to_v2i8:
; SSE-32: # %bb.0: ; SSE-32: # %bb.0:
; SSE-32-NEXT: cvttpd2dq %xmm0, %xmm0 ; SSE-32-NEXT: cvttpd2dq %xmm0, %xmm0
; SSE-32-NEXT: andpd {{\.LCPI.*}}, %xmm0 ; SSE-32-NEXT: andpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-32-NEXT: packuswb %xmm0, %xmm0 ; SSE-32-NEXT: packuswb %xmm0, %xmm0
; SSE-32-NEXT: packuswb %xmm0, %xmm0 ; SSE-32-NEXT: packuswb %xmm0, %xmm0
; SSE-32-NEXT: retl ; SSE-32-NEXT: retl
@ -2134,7 +2134,7 @@ define <2 x i8> @strict_vector_fptosi_v2f32_to_v2i8(<2 x float> %a) #0 {
; SSE-32: # %bb.0: ; SSE-32: # %bb.0:
; SSE-32-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero ; SSE-32-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE-32-NEXT: cvttps2dq %xmm0, %xmm0 ; SSE-32-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-32-NEXT: pand {{\.LCPI.*}}, %xmm0 ; SSE-32-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-32-NEXT: packuswb %xmm0, %xmm0 ; SSE-32-NEXT: packuswb %xmm0, %xmm0
; SSE-32-NEXT: packuswb %xmm0, %xmm0 ; SSE-32-NEXT: packuswb %xmm0, %xmm0
; SSE-32-NEXT: retl ; SSE-32-NEXT: retl
@ -2192,7 +2192,7 @@ define <2 x i8> @strict_vector_fptoui_v2f32_to_v2i8(<2 x float> %a) #0 {
; SSE-32: # %bb.0: ; SSE-32: # %bb.0:
; SSE-32-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero ; SSE-32-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE-32-NEXT: cvttps2dq %xmm0, %xmm0 ; SSE-32-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-32-NEXT: pand {{\.LCPI.*}}, %xmm0 ; SSE-32-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-32-NEXT: packuswb %xmm0, %xmm0 ; SSE-32-NEXT: packuswb %xmm0, %xmm0
; SSE-32-NEXT: packuswb %xmm0, %xmm0 ; SSE-32-NEXT: packuswb %xmm0, %xmm0
; SSE-32-NEXT: retl ; SSE-32-NEXT: retl
@ -3037,7 +3037,7 @@ define <4 x i32> @strict_vector_fptoui_v4f32_to_v4i32(<4 x float> %a) #0 {
; SSE-32-NEXT: movaps %xmm0, %xmm3 ; SSE-32-NEXT: movaps %xmm0, %xmm3
; SSE-32-NEXT: cmpltps %xmm2, %xmm3 ; SSE-32-NEXT: cmpltps %xmm2, %xmm3
; SSE-32-NEXT: movaps %xmm3, %xmm1 ; SSE-32-NEXT: movaps %xmm3, %xmm1
; SSE-32-NEXT: andnps {{\.LCPI.*}}, %xmm1 ; SSE-32-NEXT: andnps {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; SSE-32-NEXT: andnps %xmm2, %xmm3 ; SSE-32-NEXT: andnps %xmm2, %xmm3
; SSE-32-NEXT: subps %xmm3, %xmm0 ; SSE-32-NEXT: subps %xmm3, %xmm0
; SSE-32-NEXT: cvttps2dq %xmm0, %xmm0 ; SSE-32-NEXT: cvttps2dq %xmm0, %xmm0

View File

@ -278,14 +278,14 @@ define <2 x float> @uitofp_v2i64_v2f32(<2 x i64> %x) #0 {
; SSE-32-NEXT: movd %xmm1, %eax ; SSE-32-NEXT: movd %xmm1, %eax
; SSE-32-NEXT: shrl $31, %eax ; SSE-32-NEXT: shrl $31, %eax
; SSE-32-NEXT: fildll {{[0-9]+}}(%esp) ; SSE-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; SSE-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE-32-NEXT: fstps (%esp) ; SSE-32-NEXT: fstps (%esp)
; SSE-32-NEXT: wait ; SSE-32-NEXT: wait
; SSE-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1] ; SSE-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-32-NEXT: movd %xmm0, %eax ; SSE-32-NEXT: movd %xmm0, %eax
; SSE-32-NEXT: shrl $31, %eax ; SSE-32-NEXT: shrl $31, %eax
; SSE-32-NEXT: fildll {{[0-9]+}}(%esp) ; SSE-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; SSE-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE-32-NEXT: fstps {{[0-9]+}}(%esp) ; SSE-32-NEXT: fstps {{[0-9]+}}(%esp)
; SSE-32-NEXT: wait ; SSE-32-NEXT: wait
; SSE-32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero ; SSE-32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@ -347,14 +347,14 @@ define <2 x float> @uitofp_v2i64_v2f32(<2 x i64> %x) #0 {
; SSE41-32-NEXT: movd %xmm1, %eax ; SSE41-32-NEXT: movd %xmm1, %eax
; SSE41-32-NEXT: shrl $31, %eax ; SSE41-32-NEXT: shrl $31, %eax
; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp) ; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE41-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; SSE41-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE41-32-NEXT: fstps (%esp) ; SSE41-32-NEXT: fstps (%esp)
; SSE41-32-NEXT: wait ; SSE41-32-NEXT: wait
; SSE41-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1] ; SSE41-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE41-32-NEXT: movd %xmm0, %eax ; SSE41-32-NEXT: movd %xmm0, %eax
; SSE41-32-NEXT: shrl $31, %eax ; SSE41-32-NEXT: shrl $31, %eax
; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp) ; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE41-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; SSE41-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE41-32-NEXT: fstps {{[0-9]+}}(%esp) ; SSE41-32-NEXT: fstps {{[0-9]+}}(%esp)
; SSE41-32-NEXT: wait ; SSE41-32-NEXT: wait
; SSE41-32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero ; SSE41-32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@ -415,13 +415,13 @@ define <2 x float> @uitofp_v2i64_v2f32(<2 x i64> %x) #0 {
; AVX-32-NEXT: vextractps $1, %xmm0, %eax ; AVX-32-NEXT: vextractps $1, %xmm0, %eax
; AVX-32-NEXT: shrl $31, %eax ; AVX-32-NEXT: shrl $31, %eax
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp) ; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-32-NEXT: fstps {{[0-9]+}}(%esp) ; AVX-32-NEXT: fstps {{[0-9]+}}(%esp)
; AVX-32-NEXT: wait ; AVX-32-NEXT: wait
; AVX-32-NEXT: vextractps $3, %xmm0, %eax ; AVX-32-NEXT: vextractps $3, %xmm0, %eax
; AVX-32-NEXT: shrl $31, %eax ; AVX-32-NEXT: shrl $31, %eax
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp) ; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-32-NEXT: fstps (%esp) ; AVX-32-NEXT: fstps (%esp)
; AVX-32-NEXT: wait ; AVX-32-NEXT: wait
; AVX-32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; AVX-32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@ -526,7 +526,7 @@ define <4 x float> @sitofp_v4i1_v4f32(<4 x i1> %x) #0 {
define <4 x float> @uitofp_v4i1_v4f32(<4 x i1> %x) #0 { define <4 x float> @uitofp_v4i1_v4f32(<4 x i1> %x) #0 {
; SSE-32-LABEL: uitofp_v4i1_v4f32: ; SSE-32-LABEL: uitofp_v4i1_v4f32:
; SSE-32: # %bb.0: ; SSE-32: # %bb.0:
; SSE-32-NEXT: andps {{\.LCPI.*}}, %xmm0 ; SSE-32-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-32-NEXT: cvtdq2ps %xmm0, %xmm0 ; SSE-32-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-32-NEXT: retl ; SSE-32-NEXT: retl
; ;
@ -538,7 +538,7 @@ define <4 x float> @uitofp_v4i1_v4f32(<4 x i1> %x) #0 {
; ;
; SSE41-32-LABEL: uitofp_v4i1_v4f32: ; SSE41-32-LABEL: uitofp_v4i1_v4f32:
; SSE41-32: # %bb.0: ; SSE41-32: # %bb.0:
; SSE41-32-NEXT: andps {{\.LCPI.*}}, %xmm0 ; SSE41-32-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE41-32-NEXT: cvtdq2ps %xmm0, %xmm0 ; SSE41-32-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE41-32-NEXT: retl ; SSE41-32-NEXT: retl
; ;
@ -550,7 +550,7 @@ define <4 x float> @uitofp_v4i1_v4f32(<4 x i1> %x) #0 {
; ;
; AVX1-32-LABEL: uitofp_v4i1_v4f32: ; AVX1-32-LABEL: uitofp_v4i1_v4f32:
; AVX1-32: # %bb.0: ; AVX1-32: # %bb.0:
; AVX1-32-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 ; AVX1-32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX1-32-NEXT: vcvtdq2ps %xmm0, %xmm0 ; AVX1-32-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX1-32-NEXT: retl ; AVX1-32-NEXT: retl
; ;
@ -569,7 +569,7 @@ define <4 x float> @uitofp_v4i1_v4f32(<4 x i1> %x) #0 {
; ;
; AVX512VL-32-LABEL: uitofp_v4i1_v4f32: ; AVX512VL-32-LABEL: uitofp_v4i1_v4f32:
; AVX512VL-32: # %bb.0: ; AVX512VL-32: # %bb.0:
; AVX512VL-32-NEXT: vpandd {{\.LCPI.*}}{1to4}, %xmm0, %xmm0 ; AVX512VL-32-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
; AVX512VL-32-NEXT: vcvtdq2ps %xmm0, %xmm0 ; AVX512VL-32-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX512VL-32-NEXT: retl ; AVX512VL-32-NEXT: retl
; ;
@ -588,7 +588,7 @@ define <4 x float> @uitofp_v4i1_v4f32(<4 x i1> %x) #0 {
; ;
; AVX512DQVL-32-LABEL: uitofp_v4i1_v4f32: ; AVX512DQVL-32-LABEL: uitofp_v4i1_v4f32:
; AVX512DQVL-32: # %bb.0: ; AVX512DQVL-32: # %bb.0:
; AVX512DQVL-32-NEXT: vandps {{\.LCPI.*}}{1to4}, %xmm0, %xmm0 ; AVX512DQVL-32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
; AVX512DQVL-32-NEXT: vcvtdq2ps %xmm0, %xmm0 ; AVX512DQVL-32-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX512DQVL-32-NEXT: retl ; AVX512DQVL-32-NEXT: retl
; ;
@ -737,10 +737,10 @@ define <4 x float> @uitofp_v4i32_v4f32(<4 x i32> %x) #0 {
; SSE-32: # %bb.0: ; SSE-32: # %bb.0:
; SSE-32-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535] ; SSE-32-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE-32-NEXT: pand %xmm0, %xmm1 ; SSE-32-NEXT: pand %xmm0, %xmm1
; SSE-32-NEXT: por {{\.LCPI.*}}, %xmm1 ; SSE-32-NEXT: por {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; SSE-32-NEXT: psrld $16, %xmm0 ; SSE-32-NEXT: psrld $16, %xmm0
; SSE-32-NEXT: por {{\.LCPI.*}}, %xmm0 ; SSE-32-NEXT: por {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-32-NEXT: subps {{\.LCPI.*}}, %xmm0 ; SSE-32-NEXT: subps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-32-NEXT: addps %xmm1, %xmm0 ; SSE-32-NEXT: addps %xmm1, %xmm0
; SSE-32-NEXT: retl ; SSE-32-NEXT: retl
; ;
@ -759,10 +759,10 @@ define <4 x float> @uitofp_v4i32_v4f32(<4 x i32> %x) #0 {
; SSE41-32: # %bb.0: ; SSE41-32: # %bb.0:
; SSE41-32-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535] ; SSE41-32-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE41-32-NEXT: pand %xmm0, %xmm1 ; SSE41-32-NEXT: pand %xmm0, %xmm1
; SSE41-32-NEXT: por {{\.LCPI.*}}, %xmm1 ; SSE41-32-NEXT: por {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; SSE41-32-NEXT: psrld $16, %xmm0 ; SSE41-32-NEXT: psrld $16, %xmm0
; SSE41-32-NEXT: por {{\.LCPI.*}}, %xmm0 ; SSE41-32-NEXT: por {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE41-32-NEXT: subps {{\.LCPI.*}}, %xmm0 ; SSE41-32-NEXT: subps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE41-32-NEXT: addps %xmm1, %xmm0 ; SSE41-32-NEXT: addps %xmm1, %xmm0
; SSE41-32-NEXT: retl ; SSE41-32-NEXT: retl
; ;
@ -782,7 +782,7 @@ define <4 x float> @uitofp_v4i32_v4f32(<4 x i32> %x) #0 {
; AVX1-32-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7] ; AVX1-32-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; AVX1-32-NEXT: vpsrld $16, %xmm0, %xmm0 ; AVX1-32-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7] ; AVX1-32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; AVX1-32-NEXT: vsubps {{\.LCPI.*}}, %xmm0, %xmm0 ; AVX1-32-NEXT: vsubps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX1-32-NEXT: vaddps %xmm0, %xmm1, %xmm0 ; AVX1-32-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX1-32-NEXT: retl ; AVX1-32-NEXT: retl
; ;
@ -860,7 +860,7 @@ define <2 x double> @uitofp_v2i1_v2f64(<2 x i1> %x) #0 {
; SSE-32-LABEL: uitofp_v2i1_v2f64: ; SSE-32-LABEL: uitofp_v2i1_v2f64:
; SSE-32: # %bb.0: ; SSE-32: # %bb.0:
; SSE-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; SSE-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-32-NEXT: pand {{\.LCPI.*}}, %xmm0 ; SSE-32-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-32-NEXT: cvtdq2pd %xmm0, %xmm0 ; SSE-32-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-32-NEXT: retl ; SSE-32-NEXT: retl
; ;
@ -874,7 +874,7 @@ define <2 x double> @uitofp_v2i1_v2f64(<2 x i1> %x) #0 {
; SSE41-32-LABEL: uitofp_v2i1_v2f64: ; SSE41-32-LABEL: uitofp_v2i1_v2f64:
; SSE41-32: # %bb.0: ; SSE41-32: # %bb.0:
; SSE41-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; SSE41-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE41-32-NEXT: pand {{\.LCPI.*}}, %xmm0 ; SSE41-32-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE41-32-NEXT: cvtdq2pd %xmm0, %xmm0 ; SSE41-32-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE41-32-NEXT: retl ; SSE41-32-NEXT: retl
; ;
@ -888,7 +888,7 @@ define <2 x double> @uitofp_v2i1_v2f64(<2 x i1> %x) #0 {
; AVX1-32-LABEL: uitofp_v2i1_v2f64: ; AVX1-32-LABEL: uitofp_v2i1_v2f64:
; AVX1-32: # %bb.0: ; AVX1-32: # %bb.0:
; AVX1-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3] ; AVX1-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX1-32-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 ; AVX1-32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX1-32-NEXT: vcvtdq2pd %xmm0, %xmm0 ; AVX1-32-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX1-32-NEXT: retl ; AVX1-32-NEXT: retl
; ;
@ -910,7 +910,7 @@ define <2 x double> @uitofp_v2i1_v2f64(<2 x i1> %x) #0 {
; AVX512VL-32-LABEL: uitofp_v2i1_v2f64: ; AVX512VL-32-LABEL: uitofp_v2i1_v2f64:
; AVX512VL-32: # %bb.0: ; AVX512VL-32: # %bb.0:
; AVX512VL-32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; AVX512VL-32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512VL-32-NEXT: vpandd {{\.LCPI.*}}{1to4}, %xmm0, %xmm0 ; AVX512VL-32-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
; AVX512VL-32-NEXT: vcvtdq2pd %xmm0, %xmm0 ; AVX512VL-32-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX512VL-32-NEXT: retl ; AVX512VL-32-NEXT: retl
; ;
@ -932,7 +932,7 @@ define <2 x double> @uitofp_v2i1_v2f64(<2 x i1> %x) #0 {
; AVX512DQVL-32-LABEL: uitofp_v2i1_v2f64: ; AVX512DQVL-32-LABEL: uitofp_v2i1_v2f64:
; AVX512DQVL-32: # %bb.0: ; AVX512DQVL-32: # %bb.0:
; AVX512DQVL-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3] ; AVX512DQVL-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512DQVL-32-NEXT: vandps {{\.LCPI.*}}{1to4}, %xmm0, %xmm0 ; AVX512DQVL-32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
; AVX512DQVL-32-NEXT: vcvtdq2pd %xmm0, %xmm0 ; AVX512DQVL-32-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX512DQVL-32-NEXT: retl ; AVX512DQVL-32-NEXT: retl
; ;
@ -1276,14 +1276,14 @@ define <2 x double> @uitofp_v2i64_v2f64(<2 x i64> %x) #0 {
; SSE-32-NEXT: movd %xmm1, %eax ; SSE-32-NEXT: movd %xmm1, %eax
; SSE-32-NEXT: shrl $31, %eax ; SSE-32-NEXT: shrl $31, %eax
; SSE-32-NEXT: fildll {{[0-9]+}}(%esp) ; SSE-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; SSE-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE-32-NEXT: fstpl {{[0-9]+}}(%esp) ; SSE-32-NEXT: fstpl {{[0-9]+}}(%esp)
; SSE-32-NEXT: wait ; SSE-32-NEXT: wait
; SSE-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3] ; SSE-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE-32-NEXT: movd %xmm0, %eax ; SSE-32-NEXT: movd %xmm0, %eax
; SSE-32-NEXT: shrl $31, %eax ; SSE-32-NEXT: shrl $31, %eax
; SSE-32-NEXT: fildll {{[0-9]+}}(%esp) ; SSE-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; SSE-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE-32-NEXT: fstpl (%esp) ; SSE-32-NEXT: fstpl (%esp)
; SSE-32-NEXT: wait ; SSE-32-NEXT: wait
; SSE-32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; SSE-32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@ -1344,14 +1344,14 @@ define <2 x double> @uitofp_v2i64_v2f64(<2 x i64> %x) #0 {
; SSE41-32-NEXT: movd %xmm1, %eax ; SSE41-32-NEXT: movd %xmm1, %eax
; SSE41-32-NEXT: shrl $31, %eax ; SSE41-32-NEXT: shrl $31, %eax
; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp) ; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE41-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; SSE41-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE41-32-NEXT: fstpl {{[0-9]+}}(%esp) ; SSE41-32-NEXT: fstpl {{[0-9]+}}(%esp)
; SSE41-32-NEXT: wait ; SSE41-32-NEXT: wait
; SSE41-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3] ; SSE41-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE41-32-NEXT: movd %xmm0, %eax ; SSE41-32-NEXT: movd %xmm0, %eax
; SSE41-32-NEXT: shrl $31, %eax ; SSE41-32-NEXT: shrl $31, %eax
; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp) ; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE41-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; SSE41-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE41-32-NEXT: fstpl (%esp) ; SSE41-32-NEXT: fstpl (%esp)
; SSE41-32-NEXT: wait ; SSE41-32-NEXT: wait
; SSE41-32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; SSE41-32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@ -1411,13 +1411,13 @@ define <2 x double> @uitofp_v2i64_v2f64(<2 x i64> %x) #0 {
; AVX-32-NEXT: vextractps $1, %xmm0, %eax ; AVX-32-NEXT: vextractps $1, %xmm0, %eax
; AVX-32-NEXT: shrl $31, %eax ; AVX-32-NEXT: shrl $31, %eax
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp) ; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-32-NEXT: fstpl {{[0-9]+}}(%esp) ; AVX-32-NEXT: fstpl {{[0-9]+}}(%esp)
; AVX-32-NEXT: wait ; AVX-32-NEXT: wait
; AVX-32-NEXT: vextractps $3, %xmm0, %eax ; AVX-32-NEXT: vextractps $3, %xmm0, %eax
; AVX-32-NEXT: shrl $31, %eax ; AVX-32-NEXT: shrl $31, %eax
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp) ; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4) ; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-32-NEXT: fstpl (%esp) ; AVX-32-NEXT: fstpl (%esp)
; AVX-32-NEXT: wait ; AVX-32-NEXT: wait
; AVX-32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; AVX-32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero

Some files were not shown because too many files have changed in this diff Show More