diff --git a/test/CodeGen/X86/bitcast-and-setcc-128.ll b/test/CodeGen/X86/bitcast-and-setcc-128.ll
index 092b139fca2..1d78ee26a0b 100644
--- a/test/CodeGen/X86/bitcast-and-setcc-128.ll
+++ b/test/CodeGen/X86/bitcast-and-setcc-128.ll
@@ -1,48 +1,48 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+sse2 < %s | FileCheck %s --check-prefixes=SSE2-SSSE3,SSE2
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+ssse3 < %s | FileCheck %s --check-prefixes=SSE2-SSSE3,SSSE3
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx < %s | FileCheck %s --check-prefixes=AVX12,AVX1
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 < %s | FileCheck %s --check-prefixes=AVX12,AVX2
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx512f,+avx512vl,+avx512bw < %s | FileCheck %s --check-prefixes=AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX12,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX12,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX512

 define i8 @v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d) {
 ; SSE2-LABEL: v8i16:
-; SSE2: ## BB#0:
+; SSE2: # BB#0:
 ; SSE2-NEXT: pcmpgtw %xmm1, %xmm0
 ; SSE2-NEXT: pcmpgtw %xmm3, %xmm2
 ; SSE2-NEXT: pand %xmm0, %xmm2
 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
 ; SSE2-NEXT: packuswb %xmm2, %xmm2
 ; SSE2-NEXT: pmovmskb %xmm2, %eax
-; SSE2-NEXT: ## kill: %AL %AL %EAX
+; SSE2-NEXT: # kill: %AL %AL %EAX
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: v8i16:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0:
 ; SSSE3-NEXT: pcmpgtw %xmm1, %xmm0
 ; SSSE3-NEXT: pcmpgtw %xmm3, %xmm2
 ; SSSE3-NEXT: pand %xmm0, %xmm2
 ; SSSE3-NEXT: pshufb {{.*#+}} xmm2 = xmm2[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; SSSE3-NEXT: pmovmskb %xmm2, %eax
-; SSSE3-NEXT: ## kill: %AL %AL %EAX
+; SSSE3-NEXT: # kill: %AL %AL %EAX
 ; SSSE3-NEXT: retq
 ;
 ; AVX12-LABEL: v8i16:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
 ; AVX12-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
 ; AVX12-NEXT: vpcmpgtw %xmm3, %xmm2, %xmm1
 ; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX12-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL %AL %EAX
+; AVX12-NEXT: # kill: %AL %AL %EAX
 ; AVX12-NEXT: retq
 ;
 ; AVX512-LABEL: v8i16:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
 ; AVX512-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
 ; AVX512-NEXT: vpcmpgtw %xmm3, %xmm2, %k0 {%k1}
 ; AVX512-NEXT: kmovd %k0, %eax
-; AVX512-NEXT: ## kill: %AL %AL %EAX
+; AVX512-NEXT: # kill: %AL %AL %EAX
 ; AVX512-NEXT: retq
   %x0 = icmp sgt <8 x i16> %a, %b
   %x1 = icmp sgt <8 x i16> %c, %d
@@ -53,25 +53,25 @@ define i8 @v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d) {

 define i4 @v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
 ; SSE2-SSSE3-LABEL: v4i32:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
 ; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
 ; SSE2-SSSE3-NEXT: pcmpgtd %xmm3, %xmm2
 ; SSE2-SSSE3-NEXT: pand %xmm0, %xmm2
 ; SSE2-SSSE3-NEXT: movmskps %xmm2, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL %AL %EAX
+; SSE2-SSSE3-NEXT: # kill: %AL %AL %EAX
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX12-LABEL: v4i32:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
 ; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
 ; AVX12-NEXT: vpcmpgtd %xmm3, %xmm2, %xmm1
 ; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL %AL %EAX
+; AVX12-NEXT: # kill: %AL %AL %EAX
 ; AVX12-NEXT: retq
 ;
 ; AVX512-LABEL: v4i32:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
 ; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
 ; AVX512-NEXT: vpcmpgtd %xmm3, %xmm2, %k0 {%k1}
 ; AVX512-NEXT: kmovd %k0, %eax
@@ -87,25 +87,25 @@ define i4 @v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {

 define i4 @v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d) {
 ; SSE2-SSSE3-LABEL: v4f32:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
 ; SSE2-SSSE3-NEXT: cmpltps %xmm0, %xmm1
 ; SSE2-SSSE3-NEXT: cmpltps %xmm2, %xmm3
 ; SSE2-SSSE3-NEXT: andps %xmm1, %xmm3
 ; SSE2-SSSE3-NEXT: movmskps %xmm3, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL %AL %EAX
+; SSE2-SSSE3-NEXT: # kill: %AL %AL %EAX
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX12-LABEL: v4f32:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
 ; AVX12-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
 ; AVX12-NEXT: vcmpltps %xmm2, %xmm3, %xmm1
 ; AVX12-NEXT: vandps %xmm1, %xmm0, %xmm0
 ; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL %AL %EAX
+; AVX12-NEXT: # kill: %AL %AL %EAX
 ; AVX12-NEXT: retq
 ;
 ; AVX512-LABEL: v4f32:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
 ; AVX512-NEXT: vcmpltps %xmm0, %xmm1, %k1
 ; AVX512-NEXT: vcmpltps %xmm2, %xmm3, %k0 {%k1}
 ; AVX512-NEXT: kmovd %k0, %eax
@@ -121,29 +121,29 @@ define i4 @v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d)

 define i16 @v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
 ; SSE2-SSSE3-LABEL: v16i8:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
 ; SSE2-SSSE3-NEXT: pcmpgtb %xmm1, %xmm0
 ; SSE2-SSSE3-NEXT: pcmpgtb %xmm3, %xmm2
 ; SSE2-SSSE3-NEXT: pand %xmm0, %xmm2
 ; SSE2-SSSE3-NEXT: pmovmskb %xmm2, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AX %AX %EAX
+; SSE2-SSSE3-NEXT: # kill: %AX %AX %EAX
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX12-LABEL: v16i8:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
 ; AVX12-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
 ; AVX12-NEXT: vpcmpgtb %xmm3, %xmm2, %xmm1
 ; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: ## kill: %AX %AX %EAX
+; AVX12-NEXT: # kill: %AX %AX %EAX
 ; AVX12-NEXT: retq
 ;
 ; AVX512-LABEL: v16i8:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
 ; AVX512-NEXT: vpcmpgtb %xmm1, %xmm0, %k1
 ; AVX512-NEXT: vpcmpgtb %xmm3, %xmm2, %k0 {%k1}
 ; AVX512-NEXT: kmovd %k0, %eax
-; AVX512-NEXT: ## kill: %AX %AX %EAX
+; AVX512-NEXT: # kill: %AX %AX %EAX
 ; AVX512-NEXT: retq
   %x0 = icmp sgt <16 x i8> %a, %b
   %x1 = icmp sgt <16 x i8> %c, %d
@@ -154,7 +154,7 @@ define i16 @v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {

 define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {
 ; SSE2-SSSE3-LABEL: v2i8:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
 ; SSE2-SSSE3-NEXT: psllq $56, %xmm2
 ; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm4
 ; SSE2-SSSE3-NEXT: psrad $31, %xmm4
@@ -206,11 +206,11 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {
 ; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
 ; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
 ; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL %AL %EAX
+; SSE2-SSSE3-NEXT: # kill: %AL %AL %EAX
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: v2i8:
-; AVX1: ## BB#0:
+; AVX1: # BB#0:
 ; AVX1-NEXT: vpsllq $56, %xmm3, %xmm3
 ; AVX1-NEXT: vpsrad $31, %xmm3, %xmm4
 ; AVX1-NEXT: vpsrad $24, %xmm3, %xmm3
@@ -235,11 +235,11 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {
 ; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: ## kill: %AL %AL %EAX
+; AVX1-NEXT: # kill: %AL %AL %EAX
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: v2i8:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
 ; AVX2-NEXT: vpsllq $56, %xmm3, %xmm3
 ; AVX2-NEXT: vpsrad $31, %xmm3, %xmm4
 ; AVX2-NEXT: vpsrad $24, %xmm3, %xmm3
@@ -264,11 +264,11 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {
 ; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: ## kill: %AL %AL %EAX
+; AVX2-NEXT: # kill: %AL %AL %EAX
 ; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: v2i8:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
 ; AVX512-NEXT: vpsllq $56, %xmm3, %xmm3
 ; AVX512-NEXT: vpsraq $56, %xmm3, %xmm3
 ; AVX512-NEXT: vpsllq $56, %xmm2, %xmm2
@@ -292,7 +292,7 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {

 define i2 @v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
 ; SSE2-SSSE3-LABEL: v2i16:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
 ; SSE2-SSSE3-NEXT: psllq $48, %xmm2
 ; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm4
 ; SSE2-SSSE3-NEXT: psrad $31, %xmm4
@@ -344,11 +344,11 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
 ; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
 ; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
 ; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL %AL %EAX
+; SSE2-SSSE3-NEXT: # kill: %AL %AL %EAX
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: v2i16:
-; AVX1: ## BB#0:
+; AVX1: # BB#0:
 ; AVX1-NEXT: vpsllq $48, %xmm3, %xmm3
 ; AVX1-NEXT: vpsrad $31, %xmm3, %xmm4
 ; AVX1-NEXT: vpsrad $16, %xmm3, %xmm3
@@ -373,11 +373,11 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
 ; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: ## kill: %AL %AL %EAX
+; AVX1-NEXT: # kill: %AL %AL %EAX
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: v2i16:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
 ; AVX2-NEXT: vpsllq $48, %xmm3, %xmm3
 ; AVX2-NEXT: vpsrad $31, %xmm3, %xmm4
 ; AVX2-NEXT: vpsrad $16, %xmm3, %xmm3
@@ -402,11 +402,11 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
 ; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: ## kill: %AL %AL %EAX
+; AVX2-NEXT: # kill: %AL %AL %EAX
 ; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: v2i16:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
 ; AVX512-NEXT: vpsllq $48, %xmm3, %xmm3
 ; AVX512-NEXT: vpsraq $48, %xmm3, %xmm3
 ; AVX512-NEXT: vpsllq $48, %xmm2, %xmm2
@@ -430,7 +430,7 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {

 define i2 @v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
 ; SSE2-SSSE3-LABEL: v2i32:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
 ; SSE2-SSSE3-NEXT: psllq $32, %xmm2
 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,3,2,3]
 ; SSE2-SSSE3-NEXT: psrad $31, %xmm2
@@ -474,11 +474,11 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
 ; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
 ; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0
 ; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL %AL %EAX
+; SSE2-SSSE3-NEXT: # kill: %AL %AL %EAX
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: v2i32:
-; AVX1: ## BB#0:
+; AVX1: # BB#0:
 ; AVX1-NEXT: vpsllq $32, %xmm3, %xmm3
 ; AVX1-NEXT: vpsrad $31, %xmm3, %xmm4
 ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
@@ -499,11 +499,11 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
 ; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: ## kill: %AL %AL %EAX
+; AVX1-NEXT: # kill: %AL %AL %EAX
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: v2i32:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
 ; AVX2-NEXT: vpsllq $32, %xmm3, %xmm3
 ; AVX2-NEXT: vpsrad $31, %xmm3, %xmm4
 ; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
@@ -524,11 +524,11 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
 ; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: ## kill: %AL %AL %EAX
+; AVX2-NEXT: # kill: %AL %AL %EAX
 ; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: v2i32:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
 ; AVX512-NEXT: vpsllq $32, %xmm3, %xmm3
 ; AVX512-NEXT: vpsraq $32, %xmm3, %xmm3
 ; AVX512-NEXT: vpsllq $32, %xmm2, %xmm2
@@ -552,7 +552,7 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {

 define i2 @v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i64> %d) {
 ; SSE2-SSSE3-LABEL: v2i64:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
 ; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm1
 ; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm0
@@ -576,20 +576,20 @@ define i2 @v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i64> %d) {
 ; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
 ; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
 ; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL %AL %EAX
+; SSE2-SSSE3-NEXT: # kill: %AL %AL %EAX
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX12-LABEL: v2i64:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
 ; AVX12-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
 ; AVX12-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm1
 ; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX12-NEXT: vmovmskpd %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL %AL %EAX
+; AVX12-NEXT: # kill: %AL %AL %EAX
 ; AVX12-NEXT: retq
 ;
 ; AVX512-LABEL: v2i64:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
 ; AVX512-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
 ; AVX512-NEXT: vpcmpgtq %xmm3, %xmm2, %k0 {%k1}
 ; AVX512-NEXT: kmovd %k0, %eax
@@ -605,25 +605,25 @@ define i2 @v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i64> %d) {

 define i2 @v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %d) {
 ; SSE2-SSSE3-LABEL: v2f64:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
 ; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm1
 ; SSE2-SSSE3-NEXT: cmpltpd %xmm2, %xmm3
 ; SSE2-SSSE3-NEXT: andpd %xmm1, %xmm3
 ; SSE2-SSSE3-NEXT: movmskpd %xmm3, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL %AL %EAX
+; SSE2-SSSE3-NEXT: # kill: %AL %AL %EAX
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX12-LABEL: v2f64:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
 ; AVX12-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
 ; AVX12-NEXT: vcmpltpd %xmm2, %xmm3, %xmm1
 ; AVX12-NEXT: vandpd %xmm1, %xmm0, %xmm0
 ; AVX12-NEXT: vmovmskpd %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL %AL %EAX
+; AVX12-NEXT: # kill: %AL %AL %EAX
 ; AVX12-NEXT: retq
 ;
 ; AVX512-LABEL: v2f64:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
 ; AVX512-NEXT: vcmpltpd %xmm0, %xmm1, %k1
 ; AVX512-NEXT: vcmpltpd %xmm2, %xmm3, %k0 {%k1}
 ; AVX512-NEXT: kmovd %k0, %eax
@@ -639,7 +639,7 @@ define i2 @v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double>

 define i4 @v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> %d) {
 ; SSE2-SSSE3-LABEL: v4i8:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
 ; SSE2-SSSE3-NEXT: pslld $24, %xmm3
 ; SSE2-SSSE3-NEXT: psrad $24, %xmm3
 ; SSE2-SSSE3-NEXT: pslld $24, %xmm2
@@ -652,11 +652,11 @@ define i4 @v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> %d) {
 ; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
 ; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
 ; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL %AL %EAX
+; SSE2-SSSE3-NEXT: # kill: %AL %AL %EAX
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX12-LABEL: v4i8:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
 ; AVX12-NEXT: vpslld $24, %xmm3, %xmm3
 ; AVX12-NEXT: vpsrad $24, %xmm3, %xmm3
 ; AVX12-NEXT: vpslld $24, %xmm2, %xmm2
@@ -669,11 +669,11 @@ define i4 @v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> %d) {
 ; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
 ; AVX12-NEXT: vpand %xmm2, %xmm0, %xmm0
 ; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL %AL %EAX
+; AVX12-NEXT: # kill: %AL %AL %EAX
 ; AVX12-NEXT: retq
 ;
 ; AVX512-LABEL: v4i8:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
 ; AVX512-NEXT: vpslld $24, %xmm3, %xmm3
 ; AVX512-NEXT: vpsrad $24, %xmm3, %xmm3
 ; AVX512-NEXT: vpslld $24, %xmm2, %xmm2
@@ -697,7 +697,7 @@ define i4 @v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> %d) {

 define i4 @v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {
 ; SSE2-SSSE3-LABEL: v4i16:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
 ; SSE2-SSSE3-NEXT: pslld $16, %xmm3
 ; SSE2-SSSE3-NEXT: psrad $16, %xmm3
 ; SSE2-SSSE3-NEXT: pslld $16, %xmm2
@@ -710,11 +710,11 @@ define i4 @v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {
 ; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
 ; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
 ; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL %AL %EAX
+; SSE2-SSSE3-NEXT: # kill: %AL %AL %EAX
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX12-LABEL: v4i16:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
 ; AVX12-NEXT: vpslld $16, %xmm3, %xmm3
 ; AVX12-NEXT: vpsrad $16, %xmm3, %xmm3
 ; AVX12-NEXT: vpslld $16, %xmm2, %xmm2
@@ -727,11 +727,11 @@ define i4 @v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {
 ; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
 ; AVX12-NEXT: vpand %xmm2, %xmm0, %xmm0
 ; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL %AL %EAX
+; AVX12-NEXT: # kill: %AL %AL %EAX
 ; AVX12-NEXT: retq
 ;
 ; AVX512-LABEL: v4i16:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
 ; AVX512-NEXT: vpslld $16, %xmm3, %xmm3
 ; AVX512-NEXT: vpsrad $16, %xmm3, %xmm3
 ; AVX512-NEXT: vpslld $16, %xmm2, %xmm2
@@ -755,7 +755,7 @@ define i4 @v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {

 define i8 @v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
 ; SSE2-LABEL: v8i8:
-; SSE2: ## BB#0:
+; SSE2: # BB#0:
 ; SSE2-NEXT: psllw $8, %xmm3
 ; SSE2-NEXT: psraw $8, %xmm3
 ; SSE2-NEXT: psllw $8, %xmm2
@@ -770,11 +770,11 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
 ; SSE2-NEXT: packuswb %xmm0, %xmm0
 ; SSE2-NEXT: pmovmskb %xmm0, %eax
-; SSE2-NEXT: ## kill: %AL %AL %EAX
+; SSE2-NEXT: # kill: %AL %AL %EAX
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: v8i8:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0:
 ; SSSE3-NEXT: psllw $8, %xmm3
 ; SSSE3-NEXT: psraw $8, %xmm3
 ; SSSE3-NEXT: psllw $8, %xmm2
@@ -788,11 +788,11 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
 ; SSSE3-NEXT: pand %xmm2, %xmm0
 ; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSSE3-NEXT: ## kill: %AL %AL %EAX
+; SSSE3-NEXT: # kill: %AL %AL %EAX
 ; SSSE3-NEXT: retq
 ;
 ; AVX12-LABEL: v8i8:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
 ; AVX12-NEXT: vpsllw $8, %xmm3, %xmm3
 ; AVX12-NEXT: vpsraw $8, %xmm3, %xmm3
 ; AVX12-NEXT: vpsllw $8, %xmm2, %xmm2
@@ -806,11 +806,11 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
 ; AVX12-NEXT: vpand %xmm2, %xmm0, %xmm0
 ; AVX12-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL %AL %EAX
+; AVX12-NEXT: # kill: %AL %AL %EAX
 ; AVX12-NEXT: retq
 ;
 ; AVX512-LABEL: v8i8:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
 ; AVX512-NEXT: vpsllw $8, %xmm3, %xmm3
 ; AVX512-NEXT: vpsraw $8, %xmm3, %xmm3
 ; AVX512-NEXT: vpsllw $8, %xmm2, %xmm2
@@ -822,7 +822,7 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
 ; AVX512-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
 ; AVX512-NEXT: vpcmpgtw %xmm3, %xmm2, %k0 {%k1}
 ; AVX512-NEXT: kmovd %k0, %eax
-; AVX512-NEXT: ## kill: %AL %AL %EAX
+; AVX512-NEXT: # kill: %AL %AL %EAX
 ; AVX512-NEXT: retq
   %x0 = icmp sgt <8 x i8> %a, %b
   %x1 = icmp sgt <8 x i8> %c, %d
diff --git a/test/CodeGen/X86/bitcast-and-setcc-256.ll b/test/CodeGen/X86/bitcast-and-setcc-256.ll
index a6d6ca15530..95529686a58 100644
--- a/test/CodeGen/X86/bitcast-and-setcc-256.ll
+++ b/test/CodeGen/X86/bitcast-and-setcc-256.ll
@@ -1,13 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+SSE2 < %s | FileCheck %s --check-prefixes=SSE2-SSSE3,SSE2
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+SSSE3 < %s | FileCheck %s --check-prefixes=SSE2-SSSE3,SSSE3
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx < %s | FileCheck %s --check-prefixes=AVX12,AVX1
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 < %s | FileCheck %s --check-prefixes=AVX12,AVX2
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx512f,+avx512vl,+avx512bw < %s | FileCheck %s --check-prefix=AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+SSE2 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+SSSE3 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX12,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX12,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefix=AVX512

 define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
 ; SSE2-SSSE3-LABEL: v4i64:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,0,2147483648,0]
 ; SSE2-SSSE3-NEXT: pxor %xmm8, %xmm3
 ; SSE2-SSSE3-NEXT: pxor %xmm8, %xmm1
@@ -57,11 +57,11 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
 ; SSE2-SSSE3-NEXT: psrad $31, %xmm2
 ; SSE2-SSSE3-NEXT: pand %xmm0, %xmm2
 ; SSE2-SSSE3-NEXT: movmskps %xmm2, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL %AL %EAX
+; SSE2-SSSE3-NEXT: # kill: %AL %AL %EAX
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: v4i64:
-; AVX1: ## BB#0:
+; AVX1: # BB#0:
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -74,12 +74,12 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
 ; AVX1-NEXT: vpacksswb %xmm1, %xmm2, %xmm1
 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vmovmskps %xmm0, %eax
-; AVX1-NEXT: ## kill: %AL %AL %EAX
+; AVX1-NEXT: # kill: %AL %AL %EAX
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: v4i64:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
 ; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -88,12 +88,12 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
 ; AVX2-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
 ; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vmovmskps %xmm0, %eax
-; AVX2-NEXT: ## kill: %AL %AL %EAX
+; AVX2-NEXT: # kill: %AL %AL %EAX
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: v4i64:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
 ; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
 ; AVX512-NEXT: vpcmpgtq %ymm3, %ymm2, %k0 {%k1}
 ; AVX512-NEXT: kmovd %k0, %eax
@@ -110,7 +110,7 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {

 define i4 @v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x double> %d) {
 ; SSE2-SSSE3-LABEL: v4f64:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
 ; SSE2-SSSE3-NEXT: cmpltpd %xmm1, %xmm3
 ; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm2
 ; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
@@ -123,11 +123,11 @@ define i4 @v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x double>
 ; SSE2-SSSE3-NEXT: psrad $31, %xmm6
 ; SSE2-SSSE3-NEXT: pand %xmm2, %xmm6
 ; SSE2-SSSE3-NEXT: movmskps %xmm6, %eax
-; SSE2-SSSE3-NEXT: ## kill: %AL %AL %EAX
+; SSE2-SSSE3-NEXT: # kill: %AL %AL %EAX
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX12-LABEL: v4f64:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
 ; AVX12-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
 ; AVX12-NEXT: vextractf128 $1, %ymm0, %xmm1
 ; AVX12-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -136,12 +136,12 @@ define i4 @v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x double>
 ; AVX12-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
 ; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL %AL %EAX
+; AVX12-NEXT: # kill: %AL %AL %EAX
 ; AVX12-NEXT: vzeroupper
 ; AVX12-NEXT: retq
 ;
 ; AVX512-LABEL: v4f64:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
 ; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %k1
 ; AVX512-NEXT: vcmpltpd %ymm2, %ymm3, %k0 {%k1}
 ; AVX512-NEXT: kmovd %k0, %eax
@@ -158,7 +158,7 @@ define i4 @v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x double>

 define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
 ; SSE2-LABEL: v16i16:
-; SSE2: ## BB#0:
+; SSE2: # BB#0:
 ; SSE2-NEXT: pcmpgtw %xmm3, %xmm1
 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
 ; SSE2-NEXT: pand %xmm3, %xmm1
@@ -181,11 +181,11 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
 ; SSE2-NEXT: pcmpgtb %xmm4, %xmm2
 ; SSE2-NEXT: pand %xmm1, %xmm2
 ; SSE2-NEXT: pmovmskb %xmm2, %eax
-; SSE2-NEXT: ## kill: %AX %AX %EAX
+; SSE2-NEXT: # kill: %AX %AX %EAX
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: v16i16:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0:
 ; SSSE3-NEXT: pcmpgtw %xmm3, %xmm1
 ; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; SSSE3-NEXT: pshufb %xmm3, %xmm1
@@ -208,11 +208,11 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
 ; SSSE3-NEXT: pcmpgtb %xmm4, %xmm2
 ; SSSE3-NEXT: pand %xmm1, %xmm2
 ; SSSE3-NEXT: pmovmskb %xmm2, %eax
-; SSSE3-NEXT: ## kill: %AX %AX %EAX
+; SSSE3-NEXT: # kill: %AX %AX %EAX
 ; SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: v16i16:
-; AVX1: ## BB#0:
+; AVX1: # BB#0:
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT: vpcmpgtw %xmm4, %xmm5, %xmm4
@@ -225,12 +225,12 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
 ; AVX1-NEXT: vpacksswb %xmm1, %xmm2, %xmm1
 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: ## kill: %AX %AX %EAX
+; AVX1-NEXT: # kill: %AX %AX %EAX
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: v16i16:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
 ; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -239,16 +239,16 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
 ; AVX2-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
 ; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: ## kill: %AX %AX %EAX
+; AVX2-NEXT: # kill: %AX %AX %EAX
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: v16i16:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
 ; AVX512-NEXT: vpcmpgtw %ymm1, %ymm0, %k1
 ; AVX512-NEXT: vpcmpgtw %ymm3, %ymm2, %k0 {%k1}
 ; AVX512-NEXT: kmovd %k0, %eax
-; AVX512-NEXT: ## kill: %AX %AX %EAX
+; AVX512-NEXT: # kill: %AX %AX %EAX
 ; AVX512-NEXT: vzeroupper
 ; AVX512-NEXT: retq
   %x0 = icmp sgt <16 x i16> %a, %b
@@ -260,7 +260,7 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {

 define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
 ; SSE2-LABEL: v8i32:
-; SSE2: ## BB#0:
+; SSE2: # BB#0:
 ; SSE2-NEXT: pcmpgtd %xmm3, %xmm1
 ; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
 ; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
@@ -287,11 +287,11 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
 ; SSE2-NEXT: packuswb %xmm2, %xmm2
 ; SSE2-NEXT: pmovmskb %xmm2, %eax
-; SSE2-NEXT: ## kill: %AL %AL %EAX
+; SSE2-NEXT: # kill: %AL %AL %EAX
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: v8i32:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0:
 ; SSSE3-NEXT: pcmpgtd %xmm3, %xmm1
 ; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; SSSE3-NEXT: pshufb %xmm3, %xmm1
@@ -310,11 +310,11 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
 ; SSSE3-NEXT: pand %xmm0, %xmm4
 ; SSSE3-NEXT: pshufb {{.*#+}} xmm4 = xmm4[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; SSSE3-NEXT: pmovmskb %xmm4, %eax
-; SSSE3-NEXT: ## kill: %AL %AL %EAX
+; SSSE3-NEXT: # kill: %AL %AL %EAX
 ; SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: v8i32:
-; AVX1: ## BB#0:
+; AVX1: # BB#0:
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT: vpcmpgtd %xmm4, %xmm5, %xmm4
@@ -328,12 +328,12 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: ## kill: %AL %AL %EAX
+; AVX1-NEXT: # kill: %AL %AL %EAX
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: v8i32:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
 ; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -343,16 +343,16 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
 ; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: ## kill: %AL %AL %EAX
+; AVX2-NEXT: # kill: %AL %AL %EAX
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: v8i32:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
 ; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %k1
 ; AVX512-NEXT: vpcmpgtd %ymm3, %ymm2, %k0 {%k1}
 ; AVX512-NEXT: kmovd %k0, %eax
-; AVX512-NEXT: ## kill: %AL %AL %EAX
+; AVX512-NEXT: # kill: %AL %AL %EAX
 ; AVX512-NEXT: vzeroupper
 ; AVX512-NEXT: retq
   %x0 = icmp sgt <8 x i32> %a, %b
@@ -364,7 +364,7 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {

 define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d) {
 ; SSE2-LABEL: v8f32:
-; SSE2: ## BB#0:
+; SSE2: # BB#0:
 ; SSE2-NEXT: cmpltps %xmm1, %xmm3
 ; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[0,2,2,3,4,5,6,7]
 ; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
@@ -391,11 +391,11 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d)
 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
 ; SSE2-NEXT: packuswb %xmm2, %xmm2
 ; SSE2-NEXT: pmovmskb %xmm2, %eax
-; SSE2-NEXT: ## kill: %AL %AL %EAX
+; SSE2-NEXT: # kill: %AL %AL %EAX
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: v8f32:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0:
 ; SSSE3-NEXT: cmpltps %xmm1, %xmm3
 ; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; SSSE3-NEXT: pshufb %xmm1, %xmm3
@@ -414,11 +414,11 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d)
 ; SSSE3-NEXT: pand %xmm2, %xmm6
 ; SSSE3-NEXT: pshufb {{.*#+}} xmm6 = xmm6[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; SSSE3-NEXT: pmovmskb %xmm6, %eax
-; SSSE3-NEXT: ## kill: %AL %AL %EAX
+; SSSE3-NEXT: # kill: %AL %AL %EAX
 ; SSSE3-NEXT: retq
 ;
 ; AVX12-LABEL: v8f32:
-; AVX12: ## BB#0:
+; AVX12: # BB#0:
 ; AVX12-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
 ; AVX12-NEXT: vextractf128 $1, %ymm0, %xmm1
 ; AVX12-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -428,16 +428,16 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d)
 ; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX12-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: ## kill: %AL %AL %EAX
+; AVX12-NEXT: # kill: %AL %AL %EAX
 ; AVX12-NEXT: vzeroupper
 ; AVX12-NEXT: retq
 ;
 ; AVX512-LABEL: v8f32:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
 ; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %k1
 ; AVX512-NEXT: vcmpltps %ymm2, %ymm3, %k0 {%k1}
 ; AVX512-NEXT: kmovd %k0, %eax
-; AVX512-NEXT: ## kill: %AL %AL %EAX
+; AVX512-NEXT: # kill: %AL %AL %EAX
 ; AVX512-NEXT: vzeroupper
 ; AVX512-NEXT: retq
   %x0 = fcmp ogt <8 x float> %a, %b
@@ -449,7 +449,7 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d)

 define i32 @v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) {
 ; SSE2-SSSE3-LABEL: v32i8:
-; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3: # BB#0:
 ; SSE2-SSSE3-NEXT: pcmpgtb %xmm2, %xmm0
 ; SSE2-SSSE3-NEXT: pcmpgtb %xmm3, %xmm1
 ; SSE2-SSSE3-NEXT: pcmpgtb %xmm6, %xmm4
@@ -561,14 +561,14 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) {
 ; SSE2-SSSE3-NEXT: retq
 ;
 ; AVX1-LABEL: v32i8:
-; AVX1: ## BB#0:
+; AVX1: # BB#0:
 ; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: Lcfi0:
+; AVX1-NEXT: .Lcfi0:
 ; AVX1-NEXT: .cfi_def_cfa_offset 16
-; AVX1-NEXT: Lcfi1:
+; AVX1-NEXT: .Lcfi1:
 ; AVX1-NEXT: .cfi_offset %rbp, -16
 ; AVX1-NEXT: movq %rsp, %rbp
-; AVX1-NEXT: Lcfi2:
+; AVX1-NEXT: .Lcfi2:
 ; AVX1-NEXT: .cfi_def_cfa_register %rbp
 ; AVX1-NEXT: andq $-32, %rsp
 ; AVX1-NEXT: subq $32, %rsp
@@ -687,7 +687,7 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) {
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: v32i8:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
 ; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: vpcmpgtb %ymm3, %ymm2, %ymm1
 ; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -696,7 +696,7 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) {
 ; AVX2-NEXT: retq
 ;
 ; AVX512-LABEL: v32i8:
-; AVX512: ## BB#0:
+; AVX512: # BB#0:
 ; AVX512-NEXT: vpcmpgtb %ymm1, %ymm0, %k1
 ; AVX512-NEXT: vpcmpgtb %ymm3, %ymm2, %k0 {%k1}
 ; AVX512-NEXT: kmovd %k0, %eax