
fixed to test features, not CPU models

llvm-svn: 258568
Sanjay Patel 2016-01-22 22:20:56 +00:00
parent 6ef10254c1
commit 40baae90ce

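For context on the commit message: a RUN line pinned with -mattr enables only the ISA feature the CHECK lines depend on, whereas -mcpu=<model> pulls in that CPU's entire feature set and scheduling model, so the checked output (for example the AVX2 register allocation in the hunks below) can shift for reasons unrelated to the feature under test. A minimal sketch of the pattern follows; the function and CHECK lines are illustrative only and are not part of this test file:

; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx2 < %s | FileCheck %s --check-prefix=AVX2
; Hypothetical function, shown only to illustrate feature-based RUN lines.
define <8 x i32> @add_v8i32(<8 x i32> %a, <8 x i32> %b) {
; AVX2-LABEL: add_v8i32:
; AVX2: vpaddd %ymm1, %ymm0, %ymm0
  %r = add <8 x i32> %a, %b
  ret <8 x i32> %r
}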

@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=knl < %s | FileCheck %s --check-prefix=AVX512
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=core-avx2 < %s | FileCheck %s --check-prefix=AVX2
-; RUN: opt -mtriple=x86_64-apple-darwin -codegenprepare -mcpu=corei7-avx -S < %s | FileCheck %s --check-prefix=AVX_SCALAR
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=skx < %s | FileCheck %s --check-prefix=SKX
+; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx512f < %s | FileCheck %s --check-prefix=AVX512
+; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx2 < %s | FileCheck %s --check-prefix=AVX2
+; RUN: opt -mtriple=x86_64-apple-darwin -codegenprepare -mattr=avx -S < %s | FileCheck %s --check-prefix=AVX_SCALAR
+; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx512f,avx512bw,avx512vl < %s | FileCheck %s --check-prefix=SKX
; AVX512-LABEL: test1
; AVX512: vmovdqu32 (%rdi), %zmm0 {%k1} {z}
@@ -536,22 +536,22 @@ define <16 x i64> @test_load_16i64(<16 x i64>* %ptrs, <16 x i1> %mask, <16 x i64
; AVX2-NEXT: vpslld $31, %xmm5, %xmm5
; AVX2-NEXT: vpsrad $31, %xmm5, %xmm5
; AVX2-NEXT: vpmovsxdq %xmm5, %ymm5
-; AVX2-NEXT: vpmaskmovq (%rdi), %ymm5, %ymm9
-; AVX2-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
-; AVX2-NEXT: vpslld $31, %xmm7, %xmm7
-; AVX2-NEXT: vpsrad $31, %xmm7, %xmm7
-; AVX2-NEXT: vpmovsxdq %xmm7, %ymm7
-; AVX2-NEXT: vpmaskmovq 32(%rdi), %ymm7, %ymm8
-; AVX2-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
-; AVX2-NEXT: vpslld $31, %xmm6, %xmm6
-; AVX2-NEXT: vpsrad $31, %xmm6, %xmm6
-; AVX2-NEXT: vpmovsxdq %xmm6, %ymm6
-; AVX2-NEXT: vpmaskmovq 64(%rdi), %ymm6, %ymm10
-; AVX2-NEXT: vblendvpd %ymm5, %ymm9, %ymm1, %ymm5
-; AVX2-NEXT: vblendvpd %ymm7, %ymm8, %ymm2, %ymm1
-; AVX2-NEXT: vblendvpd %ymm6, %ymm10, %ymm3, %ymm2
+; AVX2-NEXT: vpmaskmovq (%rdi), %ymm5, %ymm6
+; AVX2-NEXT: vblendvpd %ymm5, %ymm6, %ymm1, %ymm5
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vpmaskmovq 32(%rdi), %ymm1, %ymm6
+; AVX2-NEXT: vblendvpd %ymm1, %ymm6, %ymm2, %ymm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm2, %xmm2
+; AVX2-NEXT: vpsrad $31, %xmm2, %xmm2
+; AVX2-NEXT: vpmovsxdq %xmm2, %ymm2
+; AVX2-NEXT: vpmaskmovq 64(%rdi), %ymm2, %ymm6
+; AVX2-NEXT: vblendvpd %ymm2, %ymm6, %ymm3, %ymm2
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
@@ -595,22 +595,22 @@ define <16 x double> @test_load_16f64(<16 x double>* %ptrs, <16 x i1> %mask, <16
; AVX2-NEXT: vpslld $31, %xmm5, %xmm5
; AVX2-NEXT: vpsrad $31, %xmm5, %xmm5
; AVX2-NEXT: vpmovsxdq %xmm5, %ymm5
-; AVX2-NEXT: vmaskmovpd (%rdi), %ymm5, %ymm9
-; AVX2-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
-; AVX2-NEXT: vpslld $31, %xmm7, %xmm7
-; AVX2-NEXT: vpsrad $31, %xmm7, %xmm7
-; AVX2-NEXT: vpmovsxdq %xmm7, %ymm7
-; AVX2-NEXT: vmaskmovpd 32(%rdi), %ymm7, %ymm8
-; AVX2-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
-; AVX2-NEXT: vpslld $31, %xmm6, %xmm6
-; AVX2-NEXT: vpsrad $31, %xmm6, %xmm6
-; AVX2-NEXT: vpmovsxdq %xmm6, %ymm6
-; AVX2-NEXT: vmaskmovpd 64(%rdi), %ymm6, %ymm10
-; AVX2-NEXT: vblendvpd %ymm5, %ymm9, %ymm1, %ymm5
-; AVX2-NEXT: vblendvpd %ymm7, %ymm8, %ymm2, %ymm1
-; AVX2-NEXT: vblendvpd %ymm6, %ymm10, %ymm3, %ymm2
+; AVX2-NEXT: vmaskmovpd (%rdi), %ymm5, %ymm6
+; AVX2-NEXT: vblendvpd %ymm5, %ymm6, %ymm1, %ymm5
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vmaskmovpd 32(%rdi), %ymm1, %ymm6
+; AVX2-NEXT: vblendvpd %ymm1, %ymm6, %ymm2, %ymm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm2, %xmm2
+; AVX2-NEXT: vpsrad $31, %xmm2, %xmm2
+; AVX2-NEXT: vpmovsxdq %xmm2, %ymm2
+; AVX2-NEXT: vmaskmovpd 64(%rdi), %ymm2, %ymm6
+; AVX2-NEXT: vblendvpd %ymm2, %ymm6, %ymm3, %ymm2
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
@@ -670,27 +670,28 @@ define <32 x double> @test_load_32f64(<32 x double>* %ptrs, <32 x i1> %mask, <32
; AVX2-NEXT: .cfi_def_cfa_register %rbp
; AVX2-NEXT: andq $-32, %rsp
; AVX2-NEXT: subq $32, %rsp
-; AVX2-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
-; AVX2-NEXT: vpslld $31, %xmm8, %xmm8
-; AVX2-NEXT: vpsrad $31, %xmm8, %xmm8
-; AVX2-NEXT: vpmovsxdq %xmm8, %ymm8
-; AVX2-NEXT: vmaskmovpd 32(%rsi), %ymm8, %ymm9
-; AVX2-NEXT: vpshufd {{.*#+}} xmm10 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm10 = xmm10[0],zero,zero,zero,xmm10[1],zero,zero,zero,xmm10[2],zero,zero,zero,xmm10[3],zero,zero,zero
-; AVX2-NEXT: vpslld $31, %xmm10, %xmm10
-; AVX2-NEXT: vpsrad $31, %xmm10, %xmm10
-; AVX2-NEXT: vpmovsxdq %xmm10, %ymm10
-; AVX2-NEXT: vmaskmovpd 64(%rsi), %ymm10, %ymm11
-; AVX2-NEXT: vpshufd {{.*#+}} xmm12 = xmm0[3,1,2,3]
-; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm12 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero,xmm12[2],zero,zero,zero,xmm12[3],zero,zero,zero
-; AVX2-NEXT: vpslld $31, %xmm12, %xmm12
-; AVX2-NEXT: vpsrad $31, %xmm12, %xmm12
-; AVX2-NEXT: vpmovsxdq %xmm12, %ymm12
-; AVX2-NEXT: vmaskmovpd 96(%rsi), %ymm12, %ymm13
-; AVX2-NEXT: vblendvpd %ymm8, %ymm9, %ymm2, %ymm8
-; AVX2-NEXT: vblendvpd %ymm10, %ymm11, %ymm3, %ymm9
-; AVX2-NEXT: vblendvpd %ymm12, %ymm13, %ymm4, %ymm11
+; AVX2-NEXT: vmovapd 16(%rbp), %ymm8
+; AVX2-NEXT: vpshufd {{.*#+}} xmm9 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm9, %xmm9
+; AVX2-NEXT: vpsrad $31, %xmm9, %xmm9
+; AVX2-NEXT: vpmovsxdq %xmm9, %ymm9
+; AVX2-NEXT: vmaskmovpd 32(%rsi), %ymm9, %ymm10
+; AVX2-NEXT: vblendvpd %ymm9, %ymm10, %ymm2, %ymm9
+; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm2, %xmm2
+; AVX2-NEXT: vpsrad $31, %xmm2, %xmm2
+; AVX2-NEXT: vpmovsxdq %xmm2, %ymm2
+; AVX2-NEXT: vmaskmovpd 64(%rsi), %ymm2, %ymm10
+; AVX2-NEXT: vblendvpd %ymm2, %ymm10, %ymm3, %ymm11
+; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm2, %xmm2
+; AVX2-NEXT: vpsrad $31, %xmm2, %xmm2
+; AVX2-NEXT: vpmovsxdq %xmm2, %ymm2
+; AVX2-NEXT: vmaskmovpd 96(%rsi), %ymm2, %ymm10
+; AVX2-NEXT: vblendvpd %ymm2, %ymm10, %ymm4, %ymm4
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,2,3]
; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
@@ -698,28 +699,27 @@ define <32 x double> @test_load_32f64(<32 x double>* %ptrs, <32 x i1> %mask, <32
; AVX2-NEXT: vpsrad $31, %xmm3, %xmm3
; AVX2-NEXT: vpmovsxdq %xmm3, %ymm3
; AVX2-NEXT: vmaskmovpd 160(%rsi), %ymm3, %ymm10
-; AVX2-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[2,3,0,1]
-; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
-; AVX2-NEXT: vpslld $31, %xmm4, %xmm4
-; AVX2-NEXT: vpsrad $31, %xmm4, %xmm4
-; AVX2-NEXT: vpmovsxdq %xmm4, %ymm4
-; AVX2-NEXT: vmaskmovpd 192(%rsi), %ymm4, %ymm12
; AVX2-NEXT: vblendvpd %ymm3, %ymm10, %ymm6, %ymm3
-; AVX2-NEXT: vmovapd 16(%rbp), %ymm6
-; AVX2-NEXT: vblendvpd %ymm4, %ymm12, %ymm7, %ymm4
+; AVX2-NEXT: vpshufd {{.*#+}} xmm6 = xmm2[2,3,0,1]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm6, %xmm6
+; AVX2-NEXT: vpsrad $31, %xmm6, %xmm6
+; AVX2-NEXT: vpmovsxdq %xmm6, %ymm6
+; AVX2-NEXT: vmaskmovpd 192(%rsi), %ymm6, %ymm10
+; AVX2-NEXT: vblendvpd %ymm6, %ymm10, %ymm7, %ymm6
; AVX2-NEXT: vpshufd {{.*#+}} xmm7 = xmm2[3,1,2,3]
; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
; AVX2-NEXT: vpslld $31, %xmm7, %xmm7
; AVX2-NEXT: vpsrad $31, %xmm7, %xmm7
; AVX2-NEXT: vpmovsxdq %xmm7, %ymm7
; AVX2-NEXT: vmaskmovpd 224(%rsi), %ymm7, %ymm10
-; AVX2-NEXT: vblendvpd %ymm7, %ymm10, %ymm6, %ymm6
+; AVX2-NEXT: vblendvpd %ymm7, %ymm10, %ymm8, %ymm7
; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
-; AVX2-NEXT: vmaskmovpd (%rsi), %ymm0, %ymm7
-; AVX2-NEXT: vblendvpd %ymm0, %ymm7, %ymm1, %ymm0
+; AVX2-NEXT: vmaskmovpd (%rsi), %ymm0, %ymm8
+; AVX2-NEXT: vblendvpd %ymm0, %ymm8, %ymm1, %ymm0
; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
; AVX2-NEXT: vpslld $31, %xmm1, %xmm1
; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1
@@ -728,12 +728,12 @@ define <32 x double> @test_load_32f64(<32 x double>* %ptrs, <32 x i1> %mask, <32
; AVX2-NEXT: vblendvpd %ymm1, %ymm2, %ymm5, %ymm1
; AVX2-NEXT: vmovapd %ymm1, 128(%rdi)
; AVX2-NEXT: vmovapd %ymm0, (%rdi)
-; AVX2-NEXT: vmovapd %ymm6, 224(%rdi)
-; AVX2-NEXT: vmovapd %ymm4, 192(%rdi)
+; AVX2-NEXT: vmovapd %ymm7, 224(%rdi)
+; AVX2-NEXT: vmovapd %ymm6, 192(%rdi)
; AVX2-NEXT: vmovapd %ymm3, 160(%rdi)
-; AVX2-NEXT: vmovapd %ymm11, 96(%rdi)
-; AVX2-NEXT: vmovapd %ymm9, 64(%rdi)
-; AVX2-NEXT: vmovapd %ymm8, 32(%rdi)
+; AVX2-NEXT: vmovapd %ymm4, 96(%rdi)
+; AVX2-NEXT: vmovapd %ymm11, 64(%rdi)
+; AVX2-NEXT: vmovapd %ymm9, 32(%rdi)
; AVX2-NEXT: movq %rdi, %rax
; AVX2-NEXT: movq %rbp, %rsp
; AVX2-NEXT: popq %rbp