llvm-mirror/test/CodeGen/X86/SwizzleShuff.ll
commit 02dd192e99 by Sanjay Patel: [x86] use a single shufps when it can save instructions
This is a tiny patch with a big pile of test changes.
This partially fixes PR27885:
https://llvm.org/bugs/show_bug.cgi?id=27885

My motivating case looks like this:

  - vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
  - vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
  - vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]

  + vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]

And this happens several times in the diffs. On chips with domain-crossing penalties,
the instruction count and size reduction should usually outweigh the penalty for using
an FP op in a sequence of integer ops. On chips such as recent Intel big cores and Atom,
there is no domain-crossing penalty for shufps, so using shufps is a pure win.

So the test case diffs all appear to be improvements, except one test in
vector-shuffle-combining.ll, where we miss an opportunity to use a shift to generate
zero elements, and one test in combine-sra.ll, where multiple uses prevent the expected
shuffle combining.

Differential Revision: https://reviews.llvm.org/D27692

llvm-svn: 289837
2016-12-15 18:03:38 +00:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -x86-experimental-vector-widening-legalization | FileCheck %s
; Check that we perform a scalar XOR on i32.
define void @pull_bitcast(<4 x i8>* %pA, <4 x i8>* %pB) {
; CHECK-LABEL: pull_bitcast:
; CHECK: # BB#0:
; CHECK-NEXT: movl (%rsi), %eax
; CHECK-NEXT: xorl %eax, (%rdi)
; CHECK-NEXT: retq
%A = load <4 x i8>, <4 x i8>* %pA
%B = load <4 x i8>, <4 x i8>* %pB
%C = xor <4 x i8> %A, %B
store <4 x i8> %C, <4 x i8>* %pA
ret void
}
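
; Check that a shuffle with two uses is computed once rather than folded into each user.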
define <4 x i32> @multi_use_swizzle(<4 x i32>* %pA, <4 x i32>* %pB) {
; CHECK-LABEL: multi_use_swizzle:
; CHECK: # BB#0:
; CHECK-NEXT: vmovaps (%rdi), %xmm0
; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1],mem[1,2]
; CHECK-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,3,2,2]
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,2]
; CHECK-NEXT: vxorps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%A = load <4 x i32>, <4 x i32>* %pA
%B = load <4 x i32>, <4 x i32>* %pB
%S = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 1, i32 5, i32 6>
%S1 = shufflevector <4 x i32> %S, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 2>
%S2 = shufflevector <4 x i32> %S, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 0, i32 2>
%R = xor <4 x i32> %S1, %S2
ret <4 x i32> %R
}
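
; Same scalar-XOR check as above, but the loaded value is also stored to %pC and the
; result is returned as a vector.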
define <4 x i8> @pull_bitcast2(<4 x i8>* %pA, <4 x i8>* %pB, <4 x i8>* %pC) {
; CHECK-LABEL: pull_bitcast2:
; CHECK: # BB#0:
; CHECK-NEXT: movl (%rdi), %eax
; CHECK-NEXT: movl %eax, (%rdx)
; CHECK-NEXT: xorl (%rsi), %eax
; CHECK-NEXT: vmovd %eax, %xmm0
; CHECK-NEXT: movl %eax, (%rdi)
; CHECK-NEXT: retq
%A = load <4 x i8>, <4 x i8>* %pA
store <4 x i8> %A, <4 x i8>* %pC
%B = load <4 x i8>, <4 x i8>* %pB
%C = xor <4 x i8> %A, %B
store <4 x i8> %C, <4 x i8>* %pA
ret <4 x i8> %C
}
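
; Composing the swap shuffle <1,0,3,2> with itself is the identity, so only the load of %pA remains.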
define <4 x i32> @reverse_1(<4 x i32>* %pA, <4 x i32>* %pB) {
; CHECK-LABEL: reverse_1:
; CHECK: # BB#0:
; CHECK-NEXT: vmovaps (%rdi), %xmm0
; CHECK-NEXT: retq
%A = load <4 x i32>, <4 x i32>* %pA
%B = load <4 x i32>, <4 x i32>* %pB
%S = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
%S1 = shufflevector <4 x i32> %S, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
ret <4 x i32> %S1
}
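
; The second mask <3,2,3,2> differs from the first, so the shuffles do not cancel; they
; fold into a single vpshufd that repeats the high two elements.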
define <4 x i32> @no_reverse_shuff(<4 x i32>* %pA, <4 x i32>* %pB) {
; CHECK-LABEL: no_reverse_shuff:
; CHECK: # BB#0:
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = mem[2,3,2,3]
; CHECK-NEXT: retq
%A = load <4 x i32>, <4 x i32>* %pA
%B = load <4 x i32>, <4 x i32>* %pB
%S = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
%S1 = shufflevector <4 x i32> %S, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 3, i32 2>
ret <4 x i32> %S1
}