Commit ee44d6a752 (llvm-svn: 96389):
If there exists a use of a build_vector that is the bitwise complement of the
mask, transform the node to
(and (xor x, (build_vector -1,-1,-1,-1)), (build_vector ~c1,~c2,~c3,~c4)).
Since this transformation is only useful when 1) the given build_vector will
become a load from the constant pool, and 2) (and (xor x, -1), y) matches a
single instruction, I decided this is appropriate as an x86-specific
transformation. rdar://7323335
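A concrete sketch of the combine (hypothetical values %v, %lo, %hi; not part
of the test below). The two masks are bitwise complements, so once one of them
is in the constant pool the other `and` can be rewritten into the
(and (xor ...)) form that matches ANDNPS (which computes ~a & b), and no
second constant pool entry is needed:

  ; Before: two complementary masks would naively need two constant pool
  ; loads (0x7fffffff and 0x80000000 per lane).
  %lo = and <4 x i32> %v, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
  %hi = and <4 x i32> %v, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
  ; After the combine, %hi is matched as andnps with the 0x7fffffff
  ; constant: ~0x7fffffff & %v == %v & 0x80000000, so both instructions
  ; share the single LCPI entry holding 0x7fffffff.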
; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s

; Full strength reduction wouldn't reduce register pressure, so LSR should
; stick with indexing here.

; Also checks that andps and andnps share the same constant pool entry.
; Previously LLVM would codegen two andps, one using 0x80000000 and the
; other 0x7fffffff.
; rdar://7323335

; CHECK: movaps LCPI1_0
; CHECK: movaps LCPI1_1
; CHECK-NOT: movaps LCPI1_2
; CHECK: movaps (%rsi,%rax,4), %xmm2
; CHECK: andps
; CHECK: andnps
; CHECK: movaps %xmm2, (%rdi,%rax,4)
; CHECK: addq $4, %rax
; CHECK: cmpl %eax, (%rdx)
; CHECK-NEXT: jg
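; The loop body below is a vectorized floorf. %4/%5 clear the sign bit
; (mask 0x7fffffff) and %6 extracts it (mask 0x80000000, the bitwise
; complement that triggers the andps/andnps constant sharing). The compare
; against 8.388608e+06 (2^23, whose bit pattern 0x4B000000 is the integer
; 1258291200 reused in %9, so those two constants fold into one pool entry
; as well) guards the usual add/subtract-2^23 rounding trick, after which
; the sign bit is OR'ed back in.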
define void @vvfloorf(float* nocapture %y, float* nocapture %x, i32* nocapture %n) nounwind {
entry:
  %0 = load i32* %n, align 4
  %1 = icmp sgt i32 %0, 0
  br i1 %1, label %bb, label %return

bb:
  %indvar = phi i64 [ %indvar.next, %bb ], [ 0, %entry ]
  %tmp = shl i64 %indvar, 2
  %scevgep = getelementptr float* %y, i64 %tmp
  %scevgep9 = bitcast float* %scevgep to <4 x float>*
  %scevgep10 = getelementptr float* %x, i64 %tmp
  %scevgep1011 = bitcast float* %scevgep10 to <4 x float>*
  %2 = load <4 x float>* %scevgep1011, align 16
  %3 = bitcast <4 x float> %2 to <4 x i32>
  %4 = and <4 x i32> %3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
  %5 = bitcast <4 x i32> %4 to <4 x float>
  %6 = and <4 x i32> %3, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
  %7 = tail call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %5, <4 x float> <float 8.388608e+06, float 8.388608e+06, float 8.388608e+06, float 8.388608e+06>, i8 5) nounwind
  %tmp.i4 = bitcast <4 x float> %7 to <4 x i32>
  %8 = xor <4 x i32> %tmp.i4, <i32 -1, i32 -1, i32 -1, i32 -1>
  %9 = and <4 x i32> %8, <i32 1258291200, i32 1258291200, i32 1258291200, i32 1258291200>
  %10 = or <4 x i32> %9, %6
  %11 = bitcast <4 x i32> %10 to <4 x float>
  %12 = fadd <4 x float> %2, %11
  %13 = fsub <4 x float> %12, %11
  %14 = tail call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %2, <4 x float> %13, i8 1) nounwind
  %15 = bitcast <4 x float> %14 to <4 x i32>
  %16 = tail call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %15) nounwind readnone
  %17 = fadd <4 x float> %13, %16
  %tmp.i = bitcast <4 x float> %17 to <4 x i32>
  %18 = or <4 x i32> %tmp.i, %6
  %19 = bitcast <4 x i32> %18 to <4 x float>
  store <4 x float> %19, <4 x float>* %scevgep9, align 16
  %tmp12 = add i64 %tmp, 4
  %tmp13 = trunc i64 %tmp12 to i32
  %20 = load i32* %n, align 4
  %21 = icmp sgt i32 %20, %tmp13
  %indvar.next = add i64 %indvar, 1
  br i1 %21, label %bb, label %return

return:
  ret void
}

declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone

declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>) nounwind readnone