llvm-mirror/test/CodeGen/X86/break-anti-dependencies.ll
Eric Christopher, 72d7cc25f3:
Turn on list-ilp scheduling by default on x86 and x86-64, and fix up the
testcases accordingly. Some are currently XFAILed and will be filed as
bugs to be fixed or understood.

Performance results:

roughly neutral on SPEC
some microbenchmarks in the LLVM test suite are up between 100% and 150%;
only a pair of regressions remain, and those are due to be investigated

john-the-ripper saw:
10% improvement in traditional DES
8% improvement in BSDI DES
59% improvement in FreeBSD MD5
67% improvement in OpenBSD Blowfish
14% improvement in LM DES

Small compile time impact.

llvm-svn: 127208
2011-03-08 02:42:25 +00:00

; Without list-burr scheduling we may not see the difference in codegen here.
; RUN: llc < %s -march=x86-64 -post-RA-scheduler -pre-RA-sched=list-burr -break-anti-dependencies=none > %t
; RUN: grep {%xmm0} %t | count 14
; RUN: not grep {%xmm1} %t
; RUN: llc < %s -march=x86-64 -post-RA-scheduler -break-anti-dependencies=critical > %t
; RUN: grep {%xmm0} %t | count 7
; RUN: grep {%xmm1} %t | count 7
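; The function below builds two independent dependence chains, one rooted at
; the load of %p and one at the load of %r. Without anti-dependency breaking,
; the post-RA scheduler keeps both chains in %xmm0 (14 uses, no %xmm1); with
; critical-path breaking it is expected to rename one chain into %xmm1, giving
; seven uses of each register, which is what the grep counts above verify.
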
define void @goo(double* %r, double* %p, double* %q) nounwind {
entry:
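  ; First chain: fadd/fmul sequence on the value loaded from %p.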
  %0 = load double* %p, align 8
  %1 = fadd double %0, 1.100000e+00
  %2 = fmul double %1, 1.200000e+00
  %3 = fadd double %2, 1.300000e+00
  %4 = fmul double %3, 1.400000e+00
  %5 = fadd double %4, 1.500000e+00
  %6 = fptosi double %5 to i32
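  ; Second, independent chain: the same fadd/fmul pattern on the value loaded from %r.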
  %7 = load double* %r, align 8
  %8 = fadd double %7, 7.100000e+00
  %9 = fmul double %8, 7.200000e+00
  %10 = fadd double %9, 7.300000e+00
  %11 = fmul double %10, 7.400000e+00
  %12 = fadd double %11, 7.500000e+00
  %13 = fptosi double %12 to i32
  %14 = icmp slt i32 %6, %13
  br i1 %14, label %bb, label %return

bb:
  store double 9.300000e+00, double* %q, align 8
  ret void

return:
  ret void
}