llvm-mirror/test/CodeGen/X86/lsr-static-addr.ll
Simon Pilgrim c09c832a8e [X86][Atom] Convert Atom scheduler model to SchedRW (PR32431)
Atom is the only x86 target that still uses schedule itineraries; if we can convert it, we can begin the work of removing x86 itinerary support altogether. I've also found that this will help with PR36550.
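
For readers unfamiliar with the two approaches, the shape of the conversion is roughly as follows (a minimal TableGen sketch, not taken from the actual patch; the resource names, the latency and the WriteALU binding are illustrative only):

  // Itinerary style: each instruction class lists its functional-unit stages, e.g.
  //   InstrItinData<IIC_ALU_NONMEM, [InstrStage<1, [Port0, Port1]>]>
  // SchedRW style: declare the ports as processor resources, then bind each
  // SchedWrite to them with an explicit latency.
  def AtomPort0  : ProcResource<1>;
  def AtomPort1  : ProcResource<1>;
  def AtomPort01 : ProcResGroup<[AtomPort0, AtomPort1]>;
  def : WriteRes<WriteALU, [AtomPort01]> { let Latency = 1; }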

I've focused on matching the existing model as closely as possible (relying on the schedule tests). PR36895 indicated that a lot of these numbers are incorrect, but we can fix them just as easily after this patch as before. Hopefully llvm-exegesis can help here.

A few instructions (mainly push/pop/return) rely on itinerary scheduling of multiple resource stages, but I don't think any of these are showstoppers.

There are also a few codegen changes that appear to be related to the post-RA scheduler behaving slightly differently; I haven't tracked these down, but they don't seem critical.

NOTE: I don't have access to any Atom hardware, so this hasn't been tested in the wild.

Differential Revision: https://reviews.llvm.org/D45486

llvm-svn: 329837
2018-04-11 18:23:01 +00:00


; RUN: llc -mcpu=generic -mtriple=x86_64-unknown-linux-gnu -relocation-model=static -asm-verbose=false < %s | FileCheck %s
; RUN: llc -mcpu=atom -mtriple=x86_64-unknown-linux-gnu -relocation-model=static -asm-verbose=false < %s | FileCheck %s
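
; With relocation-model=static, loop-strength reduction should let the loop
; address the global A directly with a scaled index (A(,%rax,8)) rather than
; materializing the address in a separate register.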
; CHECK: xorl %eax, %eax
; CHECK: movsd .LCPI0_0(%rip), %xmm0
; CHECK: align
; CHECK-NEXT: BB0_2:
; CHECK-NEXT: movsd A(,%rax,8)
; CHECK-NEXT: mulsd
; CHECK-NEXT: movsd
; CHECK-NEXT: incq %rax

@A = external global [0 x double]

define void @foo(i64 %n) nounwind {
entry:
  %cmp5 = icmp sgt i64 %n, 0
  br i1 %cmp5, label %for.body, label %for.end

for.body:
  %i.06 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr [0 x double], [0 x double]* @A, i64 0, i64 %i.06
  %tmp3 = load double, double* %arrayidx, align 8
  %mul = fmul double %tmp3, 2.300000e+00
  store double %mul, double* %arrayidx, align 8
  %inc = add nsw i64 %i.06, 1
  %exitcond = icmp eq i64 %inc, %n
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}