mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-10-21 20:12:56 +02:00
856f7f79e2
Summary: 1/ Operand folding during complex pattern matching for LEAs has been extended, such that it promotes Scale to accommodate similar operand appearing in the DAG. e.g. T1 = A + B T2 = T1 + 10 T3 = T2 + A For above DAG rooted at T3, X86AddressMode will now look like Base = B , Index = A , Scale = 2 , Disp = 10 2/ During OptimizeLEAPass down the pipeline factorization is now performed over LEAs so that if there is an opportunity then complex LEAs (having 3 operands) could be factored out. e.g. leal 1(%rax,%rcx,1), %rdx leal 1(%rax,%rcx,2), %rcx will be factored as following leal 1(%rax,%rcx,1), %rdx leal (%rdx,%rcx) , %edx 3/ Aggressive operand folding for AM based selection for LEAs is sensitive to loops, thus avoiding creation of any complex LEAs within a loop. Reviewers: lsaba, RKSimon, craig.topper, qcolombet Reviewed By: lsaba Subscribers: spatel, igorb, llvm-commits Differential Revision: https://reviews.llvm.org/D35014 llvm-svn: 313343
71 lines
2.2 KiB
LLVM
71 lines
2.2 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+slow-3ops-lea | FileCheck %s -check-prefix=X64
; RUN: llc < %s -mtriple=i686-unknown -mattr=+slow-3ops-lea | FileCheck %s -check-prefix=X86

; Struct of five i32 fields; the test addresses fields 0, 3 and 4 via GEPs.
%struct.SA = type { i32 , i32 , i32 , i32 , i32};

; Verify that with +slow-3ops-lea no complex (base+index+disp) LEA is created
; inside the loop: the adds stay as add/inc instructions on both X64 and X86.
define void @foo(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 {
; X64-LABEL: foo:
; X64: # BB#0: # %entry
; X64-NEXT: .p2align 4, 0x90
; X64-NEXT: .LBB0_1: # %loop
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movl 16(%rdi), %eax
; X64-NEXT: movl (%rdi), %ecx
; X64-NEXT: addl %eax, %ecx
; X64-NEXT: incl %ecx
; X64-NEXT: movl %ecx, 12(%rdi)
; X64-NEXT: decl %esi
; X64-NEXT: jne .LBB0_1
; X64-NEXT: # BB#2: # %exit
; X64-NEXT: addl %eax, %ecx
; X64-NEXT: movl %ecx, 16(%rdi)
; X64-NEXT: retq
;
; X86-LABEL: foo:
; X86: # BB#0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .Lcfi0:
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .Lcfi1:
; X86-NEXT: .cfi_offset %esi, -8
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: .p2align 4, 0x90
; X86-NEXT: .LBB0_1: # %loop
; X86-NEXT: # =>This Inner Loop Header: Depth=1
; X86-NEXT: movl 16(%eax), %edx
; X86-NEXT: movl (%eax), %esi
; X86-NEXT: addl %edx, %esi
; X86-NEXT: incl %esi
; X86-NEXT: movl %esi, 12(%eax)
; X86-NEXT: decl %ecx
; X86-NEXT: jne .LBB0_1
; X86-NEXT: # BB#2: # %exit
; X86-NEXT: addl %edx, %esi
; X86-NEXT: movl %esi, 16(%eax)
; X86-NEXT: popl %esi
; X86-NEXT: retl
entry:
br label %loop

loop:
%iter = phi i32 [%n ,%entry ] ,[ %iter.ctr ,%loop]
%h0 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 0
%0 = load i32, i32* %h0, align 8
%h3 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 3
%h4 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 4
%1 = load i32, i32* %h4, align 8
%add = add i32 %0, 1
%add4 = add i32 %add, %1
store i32 %add4, i32* %h3, align 4
%add29 = add i32 %add4, %1
%iter.ctr = sub i32 %iter , 1
%res = icmp ne i32 %iter.ctr , 0
br i1 %res , label %loop , label %exit

exit:
store i32 %add29, i32* %h4, align 8
ret void
}