; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s

; Optimize some 64-bit multiplications by constants into two leas or one
; lea + shl, since imulq is slow (latency 5), e.g.:
;   x * 40  =>  shlq $3, %rdi
;               leaq (%rdi,%rdi,4), %rax
; This has the added benefit of allowing more multiplies to be folded into
; addressing modes, e.g.:
;   a * 24 + b  =>  leaq (%rdi,%rdi,2), %rax
;                   leaq (%rsi,%rax,8), %rax
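; (leaq (%base,%index,scale), %dst computes base + index*scale, so a single
; lea multiplies by 3, 5, or 9 when base and index are the same register.)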
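
; 81 = 9 * 9, so t1 should compile to two leas with scale 8 (x + 8*x = 9*x)
; and no imulq.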
define i64 @t1(i64 %a) nounwind readnone {
; CHECK-LABEL: t1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: leaq (%rdi,%rdi,8), %rax
; CHECK-NEXT: leaq (%rax,%rax,8), %rax
; CHECK-NEXT: retq
entry:
  %0 = mul i64 %a, 81
  ret i64 %0
}
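
; 40 = 8 * 5, so t2 should compile to a shift by 3 followed by a lea with
; scale 4 (x + 4*x = 5*x) and no imulq.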
define i64 @t2(i64 %a) nounwind readnone {
; CHECK-LABEL: t2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: shlq $3, %rdi
; CHECK-NEXT: leaq (%rdi,%rdi,4), %rax
; CHECK-NEXT: retq
entry:
  %0 = mul i64 %a, 40
  ret i64 %0
}