; Scrape artifact removed. Original file: llvm-mirror/test/CodeGen/X86/x86-64-double-precision-shift-left.ll
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=bdver1 | FileCheck %s
; Verify that for the architectures that are known to have poor latency
; double precision shift instructions we generate alternative sequence
; of instructions with lower latencies instead of shld instruction.
; C equivalent:
;uint64_t lshift1(uint64_t a, uint64_t b)
;{
; return (a << 1) | (b >> 63);
;}
; Shift amount 1 fits an LEA scale, so the backend should emit shr+lea
; instead of the slow (on bdver1) shld.
; CHECK-LABEL: lshift1:
; CHECK: shrq $63, %rsi
; CHECK-NEXT: leaq (%rsi,%rdi,2), %rax
define i64 @lshift1(i64 %a, i64 %b) nounwind readnone uwtable {
entry:
  %hi.part = shl i64 %a, 1
  %lo.part = lshr i64 %b, 63
  ; 'or' is commutative; operand order does not affect the selected code.
  %combined = or i64 %hi.part, %lo.part
  ret i64 %combined
}
; C equivalent:
;uint64_t lshift2(uint64_t a, uint64_t b)
;{
; return (a << 2) | (b >> 62);
;}
; Shift amount 2 fits an LEA scale (*4), so expect shr+lea, not shld.
; CHECK-LABEL: lshift2:
; CHECK: shrq $62, %rsi
; CHECK-NEXT: leaq (%rsi,%rdi,4), %rax
define i64 @lshift2(i64 %a, i64 %b) nounwind readnone uwtable {
entry:
  %hi.part = shl i64 %a, 2
  %lo.part = lshr i64 %b, 62
  ; 'or' is commutative; operand order does not affect the selected code.
  %combined = or i64 %hi.part, %lo.part
  ret i64 %combined
}
; C equivalent:
;uint64_t lshift7(uint64_t a, uint64_t b)
;{
; return (a << 7) | (b >> 57);
;}
; Shift amount 7 exceeds LEA scaling, so expect explicit shl+shr+lea.
; Use CHECK-LABEL (consistent with lshift1/lshift2) so FileCheck anchors
; these checks to this function's body instead of possibly matching
; output belonging to another function.
; CHECK-LABEL: lshift7:
; CHECK: shlq $7, {{.*}}
; CHECK-NEXT: shrq $57, {{.*}}
; CHECK-NEXT: leaq ({{.*}},{{.*}}), {{.*}}
define i64 @lshift7(i64 %a, i64 %b) nounwind readnone uwtable {
entry:
  %shl = shl i64 %a, 7
  %shr = lshr i64 %b, 57
  %or = or i64 %shr, %shl
  ret i64 %or
}
; C equivalent:
;uint64_t lshift63(uint64_t a, uint64_t b)
;{
; return (a << 63) | (b >> 1);
;}
; Shift amount 63 exceeds LEA scaling, so expect explicit shl+shr+lea
; (shrq with implicit-1 form for the >> 1).
; Use CHECK-LABEL (consistent with lshift1/lshift2) so FileCheck anchors
; these checks to this function's body instead of possibly matching
; output belonging to another function.
; CHECK-LABEL: lshift63:
; CHECK: shlq $63, {{.*}}
; CHECK-NEXT: shrq {{.*}}
; CHECK-NEXT: leaq ({{.*}},{{.*}}), {{.*}}
define i64 @lshift63(i64 %a, i64 %b) nounwind readnone uwtable {
entry:
  %shl = shl i64 %a, 63
  %shr = lshr i64 %b, 1
  %or = or i64 %shr, %shl
  ret i64 %or
}