mirror of https://github.com/RPCS3/llvm-mirror.git
llvm-mirror/test/CodeGen/X86/sar_fold64.ll
Michael Kuperstein 493cb3070b [X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into       (shl  (sext a), [56,48,32,24,16] - SarConst)
or into    (lshr (sext a), SarConst - [56,48,32,24,16]),
depending on the sign of (SarConst - [56,48,32,24,16]).
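
For example, a minimal sketch of the SarConst < [56,48,32,24,16] case, using
the constants from the shl48sar47 test below (the value names %lo, %se, and
%r are illustrative, not from the commit):

  ; before the combine
  %1 = shl i64 %a, 48
  %2 = ashr exact i64 %1, 47

  ; after the combine: sign-extend the low 16 bits, then shl by 48 - 47 = 1
  %lo = trunc i64 %a to i16
  %se = sext i16 %lo to i64
  %r  = shl i64 %se, 1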

Sign extensions (sexts) on X86 lower to MOV instructions (e.g. movsbq, movswq).
These MOVs have the same code size as the SHIFT pair above (only a shift by 1
encodes smaller). However, the MOVs have two advantages over SHIFTs on x86, as
the sketch after this list illustrates:
1. A MOV can write to a register that differs from its source.
2. A MOV accepts a memory operand.
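
For instance (an illustrative sketch, not taken from the commit), a
sign-extending MOV can pull a 16-bit value straight from memory into a fresh
register, while the equivalent shift pair must load the value first and then
shift it in place:

  movswq (%rdi), %rax      # one instruction: memory operand, separate destination

  movq   (%rdi), %rax      # shift pair: load first...
  shlq   $48, %rax         # ...then shift %rax in place
  sarq   $47, %rax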

This fixes PR24373.

Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161

llvm-svn: 255761
2015-12-16 11:22:37 +00:00


; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s

; SarConst (47) < shl amount (48): combine to sext of the low 16 bits
; (movswq) followed by a shl by 48 - 47 = 1.
define i32 @shl48sar47(i64 %a) #0 {
; CHECK-LABEL: shl48sar47:
; CHECK:       # BB#0:
; CHECK-NEXT:    movswq %di, %rax
  %1 = shl i64 %a, 48
  %2 = ashr exact i64 %1, 47
  %3 = trunc i64 %2 to i32
  ret i32 %3
}

; SarConst (49) > shl amount (48): combine to sext of the low 16 bits
; followed by an lshr by 49 - 48 = 1.
define i32 @shl48sar49(i64 %a) #0 {
; CHECK-LABEL: shl48sar49:
; CHECK:       # BB#0:
; CHECK-NEXT:    movswq %di, %rax
  %1 = shl i64 %a, 48
  %2 = ashr exact i64 %1, 49
  %3 = trunc i64 %2 to i32
  ret i32 %3
}

; SarConst (55) < shl amount (56): combine to sext of the low 8 bits
; (movsbq) followed by a shl by 56 - 55 = 1.
define i32 @shl56sar55(i64 %a) #0 {
; CHECK-LABEL: shl56sar55:
; CHECK:       # BB#0:
; CHECK-NEXT:    movsbq %dil, %rax
  %1 = shl i64 %a, 56
  %2 = ashr exact i64 %1, 55
  %3 = trunc i64 %2 to i32
  ret i32 %3
}

; SarConst (57) > shl amount (56): combine to sext of the low 8 bits
; followed by an lshr by 57 - 56 = 1.
define i32 @shl56sar57(i64 %a) #0 {
; CHECK-LABEL: shl56sar57:
; CHECK:       # BB#0:
; CHECK-NEXT:    movsbq %dil, %rax
  %1 = shl i64 %a, 56
  %2 = ashr exact i64 %1, 57
  %3 = trunc i64 %2 to i32
  ret i32 %3
}

attributes #0 = { nounwind }