llvm-mirror/test/CodeGen/X86/sar_fold.ll
[X86] Improve shift combining (commit 493cb3070b, Michael Kuperstein)
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into       (shl (sext a), [56,48,32,24,16] - SarConst)
or into    (ashr (sext a), SarConst - [56,48,32,24,16]),
depending on the sign of (SarConst - [56,48,32,24,16]).
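For example (a minimal sketch, not part of the patch; the function name is hypothetical): for i32 with ShlConst = 16 and SarConst = 15, the difference 15 - 16 is negative, so the pair folds to a sign extension of the low 16 bits followed by a shl by 1; with SarConst = 17 the difference is +1 and the pair folds to the same sext followed by an ashr by 1. In LLVM IR, the first case is equivalent to:

; (shl %a, 16) ashr 15  ==  (sext of the low 16 bits of %a) shl 1
define i32 @fold_sketch(i32 %a) {
  %lo = trunc i32 %a to i16        ; keep the low 16 bits
  %se = sext i16 %lo to i32        ; sign-extend them back to i32
  %r = shl i32 %se, 1              ; shl by ShlConst - SarConst = 16 - 15
  ret i32 %r
}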

On X86, these sexts lower to MOVs (movsx and friends: movswl, movsbl).
The MOVs have the same code size as the shifts above (only a shift by 1 encodes shorter).
However, the MOVs have two advantages over shifts on x86 (illustrated in the sketch after this list):
1. A MOV can write to a register that differs from its source.
2. A MOV accepts a memory operand.
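For illustration, a hand-written comparison of the two forms for an i32 argument passed on the stack (AT&T syntax; this is a sketch, not output from the patch):

# shift form: the operand must first be loaded into the destination register
movl   4(%esp), %eax
shll   $16, %eax
sarl   $15, %eax

# sext form: a single movswl reads memory directly and may target any register
movswl 4(%esp), %eax
addl   %eax, %eax        # shl by 1 (often emitted as an add)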

This fixes PR24373.

Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161

llvm-svn: 255761
2015-12-16 11:22:37 +00:00

; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s
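; SarConst (15) < ShlConst (16): the shl+ashr pair folds to a sext from i16 (movswl) followed by a shl by 1.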
define i32 @shl16sar15(i32 %a) #0 {
; CHECK-LABEL: shl16sar15:
; CHECK: # BB#0:
; CHECK-NEXT: movswl {{[0-9]+}}(%esp), %eax
  %1 = shl i32 %a, 16
  %2 = ashr exact i32 %1, 15
  ret i32 %2
}
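
; SarConst (17) > ShlConst (16): folds to a sext from i16 (movswl) followed by an ashr by 1.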
define i32 @shl16sar17(i32 %a) #0 {
; CHECK-LABEL: shl16sar17:
; CHECK: # BB#0:
; CHECK-NEXT: movswl {{[0-9]+}}(%esp), %eax
  %1 = shl i32 %a, 16
  %2 = ashr exact i32 %1, 17
  ret i32 %2
}
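
; SarConst (23) < ShlConst (24): folds to a sext from i8 (movsbl) followed by a shl by 1.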
define i32 @shl24sar23(i32 %a) #0 {
; CHECK-LABEL: shl24sar23:
; CHECK: # BB#0:
; CHECK-NEXT: movsbl {{[0-9]+}}(%esp), %eax
  %1 = shl i32 %a, 24
  %2 = ashr exact i32 %1, 23
  ret i32 %2
}
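
; SarConst (25) > ShlConst (24): folds to a sext from i8 (movsbl) followed by an ashr by 1.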
define i32 @shl24sar25(i32 %a) #0 {
; CHECK-LABEL: shl24sar25:
; CHECK: # BB#0:
; CHECK-NEXT: movsbl {{[0-9]+}}(%esp), %eax
  %1 = shl i32 %a, 24
  %2 = ashr exact i32 %1, 25
  ret i32 %2
}