
[InstCombine] Relax preconditions for ashr+and+icmp fold (PR44754)

Fix for https://bugs.llvm.org/show_bug.cgi?id=44754. We already have
a fold that converts icmp (and (ashr X, C3), C2), C1 into
icmp (and X, C2'), C1', but it imposed overly strict requirements on
the transform.

Relax this by checking that both C2 and C1 don't shift out bits
(in a signed sense) when forming the new constants.
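
For concreteness, here is a minimal standalone sketch of that
precondition (plain C++ on i8 values; the actual patch uses APInt, and
the helper names here are invented for illustration), using the
constants exercised by the regression tests in this commit:

    #include <cassert>
    #include <cstdint>

    // i8 shl with two's complement wrapping, approximating APInt::shl.
    static int8_t shl8(int8_t V, unsigned Amt) {
      return static_cast<int8_t>(static_cast<uint8_t>(V) << Amt);
    }

    // Precondition from the proofs below: ((C << C3) >> C3) == C, where
    // >> is an arithmetic shift (int8_t >> is arithmetic on all common
    // compilers, and guaranteed since C++20).
    static bool survivesSignedRoundTrip(int8_t C, unsigned C3) {
      return static_cast<int8_t>(shl8(C, C3) >> C3) == C;
    }

    int main() {
      assert(survivesSignedRoundTrip(-2, 4));   // C2 = -2:  -2 -> -32 -> -2
      assert(survivesSignedRoundTrip(-4, 4));   // C1 = -4:  -4 -> -64 -> -4
      assert(!survivesSignedRoundTrip(-68, 4)); // C1 = -68: bits lost
      return 0;
    }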

Alive proofs (https://rise4fun.com/Alive/PTz0):

    Name: ashr_legal
    Pre: ((C2 << C3) >> C3) == C2 && ((C1 << C3) >> C3) == C1
    %a = ashr i16 %x, C3
    %b = and i16 %a, C2
    %c = icmp i16 %b, C1
    =>
    %d = and i16 %x, C2 << C3
    %c = icmp i16 %d, C1 << C3

    Name: ashr_shiftout_eq
    Pre: ((C2 << C3) >> C3) == C2 && ((C1 << C3) >> C3) != C1
    %a = ashr i16 %x, C3
    %b = and i16 %a, C2
    %c = icmp eq i16 %b, C1
    =>
    %c = false

Note that >> corresponds to ashr here. The case of an equality
comparison has some special handling in this transform, because it
will fold to a constant true/false result if the precondition on the
comparison constant is violated.
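
As an illustrative sanity check of that equality case (a hypothetical
brute force over all i8 inputs, not part of the patch): with the
shift, mask, and comparison constant from the
@icmp_and_ashr_neg_cmp_eq_shiftout test below, no input can make the
masked value equal the comparison constant, so eq folds to false and
ne to true:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // ((x ashr 4) & -2) == -68: -68 does not survive the signed
      // round trip by 4, so this should never hold for any i8 x.
      bool AnyEq = false;
      for (int X = -128; X <= 127; ++X) {
        int8_t Masked =
            static_cast<int8_t>((static_cast<int8_t>(X) >> 4) & -2);
        AnyEq = AnyEq || (Masked == -68);
      }
      printf("eq ever true: %d\n", AnyEq); // prints 0
      return 0;
    }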

Differential Revision: https://reviews.llvm.org/D74294
Nikita Popov committed 2020-02-09 09:57:59 +01:00
parent d9bc9d4b9a
commit 830db6ad23
2 changed files with 12 additions and 25 deletions


@@ -1666,17 +1666,13 @@ Instruction *InstCombiner::foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
     if (Cmp.isSigned() && (NewAndCst.isNegative() || NewCmpCst.isNegative()))
       return nullptr;
   } else {
-    // For an arithmetic shift right we can do the same, if we ensure
-    // the And doesn't use any bits being shifted in. Normally these would
-    // be turned into lshr by SimplifyDemandedBits, but not if there is an
-    // additional user.
+    // For an arithmetic shift, check that both constants don't use (in a
+    // signed sense) the top bits being shifted out.
     assert(ShiftOpcode == Instruction::AShr && "Unknown shift opcode");
     NewCmpCst = C1.shl(*C3);
     NewAndCst = C2.shl(*C3);
-    AnyCmpCstBitsShiftedOut = NewCmpCst.lshr(*C3) != C1;
-    if (NewAndCst.lshr(*C3) != C2)
-      return nullptr;
-    if (Cmp.isSigned() && (NewAndCst.isNegative() || NewCmpCst.isNegative()))
+    AnyCmpCstBitsShiftedOut = NewCmpCst.ashr(*C3) != C1;
+    if (NewAndCst.ashr(*C3) != C2)
       return nullptr;
   }
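
To illustrate why the lshr-based check above was overly strict (a
standalone sketch in plain C++ on i8 values, not the APInt calls from
the patch): for any nonzero shift amount, a logical-shift round trip
rejects every negative mask, while the arithmetic-shift round trip
accepts exactly the masks that lose no signed bits:

    #include <cstdint>
    #include <cstdio>

    static int8_t shl8(int8_t V, unsigned Amt) {  // shl, wrapping
      return static_cast<int8_t>(static_cast<uint8_t>(V) << Amt);
    }
    static int8_t lshr8(int8_t V, unsigned Amt) { // lshr, zero-filling
      return static_cast<int8_t>(static_cast<uint8_t>(V) >> Amt);
    }
    static int8_t ashr8(int8_t V, unsigned Amt) { // ashr, sign-extending
      return static_cast<int8_t>(V >> Amt);
    }

    int main() {
      const int8_t C2 = -2;            // mask from the tests
      const unsigned C3 = 4;           // shift amount from the tests
      int8_t NewAndCst = shl8(C2, C3); // -32, the mask of the folded form
      // Old check: lshr round trip gives 14, not -2 -> fold rejected.
      printf("lshr check: %d\n", lshr8(NewAndCst, C3) == C2); // 0
      // New check: ashr round trip gives -2 -> fold fires.
      printf("ashr check: %d\n", ashr8(NewAndCst, C3) == C2); // 1
      return 0;
    }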


@@ -1864,9 +1864,8 @@ define i1 @icmp_ashr_and_overshift(i8 %X) {
 define i1 @icmp_and_ashr_neg_and_legal(i8 %x) {
 ; CHECK-LABEL: @icmp_and_ashr_neg_and_legal(
-; CHECK-NEXT: [[ASHR:%.*]] = ashr i8 [[X:%.*]], 4
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[ASHR]], -2
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[AND]], 1
+; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X:%.*]], -32
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[TMP1]], 16
 ; CHECK-NEXT: ret i1 [[CMP]]
 ;
   %ashr = ashr i8 %x, 4
@@ -1891,9 +1890,8 @@ define i1 @icmp_and_ashr_mixed_and_shiftout(i8 %x) {
 define i1 @icmp_and_ashr_neg_cmp_slt_legal(i8 %x) {
 ; CHECK-LABEL: @icmp_and_ashr_neg_cmp_slt_legal(
-; CHECK-NEXT: [[ASHR:%.*]] = ashr i8 [[X:%.*]], 4
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[ASHR]], -2
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[AND]], -4
+; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X:%.*]], -32
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[TMP1]], -64
 ; CHECK-NEXT: ret i1 [[CMP]]
 ;
   %ashr = ashr i8 %x, 4
@@ -1918,9 +1916,8 @@ define i1 @icmp_and_ashr_neg_cmp_slt_shiftout(i8 %x) {
 define i1 @icmp_and_ashr_neg_cmp_eq_legal(i8 %x) {
 ; CHECK-LABEL: @icmp_and_ashr_neg_cmp_eq_legal(
-; CHECK-NEXT: [[ASHR:%.*]] = ashr i8 [[X:%.*]], 4
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[ASHR]], -2
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[AND]], -4
+; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X:%.*]], -32
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[TMP1]], -64
 ; CHECK-NEXT: ret i1 [[CMP]]
 ;
   %ashr = ashr i8 %x, 4
@@ -1931,10 +1928,7 @@ define i1 @icmp_and_ashr_neg_cmp_eq_legal(i8 %x) {
 define i1 @icmp_and_ashr_neg_cmp_eq_shiftout(i8 %x) {
 ; CHECK-LABEL: @icmp_and_ashr_neg_cmp_eq_shiftout(
-; CHECK-NEXT: [[ASHR:%.*]] = ashr i8 [[X:%.*]], 4
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[ASHR]], -2
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[AND]], -68
-; CHECK-NEXT: ret i1 [[CMP]]
+; CHECK-NEXT: ret i1 false
 ;
   %ashr = ashr i8 %x, 4
   %and = and i8 %ashr, -2
@@ -1944,10 +1938,7 @@ define i1 @icmp_and_ashr_neg_cmp_eq_shiftout(i8 %x) {
 define i1 @icmp_and_ashr_neg_cmp_ne_shiftout(i8 %x) {
 ; CHECK-LABEL: @icmp_and_ashr_neg_cmp_ne_shiftout(
-; CHECK-NEXT: [[ASHR:%.*]] = ashr i8 [[X:%.*]], 4
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[ASHR]], -2
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i8 [[AND]], -68
-; CHECK-NEXT: ret i1 [[CMP]]
+; CHECK-NEXT: ret i1 true
 ;
   %ashr = ashr i8 %x, 4
   %and = and i8 %ashr, -2
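
Finally, an illustrative end-to-end check of the updated CHECK lines
(a brute force over all i8 inputs; not part of the test suite): the
folded form agrees with the original for the legal slt case above:

    #include <cassert>
    #include <cstdint>

    int main() {
      // @icmp_and_ashr_neg_cmp_slt_legal: ((x ashr 4) & -2) slt -4
      // folds to (x & -32) slt -64 per the new CHECK lines.
      for (int X = -128; X <= 127; ++X) {
        int8_t V = static_cast<int8_t>(X);
        bool Before = static_cast<int8_t>((V >> 4) & -2) < -4;
        bool After = static_cast<int8_t>(V & -32) < -64;
        assert(Before == After);
      }
      return 0;
    }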