1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-22 18:54:02 +01:00

[InstCombine] Fold lshr/ashr(or(neg(x),x),bw-1) --> zext/sext(icmp_ne(x,0)) (PR50816)

Handle the missing fold reported in PR50816, which is a variant of the existing ashr(sub_nsw(X,Y),bw-1) --> sext(icmp_sgt(X,Y)) fold.

We also handle the lshr(or(neg(x),x),bw-1) --> zext(icmp_ne(x,0)) equivalent - https://alive2.llvm.org/ce/z/SnZmSj

We still allow multiple uses of the neg(x) — as this is likely to let us further simplify other uses of the neg — but not multiple uses of the or(), which would increase the instruction count.

Differential Revision: https://reviews.llvm.org/D105764
This commit is contained in:
Simon Pilgrim 2021-07-13 14:26:03 +01:00
parent ff9425b575
commit 552824ecdc
3 changed files with 34 additions and 33 deletions

View File

@ -1139,6 +1139,10 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
Value *Y;
if (ShAmt == BitWidth - 1) {
// lshr i32 or(X,-X), 31 --> zext (X != 0)
if (match(Op0, m_OneUse(m_c_Or(m_Neg(m_Value(X)), m_Deferred(X)))))
return new ZExtInst(Builder.CreateIsNotNull(X), Ty);
// lshr i32 (X -nsw Y), 31 --> zext (X < Y)
if (match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
return new ZExtInst(Builder.CreateICmpSLT(X, Y), Ty);
@ -1323,11 +1327,16 @@ Instruction *InstCombinerImpl::visitAShr(BinaryOperator &I) {
return new SExtInst(NewSh, Ty);
}
// ashr i32 (X -nsw Y), 31 --> sext (X < Y)
Value *Y;
if (ShAmt == BitWidth - 1 &&
match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
return new SExtInst(Builder.CreateICmpSLT(X, Y), Ty);
if (ShAmt == BitWidth - 1) {
// ashr i32 or(X,-X), 31 --> sext (X != 0)
if (match(Op0, m_OneUse(m_c_Or(m_Neg(m_Value(X)), m_Deferred(X)))))
return new SExtInst(Builder.CreateIsNotNull(X), Ty);
// ashr i32 (X -nsw Y), 31 --> sext (X < Y)
Value *Y;
if (match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
return new SExtInst(Builder.CreateICmpSLT(X, Y), Ty);
}
// If the shifted-out value is known-zero, then this is an exact shift.
if (!I.isExact() &&

View File

@ -74,9 +74,8 @@ define i64 @sub_ashr_or_i64(i64 %x, i64 %y) {
define i32 @neg_or_ashr_i32(i32 %x) {
; CHECK-LABEL: @neg_or_ashr_i32(
; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[X:%.*]]
; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X]]
; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[OR]], 31
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i32 [[X:%.*]], 0
; CHECK-NEXT: [[SHR:%.*]] = sext i1 [[TMP1]] to i32
; CHECK-NEXT: ret i32 [[SHR]]
;
%neg = sub i32 0, %x
@ -116,9 +115,8 @@ define i32 @sub_ashr_or_i32_commute(i32 %x, i32 %y) {
define i32 @neg_or_ashr_i32_commute(i32 %x0) {
; CHECK-LABEL: @neg_or_ashr_i32_commute(
; CHECK-NEXT: [[X:%.*]] = sdiv i32 42, [[X0:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = sub nsw i32 0, [[X]]
; CHECK-NEXT: [[OR:%.*]] = or i32 [[X]], [[NEG]]
; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[OR]], 31
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i32 [[X]], 0
; CHECK-NEXT: [[SHR:%.*]] = sext i1 [[TMP1]] to i32
; CHECK-NEXT: ret i32 [[SHR]]
;
%x = sdiv i32 42, %x0 ; thwart complexity-based canonicalization
@ -156,9 +154,8 @@ define <4 x i32> @sub_ashr_or_i32_vec_nuw_nsw(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @neg_or_ashr_i32_vec(<4 x i32> %x) {
; CHECK-LABEL: @neg_or_ashr_i32_vec(
; CHECK-NEXT: [[NEG:%.*]] = sub <4 x i32> zeroinitializer, [[X:%.*]]
; CHECK-NEXT: [[OR:%.*]] = or <4 x i32> [[NEG]], [[X]]
; CHECK-NEXT: [[SHR:%.*]] = ashr <4 x i32> [[OR]], <i32 31, i32 31, i32 31, i32 31>
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne <4 x i32> [[X:%.*]], zeroinitializer
; CHECK-NEXT: [[SHR:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[SHR]]
;
%neg = sub <4 x i32> zeroinitializer, %x
@ -182,9 +179,8 @@ define <4 x i32> @sub_ashr_or_i32_vec_commute(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @neg_or_ashr_i32_vec_commute(<4 x i32> %x0) {
; CHECK-LABEL: @neg_or_ashr_i32_vec_commute(
; CHECK-NEXT: [[X:%.*]] = sdiv <4 x i32> <i32 42, i32 42, i32 42, i32 42>, [[X0:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = sub nsw <4 x i32> zeroinitializer, [[X]]
; CHECK-NEXT: [[OR:%.*]] = or <4 x i32> [[X]], [[NEG]]
; CHECK-NEXT: [[SHR:%.*]] = ashr <4 x i32> [[OR]], <i32 31, i32 31, i32 31, i32 31>
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne <4 x i32> [[X]], zeroinitializer
; CHECK-NEXT: [[SHR:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[SHR]]
;
%x = sdiv <4 x i32> <i32 42, i32 42, i32 42, i32 42>, %x0 ; thwart complexity-based canonicalization
@ -228,8 +224,8 @@ define i32 @sub_ashr_or_i32_extra_use_or(i32 %x, i32 %y, i32* %p) {
define i32 @neg_extra_use_or_ashr_i32(i32 %x, i32* %p) {
; CHECK-LABEL: @neg_extra_use_or_ashr_i32(
; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[X:%.*]]
; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X]]
; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[OR]], 31
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i32 [[X]], 0
; CHECK-NEXT: [[SHR:%.*]] = sext i1 [[TMP1]] to i32
; CHECK-NEXT: store i32 [[NEG]], i32* [[P:%.*]], align 4
; CHECK-NEXT: ret i32 [[SHR]]
;

View File

@ -5,9 +5,8 @@
define i32 @neg_or_lshr_i32(i32 %x) {
; CHECK-LABEL: @neg_or_lshr_i32(
; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[X:%.*]]
; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X]]
; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[OR]], 31
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i32 [[X:%.*]], 0
; CHECK-NEXT: [[SHR:%.*]] = zext i1 [[TMP1]] to i32
; CHECK-NEXT: ret i32 [[SHR]]
;
%neg = sub i32 0, %x
@ -21,9 +20,8 @@ define i32 @neg_or_lshr_i32(i32 %x) {
define i32 @neg_or_lshr_i32_commute(i32 %x0) {
; CHECK-LABEL: @neg_or_lshr_i32_commute(
; CHECK-NEXT: [[X:%.*]] = sdiv i32 42, [[X0:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = sub nsw i32 0, [[X]]
; CHECK-NEXT: [[OR:%.*]] = or i32 [[X]], [[NEG]]
; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[OR]], 31
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i32 [[X]], 0
; CHECK-NEXT: [[SHR:%.*]] = zext i1 [[TMP1]] to i32
; CHECK-NEXT: ret i32 [[SHR]]
;
%x = sdiv i32 42, %x0 ; thwart complexity-based canonicalization
@ -37,9 +35,8 @@ define i32 @neg_or_lshr_i32_commute(i32 %x0) {
define <4 x i32> @neg_or_lshr_i32_vec(<4 x i32> %x) {
; CHECK-LABEL: @neg_or_lshr_i32_vec(
; CHECK-NEXT: [[NEG:%.*]] = sub <4 x i32> zeroinitializer, [[X:%.*]]
; CHECK-NEXT: [[OR:%.*]] = or <4 x i32> [[NEG]], [[X]]
; CHECK-NEXT: [[SHR:%.*]] = lshr <4 x i32> [[OR]], <i32 31, i32 31, i32 31, i32 31>
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne <4 x i32> [[X:%.*]], zeroinitializer
; CHECK-NEXT: [[SHR:%.*]] = zext <4 x i1> [[TMP1]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[SHR]]
;
%neg = sub <4 x i32> zeroinitializer, %x
@ -51,9 +48,8 @@ define <4 x i32> @neg_or_lshr_i32_vec(<4 x i32> %x) {
define <4 x i32> @neg_or_lshr_i32_vec_commute(<4 x i32> %x0) {
; CHECK-LABEL: @neg_or_lshr_i32_vec_commute(
; CHECK-NEXT: [[X:%.*]] = sdiv <4 x i32> <i32 42, i32 42, i32 42, i32 42>, [[X0:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = sub nsw <4 x i32> zeroinitializer, [[X]]
; CHECK-NEXT: [[OR:%.*]] = or <4 x i32> [[X]], [[NEG]]
; CHECK-NEXT: [[SHR:%.*]] = lshr <4 x i32> [[OR]], <i32 31, i32 31, i32 31, i32 31>
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne <4 x i32> [[X]], zeroinitializer
; CHECK-NEXT: [[SHR:%.*]] = zext <4 x i1> [[TMP1]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[SHR]]
;
%x = sdiv <4 x i32> <i32 42, i32 42, i32 42, i32 42>, %x0 ; thwart complexity-based canonicalization
@ -68,8 +64,8 @@ define <4 x i32> @neg_or_lshr_i32_vec_commute(<4 x i32> %x0) {
define i32 @neg_extra_use_or_lshr_i32(i32 %x, i32* %p) {
; CHECK-LABEL: @neg_extra_use_or_lshr_i32(
; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[X:%.*]]
; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X]]
; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[OR]], 31
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i32 [[X]], 0
; CHECK-NEXT: [[SHR:%.*]] = zext i1 [[TMP1]] to i32
; CHECK-NEXT: store i32 [[NEG]], i32* [[P:%.*]], align 4
; CHECK-NEXT: ret i32 [[SHR]]
;