1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-23 03:02:36 +01:00

[InstCombine] fold not+or+neg

~((-X) | Y) --> (X - 1) & (~Y)

We generally prefer 'add' over 'sub'; this reduces the
dependency chain and looks better for codegen on
x86, ARM, and AArch64 targets.

https://llvm.org/PR45755

https://alive2.llvm.org/ce/z/cxZDSp
This commit is contained in:
Sanjay Patel 2021-04-02 11:57:34 -04:00
parent 893bc148de
commit b76ba99fa7
2 changed files with 18 additions and 6 deletions

View File

@ -3204,6 +3204,14 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
if (isa<Constant>(X) || NotVal->hasOneUse())
return BinaryOperator::CreateAdd(Builder.CreateNot(X), Y);
// ~((-X) | Y) --> (X - 1) & (~Y)
if (match(NotVal,
m_OneUse(m_c_Or(m_OneUse(m_Neg(m_Value(X))), m_Value(Y))))) {
Value *DecX = Builder.CreateAdd(X, ConstantInt::getAllOnesValue(Ty));
Value *NotY = Builder.CreateNot(Y);
return BinaryOperator::CreateAnd(DecX, NotY);
}
// ~(~X >>s Y) --> (X >>s Y)
if (match(NotVal, m_AShr(m_Not(m_Value(X)), m_Value(Y))))
return BinaryOperator::CreateAShr(X, Y);

View File

@ -408,9 +408,9 @@ define i1 @not_select_cmpf_extra_use(i1 %x, i32 %z, i32 %w, i1 %cond) {
define i8 @not_or_neg(i8 %x, i8 %y) {
; CHECK-LABEL: @not_or_neg(
; CHECK-NEXT: [[S:%.*]] = sub i8 0, [[Y:%.*]]
; CHECK-NEXT: [[O:%.*]] = or i8 [[S]], [[X:%.*]]
; CHECK-NEXT: [[NOT:%.*]] = xor i8 [[O]], -1
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[Y:%.*]], -1
; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT: [[NOT:%.*]] = and i8 [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret i8 [[NOT]]
;
%s = sub i8 0, %y
@ -422,9 +422,9 @@ define i8 @not_or_neg(i8 %x, i8 %y) {
define <3 x i5> @not_or_neg_commute_vec(<3 x i5> %x, <3 x i5> %p) {
; CHECK-LABEL: @not_or_neg_commute_vec(
; CHECK-NEXT: [[Y:%.*]] = mul <3 x i5> [[P:%.*]], <i5 1, i5 2, i5 3>
; CHECK-NEXT: [[S:%.*]] = sub <3 x i5> <i5 0, i5 0, i5 undef>, [[X:%.*]]
; CHECK-NEXT: [[O:%.*]] = or <3 x i5> [[Y]], [[S]]
; CHECK-NEXT: [[NOT:%.*]] = xor <3 x i5> [[O]], <i5 -1, i5 undef, i5 -1>
; CHECK-NEXT: [[TMP1:%.*]] = add <3 x i5> [[X:%.*]], <i5 -1, i5 -1, i5 -1>
; CHECK-NEXT: [[TMP2:%.*]] = xor <3 x i5> [[Y]], <i5 -1, i5 -1, i5 -1>
; CHECK-NEXT: [[NOT:%.*]] = and <3 x i5> [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret <3 x i5> [[NOT]]
;
%y = mul <3 x i5> %p, <i5 1, i5 2, i5 3> ; thwart complexity-based-canonicalization
@ -434,6 +434,8 @@ define <3 x i5> @not_or_neg_commute_vec(<3 x i5> %x, <3 x i5> %p) {
ret <3 x i5> %not
}
; negative test
define i8 @not_or_neg_use1(i8 %x, i8 %y) {
; CHECK-LABEL: @not_or_neg_use1(
; CHECK-NEXT: [[S:%.*]] = sub i8 0, [[Y:%.*]]
@ -449,6 +451,8 @@ define i8 @not_or_neg_use1(i8 %x, i8 %y) {
ret i8 %not
}
; negative test
define i8 @not_or_neg_use2(i8 %x, i8 %y) {
; CHECK-LABEL: @not_or_neg_use2(
; CHECK-NEXT: [[S:%.*]] = sub i8 0, [[Y:%.*]]