
[InstSimplify] simplifyUnsignedRangeCheck(): if we know that X != 0, handle more cases (PR43246)

Summary:
This is motivated by the D67122 sanitizer check enhancement.
That patch seemingly worsens `-fsanitize=pointer-overflow`
overhead from 25% to 50%, which strongly implies missing folds.

In this particular case, given
```
char* test(char& base, unsigned long offset) {
  return &base + offset;
}
```
it will end up producing something like
https://godbolt.org/z/LK5-iH
which after optimizations reduces down to roughly
```
define i1 @t0(i8* nonnull %base, i64 %offset) {
  %base_int = ptrtoint i8* %base to i64
  %adjusted = add i64 %base_int, %offset
  %non_null_after_adjustment = icmp ne i64 %adjusted, 0
  %no_overflow_during_adjustment = icmp uge i64 %adjusted, %base_int
  %res = and i1 %non_null_after_adjustment, %no_overflow_during_adjustment
  ret i1 %res
}
```
Without D67122 there was no `%non_null_after_adjustment`,
and in this particular case we can get rid of the overhead:

Here we add some offset to a non-null pointer,
and check that the result does not overflow and is not a null pointer.
But since the base pointer is already non-null, and we check for overflow,
that overflow check will already catch the null pointer,
so the separate null check is redundant and can be dropped.
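For the example above, that means the whole check reduces to just the overflow
check; roughly (a sketch of the expected result, reusing the names from the IR above):
```
define i1 @t0(i8* nonnull %base, i64 %offset) {
  %base_int = ptrtoint i8* %base to i64
  %adjusted = add i64 %base_int, %offset
  %no_overflow_during_adjustment = icmp uge i64 %adjusted, %base_int
  ret i1 %no_overflow_during_adjustment
}
```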

Alive proofs:
https://rise4fun.com/Alive/WRzq

There are more "unsigned add with overflow" patterns that are not handled here,
but this is the main pattern, the one we currently consider canonical,
so it makes sense to handle it.

https://bugs.llvm.org/show_bug.cgi?id=43246

Reviewers: spatel, nikic, vsk

Reviewed By: spatel

Subscribers: hiraditya, llvm-commits, reames

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D67332

llvm-svn: 371349
Roman Lebedev 2019-09-08 20:14:15 +00:00
parent 8c0a936e23
commit ca7b7a7578
2 changed files with 41 additions and 58 deletions


@@ -1371,7 +1371,8 @@ Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
 /// Commuted variants are assumed to be handled by calling this function again
 /// with the parameters swapped.
 static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
-                                         ICmpInst *UnsignedICmp, bool IsAnd) {
+                                         ICmpInst *UnsignedICmp, bool IsAnd,
+                                         const DataLayout &DL) {
   Value *X, *Y;
   ICmpInst::Predicate EqPred;
@@ -1395,6 +1396,18 @@ static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
   if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
     return IsAnd ? UnsignedICmp : ZeroICmp;

+  // X <= Y && Y != 0  -->  X <= Y  iff X != 0
+  // X <= Y || Y != 0  -->  Y != 0  iff X != 0
+  if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
+      isKnownNonZero(X, DL))
+    return IsAnd ? UnsignedICmp : ZeroICmp;
+
+  // X > Y && Y == 0  -->  Y == 0  iff X != 0
+  // X > Y || Y == 0  -->  X > Y  iff X != 0
+  if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
+      isKnownNonZero(X, DL))
+    return IsAnd ? ZeroICmp : UnsignedICmp;
+
   // X >= Y || Y != 0 --> true
   // X >= Y || Y == 0 --> X >= Y
   if (UnsignedPred == ICmpInst::ICMP_UGE && !IsAnd) {
@@ -1587,10 +1600,11 @@ static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
 }

 static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1,
-                                 const InstrInfoQuery &IIQ) {
-  if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true))
+                                 const InstrInfoQuery &IIQ,
+                                 const DataLayout &DL) {
+  if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true, DL))
     return X;
-  if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true))
+  if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true, DL))
     return X;

   if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1))
@@ -1660,10 +1674,11 @@ static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
 }

 static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1,
-                                const InstrInfoQuery &IIQ) {
-  if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false))
+                                const InstrInfoQuery &IIQ,
+                                const DataLayout &DL) {
+  if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false, DL))
     return X;
-  if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false))
+  if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false, DL))
     return X;

   if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
@@ -1738,8 +1753,8 @@ static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q,
   auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
   auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
   if (ICmp0 && ICmp1)
-    V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q.IIQ)
-              : simplifyOrOfICmps(ICmp0, ICmp1, Q.IIQ);
+    V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q.IIQ, Q.DL)
+              : simplifyOrOfICmps(ICmp0, ICmp1, Q.IIQ, Q.DL);

   auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
   auto *FCmp1 = dyn_cast<FCmpInst>(Op1);


@@ -11,10 +11,8 @@ define i1 @t0(i8* nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t0(
 ; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
-; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp ne i64 [[ADJUSTED]], 0
 ; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp uge i64 [[ADJUSTED]], [[BASE_INT]]
-; CHECK-NEXT:    [[RES:%.*]] = and i1 [[NON_NULL_AFTER_ADJUSTMENT]], [[NO_OVERFLOW_DURING_ADJUSTMENT]]
-; CHECK-NEXT:    ret i1 [[RES]]
+; CHECK-NEXT:    ret i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]]
 ;
   %base_int = ptrtoint i8* %base to i64
   %adjusted = add i64 %base_int, %offset
@@ -27,10 +25,8 @@ define i1 @t1(i8* nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t1(
 ; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
-; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp ne i64 [[ADJUSTED]], 0
 ; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp ule i64 [[BASE_INT]], [[ADJUSTED]]
-; CHECK-NEXT:    [[RES:%.*]] = and i1 [[NON_NULL_AFTER_ADJUSTMENT]], [[NO_OVERFLOW_DURING_ADJUSTMENT]]
-; CHECK-NEXT:    ret i1 [[RES]]
+; CHECK-NEXT:    ret i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]]
 ;
   %base_int = ptrtoint i8* %base to i64
   %adjusted = add i64 %base_int, %offset
@@ -43,10 +39,8 @@ define i1 @t2(i8* nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t2(
 ; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
-; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp ne i64 [[ADJUSTED]], 0
 ; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp uge i64 [[ADJUSTED]], [[BASE_INT]]
-; CHECK-NEXT:    [[RES:%.*]] = and i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]], [[NON_NULL_AFTER_ADJUSTMENT]]
-; CHECK-NEXT:    ret i1 [[RES]]
+; CHECK-NEXT:    ret i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]]
 ;
   %base_int = ptrtoint i8* %base to i64
   %adjusted = add i64 %base_int, %offset
@@ -59,10 +53,8 @@ define i1 @t3(i8* nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t3(
 ; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
-; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp ne i64 [[ADJUSTED]], 0
 ; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp ule i64 [[BASE_INT]], [[ADJUSTED]]
-; CHECK-NEXT:    [[RES:%.*]] = and i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]], [[NON_NULL_AFTER_ADJUSTMENT]]
-; CHECK-NEXT:    ret i1 [[RES]]
+; CHECK-NEXT:    ret i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]]
 ;
   %base_int = ptrtoint i8* %base to i64
   %adjusted = add i64 %base_int, %offset
@@ -72,7 +64,7 @@ define i1 @t3(i8* nonnull %base, i64 %offset) {
   ret i1 %res
 }

-; If the joining operator was 'or', i.e. we check that either we produced null
+; If the joining operator was 'or', i.e. we check that either we produced non-null
 ; pointer, or no overflow happened, then the overflow check itself is redundant.

 define i1 @t4(i8* nonnull %base, i64 %offset) {
@@ -80,9 +72,7 @@ define i1 @t4(i8* nonnull %base, i64 %offset) {
 ; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp ne i64 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp uge i64 [[ADJUSTED]], [[BASE_INT]]
-; CHECK-NEXT:    [[RES:%.*]] = or i1 [[NON_NULL_AFTER_ADJUSTMENT]], [[NO_OVERFLOW_DURING_ADJUSTMENT]]
-; CHECK-NEXT:    ret i1 [[RES]]
+; CHECK-NEXT:    ret i1 [[NON_NULL_AFTER_ADJUSTMENT]]
 ;
   %base_int = ptrtoint i8* %base to i64
   %adjusted = add i64 %base_int, %offset
@@ -96,9 +86,7 @@ define i1 @t5(i8* nonnull %base, i64 %offset) {
 ; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp ne i64 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp ule i64 [[BASE_INT]], [[ADJUSTED]]
-; CHECK-NEXT:    [[RES:%.*]] = or i1 [[NON_NULL_AFTER_ADJUSTMENT]], [[NO_OVERFLOW_DURING_ADJUSTMENT]]
-; CHECK-NEXT:    ret i1 [[RES]]
+; CHECK-NEXT:    ret i1 [[NON_NULL_AFTER_ADJUSTMENT]]
 ;
   %base_int = ptrtoint i8* %base to i64
   %adjusted = add i64 %base_int, %offset
@@ -112,9 +100,7 @@ define i1 @t6(i8* nonnull %base, i64 %offset) {
 ; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp ne i64 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp uge i64 [[ADJUSTED]], [[BASE_INT]]
-; CHECK-NEXT:    [[RES:%.*]] = or i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]], [[NON_NULL_AFTER_ADJUSTMENT]]
-; CHECK-NEXT:    ret i1 [[RES]]
+; CHECK-NEXT:    ret i1 [[NON_NULL_AFTER_ADJUSTMENT]]
 ;
   %base_int = ptrtoint i8* %base to i64
   %adjusted = add i64 %base_int, %offset
@@ -128,9 +114,7 @@ define i1 @t7(i8* nonnull %base, i64 %offset) {
 ; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp ne i64 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp ule i64 [[BASE_INT]], [[ADJUSTED]]
-; CHECK-NEXT:    [[RES:%.*]] = or i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]], [[NON_NULL_AFTER_ADJUSTMENT]]
-; CHECK-NEXT:    ret i1 [[RES]]
+; CHECK-NEXT:    ret i1 [[NON_NULL_AFTER_ADJUSTMENT]]
 ;
   %base_int = ptrtoint i8* %base to i64
   %adjusted = add i64 %base_int, %offset
@@ -148,10 +132,8 @@ define i1 @t8(i8* nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t8(
 ; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
-; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp eq i64 [[ADJUSTED]], 0
 ; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp ult i64 [[ADJUSTED]], [[BASE_INT]]
-; CHECK-NEXT:    [[RES:%.*]] = or i1 [[NON_NULL_AFTER_ADJUSTMENT]], [[NO_OVERFLOW_DURING_ADJUSTMENT]]
-; CHECK-NEXT:    ret i1 [[RES]]
+; CHECK-NEXT:    ret i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]]
 ;
   %base_int = ptrtoint i8* %base to i64
   %adjusted = add i64 %base_int, %offset
@@ -164,10 +146,8 @@ define i1 @t9(i8* nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t9(
 ; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
-; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp eq i64 [[ADJUSTED]], 0
 ; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp ugt i64 [[BASE_INT]], [[ADJUSTED]]
-; CHECK-NEXT:    [[RES:%.*]] = or i1 [[NON_NULL_AFTER_ADJUSTMENT]], [[NO_OVERFLOW_DURING_ADJUSTMENT]]
-; CHECK-NEXT:    ret i1 [[RES]]
+; CHECK-NEXT:    ret i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]]
 ;
   %base_int = ptrtoint i8* %base to i64
   %adjusted = add i64 %base_int, %offset
@@ -180,10 +160,8 @@ define i1 @t10(i8* nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t10(
 ; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
-; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp eq i64 [[ADJUSTED]], 0
 ; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp ult i64 [[ADJUSTED]], [[BASE_INT]]
-; CHECK-NEXT:    [[RES:%.*]] = or i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]], [[NON_NULL_AFTER_ADJUSTMENT]]
-; CHECK-NEXT:    ret i1 [[RES]]
+; CHECK-NEXT:    ret i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]]
 ;
   %base_int = ptrtoint i8* %base to i64
   %adjusted = add i64 %base_int, %offset
@@ -196,10 +174,8 @@ define i1 @t11(i8* nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t11(
 ; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
-; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp eq i64 [[ADJUSTED]], 0
 ; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp ugt i64 [[BASE_INT]], [[ADJUSTED]]
-; CHECK-NEXT:    [[RES:%.*]] = or i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]], [[NON_NULL_AFTER_ADJUSTMENT]]
-; CHECK-NEXT:    ret i1 [[RES]]
+; CHECK-NEXT:    ret i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]]
 ;
   %base_int = ptrtoint i8* %base to i64
   %adjusted = add i64 %base_int, %offset
@@ -217,9 +193,7 @@ define i1 @t12(i8* nonnull %base, i64 %offset) {
 ; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp eq i64 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp ult i64 [[ADJUSTED]], [[BASE_INT]]
-; CHECK-NEXT:    [[RES:%.*]] = and i1 [[NON_NULL_AFTER_ADJUSTMENT]], [[NO_OVERFLOW_DURING_ADJUSTMENT]]
-; CHECK-NEXT:    ret i1 [[RES]]
+; CHECK-NEXT:    ret i1 [[NON_NULL_AFTER_ADJUSTMENT]]
 ;
   %base_int = ptrtoint i8* %base to i64
   %adjusted = add i64 %base_int, %offset
@@ -233,9 +207,7 @@ define i1 @t13(i8* nonnull %base, i64 %offset) {
 ; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp eq i64 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp ugt i64 [[BASE_INT]], [[ADJUSTED]]
-; CHECK-NEXT:    [[RES:%.*]] = and i1 [[NON_NULL_AFTER_ADJUSTMENT]], [[NO_OVERFLOW_DURING_ADJUSTMENT]]
-; CHECK-NEXT:    ret i1 [[RES]]
+; CHECK-NEXT:    ret i1 [[NON_NULL_AFTER_ADJUSTMENT]]
 ;
   %base_int = ptrtoint i8* %base to i64
   %adjusted = add i64 %base_int, %offset
@@ -249,9 +221,7 @@ define i1 @t14(i8* nonnull %base, i64 %offset) {
 ; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp eq i64 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp ult i64 [[ADJUSTED]], [[BASE_INT]]
-; CHECK-NEXT:    [[RES:%.*]] = and i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]], [[NON_NULL_AFTER_ADJUSTMENT]]
-; CHECK-NEXT:    ret i1 [[RES]]
+; CHECK-NEXT:    ret i1 [[NON_NULL_AFTER_ADJUSTMENT]]
 ;
   %base_int = ptrtoint i8* %base to i64
   %adjusted = add i64 %base_int, %offset
@@ -265,9 +235,7 @@ define i1 @t15(i8* nonnull %base, i64 %offset) {
 ; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp eq i64 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp ugt i64 [[BASE_INT]], [[ADJUSTED]]
-; CHECK-NEXT:    [[RES:%.*]] = and i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]], [[NON_NULL_AFTER_ADJUSTMENT]]
-; CHECK-NEXT:    ret i1 [[RES]]
+; CHECK-NEXT:    ret i1 [[NON_NULL_AFTER_ADJUSTMENT]]
 ;
   %base_int = ptrtoint i8* %base to i64
   %adjusted = add i64 %base_int, %offset