1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2025-02-01 05:01:59 +01:00
llvm-mirror/test/Transforms/InstSimplify/result-of-usub-by-nonzero-is-non-zero-and-no-overflow.ll
Roman Lebedev 5fe74069b5 [InstSimplify] simplifyUnsignedRangeCheck(): handle more cases (PR43251)
Summary:
I don't have a direct motivational case for this,
but it would be good to have this for completeness/symmetry.

This pattern is basically the motivational pattern from
https://bugs.llvm.org/show_bug.cgi?id=43251
but with a different predicate that requires that the offset is non-zero.

The completeness bit comes from the fact that a similar pattern (offset != zero)
will be needed for https://bugs.llvm.org/show_bug.cgi?id=43259,
so it'd seem to be good not to overlook very similar patterns.

Proofs: https://rise4fun.com/Alive/21b

Also, there is something odd with `isKnownNonZero()`: if the non-zero
knowledge was specified as an assumption, it didn't pick it up (PR43267).

Reviewers: spatel, nikic, xbolva00

Reviewed By: spatel

Subscribers: hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D67411

llvm-svn: 371718
2019-09-12 09:26:17 +00:00

87 lines
3.1 KiB
LLVM

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -instsimplify -S | FileCheck %s
; Here we subtract two values, check that subtraction did not overflow AND
; that the result is non-zero. This can be simplified just to a comparison
; between the base and offset.
; Positive test, `and` form: %offsetptr is `nonnull`, so %offset is known
; non-zero. The CHECK lines show the expected simplification: the
; `icmp ne %adjusted, 0` and the `and` fold away, leaving one comparison.
define i1 @t0(i64 %base, i64* nonnull %offsetptr) {
; CHECK-LABEL: @t0(
; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp uge i64 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT: ret i1 [[NO_UNDERFLOW]]
;
%offset = ptrtoint i64* %offsetptr to i64 ; non-zero: derived from nonnull ptr
%adjusted = sub i64 %base, %offset
%no_underflow = icmp uge i64 %adjusted, %base
%not_null = icmp ne i64 %adjusted, 0 ; redundant given non-zero %offset
%r = and i1 %not_null, %no_underflow
ret i1 %r
}
; Positive test, `or` form with inverted predicates (ult / eq) — the logical
; negation of @t0. Per the CHECK lines, the `icmp eq %adjusted, 0` and the
; `or` fold away, leaving only the single `ult` comparison.
define i1 @t1(i64 %base, i64* nonnull %offsetptr) {
; CHECK-LABEL: @t1(
; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i64 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT: ret i1 [[NO_UNDERFLOW]]
;
%offset = ptrtoint i64* %offsetptr to i64 ; non-zero: derived from nonnull ptr
%adjusted = sub i64 %base, %offset
%no_underflow = icmp ult i64 %adjusted, %base
%not_null = icmp eq i64 %adjusted, 0 ; redundant given non-zero %offset
%r = or i1 %not_null, %no_underflow
ret i1 %r
}
; Commutative variant of @t0: the underflow icmp has its operands swapped
; (%base on the LHS, `ule` instead of `uge`). The CHECK lines show the same
; fold still fires — only the single comparison remains.
define i1 @t2_commutative(i64 %base, i64* nonnull %offsetptr) {
; CHECK-LABEL: @t2_commutative(
; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ule i64 [[BASE]], [[ADJUSTED]]
; CHECK-NEXT: ret i1 [[NO_UNDERFLOW]]
;
%offset = ptrtoint i64* %offsetptr to i64 ; non-zero: derived from nonnull ptr
%adjusted = sub i64 %base, %offset
%no_underflow = icmp ule i64 %base, %adjusted ; swapped operand order vs @t0
%not_null = icmp ne i64 %adjusted, 0
%r = and i1 %not_null, %no_underflow
ret i1 %r
}
; Commutative variant of @t1: `or` form with the underflow icmp's operands
; swapped (%base on the LHS, `ugt` instead of `ult`). The CHECK lines show
; the fold still fires — only the single comparison remains.
define i1 @t3_commutative(i64 %base, i64* nonnull %offsetptr) {
; CHECK-LABEL: @t3_commutative(
; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ugt i64 [[BASE]], [[ADJUSTED]]
; CHECK-NEXT: ret i1 [[NO_UNDERFLOW]]
;
%offset = ptrtoint i64* %offsetptr to i64 ; non-zero: derived from nonnull ptr
%adjusted = sub i64 %base, %offset
%no_underflow = icmp ugt i64 %base, %adjusted ; swapped operand order vs @t1
%not_null = icmp eq i64 %adjusted, 0
%r = or i1 %not_null, %no_underflow
ret i1 %r
}
; We don't know that offset is non-zero, so we can't fold.
; Negative test: %offset is a plain i64 (no `nonnull` source), so the
; not-null check is not redundant. The CHECK lines assert that every
; instruction survives unsimplified.
define i1 @t4_bad(i64 %base, i64 %offset) {
; CHECK-LABEL: @t4_bad(
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp uge i64 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i64 [[ADJUSTED]], 0
; CHECK-NEXT: [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT: ret i1 [[R]]
;
%adjusted = sub i64 %base, %offset
%no_underflow = icmp uge i64 %adjusted, %base
%not_null = icmp ne i64 %adjusted, 0 ; not provably redundant here
%r = and i1 %not_null, %no_underflow
ret i1 %r
}