llvm-mirror/test/Transforms/InstCombine/smulo.ll
Philip Reames ade0ec7fb0 [instcombine] Fold overflow check using overflow intrinsic to comparison
This follows up on D104665 (which added umulo handling alongside the existing uaddo case) and generalizes to the remaining overflow intrinsics.

I went to add analogous handling to LVI, and discovered that LVI already had a more general implementation. Instead, we can port what LVI does to instcombine. (For context, LVI uses makeExactNoWrapRegion to constrain the value 'x' in blocks reached after a branch on the condition `op.with.overflow(x, C).overflow`.)

Differential Revision: https://reviews.llvm.org/D104932
2021-07-01 09:41:55 -07:00
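
As a rough illustration of the range machinery named above (a minimal sketch, not the actual InstCombine or LVI code): makeExactNoWrapRegion computes, for a constant multiplicand C, exactly the set of values of the other operand for which the signed multiply does not wrap, and the overflow bit is then a test for membership outside that range.

    #include "llvm/ADT/APInt.h"
    #include "llvm/IR/ConstantRange.h"
    #include "llvm/IR/Instruction.h"
    #include "llvm/IR/Operator.h"
    using namespace llvm;

    // Set of values x for which x * C does not signed-wrap; e.g. for i8 and
    // C == 3 this is [-42, 42], matching the (a + 42) u> 84 check in the
    // test file below.
    static ConstantRange mulNoSignedWrapRegion(const APInt &C) {
      return ConstantRange::makeExactNoWrapRegion(
          Instruction::Mul, C, OverflowingBinaryOperator::NoSignedWrap);
    }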

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
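
; The tests below verify that the overflow bit of llvm.smul.with.overflow is
; folded when one multiplicand is a constant: the i1 result becomes either a
; constant or a simple comparison on the other operand.
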
declare { i64, i1 } @llvm.smul.with.overflow.i64(i64, i64)
declare { i8, i1 } @llvm.smul.with.overflow.i8(i8, i8)
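
; With two variable operands no fold is possible; the intrinsic call and the
; extractvalue are kept as-is.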
define i1 @test_generic(i64 %a, i64 %b) {
; CHECK-LABEL: @test_generic(
; CHECK-NEXT: [[RES:%.*]] = tail call { i64, i1 } @llvm.smul.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
; CHECK-NEXT: [[OVERFLOW:%.*]] = extractvalue { i64, i1 } [[RES]], 1
; CHECK-NEXT: ret i1 [[OVERFLOW]]
;
%res = tail call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %a, i64 %b)
%overflow = extractvalue { i64, i1 } %res, 1
ret i1 %overflow
}
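
; Multiplying by 0 can never overflow, so the overflow bit folds to false.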
define i1 @test_constant0(i8 %a) {
; CHECK-LABEL: @test_constant0(
; CHECK-NEXT: ret i1 false
;
%res = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %a, i8 0)
%overflow = extractvalue { i8, i1 } %res, 1
ret i1 %overflow
}
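
; Multiplying by 1 can never overflow either.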
define i1 @test_constant1(i8 %a) {
; CHECK-LABEL: @test_constant1(
; CHECK-NEXT: ret i1 false
;
%res = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %a, i8 1)
%overflow = extractvalue { i8, i1 } %res, 1
ret i1 %overflow
}
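
; a * 2 fits in i8 only for a in [-64, 63], so overflow is reported exactly
; when a + 64 is negative as an i8.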
define i1 @test_constant2(i8 %a) {
; CHECK-LABEL: @test_constant2(
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[A:%.*]], 64
; CHECK-NEXT: [[OVERFLOW:%.*]] = icmp slt i8 [[TMP1]], 0
; CHECK-NEXT: ret i1 [[OVERFLOW]]
;
%res = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %a, i8 2)
%overflow = extractvalue { i8, i1 } %res, 1
ret i1 %overflow
}
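
; a * 3 fits in i8 only for a in [-42, 42], so overflow is the unsigned range
; check (a + 42) u> 84.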
define i1 @test_constant3(i8 %a) {
; CHECK-LABEL: @test_constant3(
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[A:%.*]], 42
; CHECK-NEXT: [[OVERFLOW:%.*]] = icmp ugt i8 [[TMP1]], 84
; CHECK-NEXT: ret i1 [[OVERFLOW]]
;
%res = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %a, i8 3)
%overflow = extractvalue { i8, i1 } %res, 1
ret i1 %overflow
}
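
; a * 4 fits in i8 only for a in [-32, 31], hence the check (a + 32) u> 63.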
define i1 @test_constant4(i8 %a) {
; CHECK-LABEL: @test_constant4(
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[A:%.*]], 32
; CHECK-NEXT: [[OVERFLOW:%.*]] = icmp ugt i8 [[TMP1]], 63
; CHECK-NEXT: ret i1 [[OVERFLOW]]
;
%res = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %a, i8 4)
%overflow = extractvalue { i8, i1 } %res, 1
ret i1 %overflow
}
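
; a * 127 fits in i8 only for a in [-1, 1], hence the check (a + 1) u> 2.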
define i1 @test_constant127(i8 %a) {
; CHECK-LABEL: @test_constant127(
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[A:%.*]], 1
; CHECK-NEXT: [[OVERFLOW:%.*]] = icmp ugt i8 [[TMP1]], 2
; CHECK-NEXT: ret i1 [[OVERFLOW]]
;
%res = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %a, i8 127)
%overflow = extractvalue { i8, i1 } %res, 1
ret i1 %overflow
}
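
; The constant 128 is -128 as an i8; a * -128 fits only for a in [0, 1], so
; overflow is simply the unsigned check a u> 1.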
define i1 @test_constant128(i8 %a) {
; CHECK-LABEL: @test_constant128(
; CHECK-NEXT: [[OVERFLOW:%.*]] = icmp ugt i8 [[A:%.*]], 1
; CHECK-NEXT: ret i1 [[OVERFLOW]]
;
%res = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %a, i8 128)
%overflow = extractvalue { i8, i1 } %res, 1
ret i1 %overflow
}
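
; The constant 255 is -1 as an i8; negation overflows only for the most
; negative value, so the overflow bit is a == -128.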
define i1 @test_constant255(i8 %a) {
; CHECK-LABEL: @test_constant255(
; CHECK-NEXT: [[OVERFLOW:%.*]] = icmp eq i8 [[A:%.*]], -128
; CHECK-NEXT: ret i1 [[OVERFLOW]]
;
%res = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %a, i8 255)
%overflow = extractvalue { i8, i1 } %res, 1
ret i1 %overflow
}