llvm-mirror/test/Transforms/InstCombine/demorgan-sink-not-into-xor.ll
Commit 730509657a by Nikita Popov, 2020-02-27: [InstCombine] DCE instructions earlier
When InstCombine initially populates the worklist, it already
performs constant folding and DCE. However, because the instructions
are visited in program order at that point, this DCE can only pick up
the last instruction of a dead chain; the rest would only get picked
up in the main InstCombine run.

To avoid this, we instead perform the DCE in a separate pass over the
collected instructions, in reverse order, which allows us to pick up
full dead instruction chains. We already need to do this reverse
iteration anyway to populate the worklist, so this shouldn't add
extra cost.
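
For illustration, here is a hypothetical fully dead chain (not taken
from the patch itself):

    %a = add i32 %x, 1   ; only user is %b
    %b = mul i32 %a, 2   ; no users at all

Visited in program order, %a still has a user (%b) when it is seen, so
only %b is trivially dead at that point; visited in reverse order, %b
is removed first, which then makes %a trivially dead as well.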

This by itself only fixes a small part of the problem, though: the
same basic issue also applies during the main InstCombine loop. We
generally want DCE to occur as early as possible, because it allows
one-use folds to happen. Address this by also performing DCE while
adding deferred instructions to the main worklist.
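
As a sketch of why early DCE matters (again a hypothetical fragment,
mirroring the one-use tests in this file), an extra dead user blocks
the one-use not-sinking fold:

    %c    = icmp slt i8 %y, 0
    %n    = xor i1 %c, %q
    %dead = and i1 %n, true   ; dead user; until removed, %n is not one-use
    %r    = xor i1 %n, true   ; not of (c ^ q); sinking it into %c requires %n to have one use

The sooner %dead is deleted, the sooner the fold on %r can fire.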

This drops the number of tests that perform more than 2 InstCombine
iterations from ~80 to ~40. There are some spurious test changes due
to operand order / icmp toggling.

Differential Revision: https://reviews.llvm.org/D75008

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
; https://bugs.llvm.org/show_bug.cgi?id=38446
; Pattern:
; ~(x ^ y)
; Should be transformed into:
; (~x) ^ y
; or into
; x ^ (~y)
; While -reassociate does handle this simple pattern, it does not handle
; the more complicated motivating pattern.

; ============================================================================ ;
; Basic positive tests
; ============================================================================ ;

; If the operand is easily-invertible, fold into it.
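; An illustrative sketch (hypothetical values, not an extra RUN test): an icmp
; is 'easily invertible' because the not is absorbed by flipping the predicate:
;   %cmp = icmp slt i8 %y, 0
;   %n   = xor i1 %cmp, %other
;   %r   = xor i1 %n, true
; becomes
;   %inv = icmp sgt i8 %y, -1   ; the compare is inverted in place
;   %r   = xor i1 %inv, %other
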
declare i1 @gen1()
define i1 @positive_easyinvert(i16 %x, i8 %y) {
; CHECK-LABEL: @positive_easyinvert(
; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i16 [[X:%.*]], 0
; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i8 [[Y:%.*]], -1
; CHECK-NEXT: [[TMP4:%.*]] = xor i1 [[TMP2]], [[TMP1]]
; CHECK-NEXT: ret i1 [[TMP4]]
;
%tmp1 = icmp slt i16 %x, 0
%tmp2 = icmp slt i8 %y, 0
%tmp3 = xor i1 %tmp2, %tmp1
%tmp4 = xor i1 %tmp3, true
ret i1 %tmp4
}
define i1 @positive_easyinvert0(i8 %y) {
; CHECK-LABEL: @positive_easyinvert0(
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @gen1()
; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i8 [[Y:%.*]], -1
; CHECK-NEXT: [[TMP4:%.*]] = xor i1 [[TMP2]], [[TMP1]]
; CHECK-NEXT: ret i1 [[TMP4]]
;
%tmp1 = call i1 @gen1()
%tmp2 = icmp slt i8 %y, 0
%tmp3 = xor i1 %tmp2, %tmp1
%tmp4 = xor i1 %tmp3, true
ret i1 %tmp4
}
define i1 @positive_easyinvert1(i8 %y) {
; CHECK-LABEL: @positive_easyinvert1(
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @gen1()
; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i8 [[Y:%.*]], -1
; CHECK-NEXT: [[TMP4:%.*]] = xor i1 [[TMP2]], [[TMP1]]
; CHECK-NEXT: ret i1 [[TMP4]]
;
%tmp1 = call i1 @gen1()
%tmp2 = icmp slt i8 %y, 0
%tmp3 = xor i1 %tmp1, %tmp2
%tmp4 = xor i1 %tmp3, true
ret i1 %tmp4
}

; ============================================================================ ;
; One-use tests with easily-invertible operand.
; ============================================================================ ;

declare void @use1(i1)
define i1 @oneuse_easyinvert_0(i8 %y) {
; CHECK-LABEL: @oneuse_easyinvert_0(
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @gen1()
; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i8 [[Y:%.*]], 0
; CHECK-NEXT: call void @use1(i1 [[TMP2]])
; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = xor i1 [[TMP3]], true
; CHECK-NEXT: ret i1 [[TMP4]]
;
%tmp1 = call i1 @gen1()
%tmp2 = icmp slt i8 %y, 0
call void @use1(i1 %tmp2)
%tmp3 = xor i1 %tmp1, %tmp2
%tmp4 = xor i1 %tmp3, true
ret i1 %tmp4
}
define i1 @oneuse_easyinvert_1(i8 %y) {
; CHECK-LABEL: @oneuse_easyinvert_1(
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @gen1()
; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i8 [[Y:%.*]], 0
; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT: call void @use1(i1 [[TMP3]])
; CHECK-NEXT: [[TMP4:%.*]] = xor i1 [[TMP3]], true
; CHECK-NEXT: ret i1 [[TMP4]]
;
%tmp1 = call i1 @gen1()
%tmp2 = icmp slt i8 %y, 0
%tmp3 = xor i1 %tmp1, %tmp2
call void @use1(i1 %tmp3)
%tmp4 = xor i1 %tmp3, true
ret i1 %tmp4
}
define i1 @oneuse_easyinvert_2(i8 %y) {
; CHECK-LABEL: @oneuse_easyinvert_2(
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @gen1()
; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i8 [[Y:%.*]], 0
; CHECK-NEXT: call void @use1(i1 [[TMP2]])
; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT: call void @use1(i1 [[TMP3]])
; CHECK-NEXT: [[TMP4:%.*]] = xor i1 [[TMP3]], true
; CHECK-NEXT: ret i1 [[TMP4]]
;
%tmp1 = call i1 @gen1()
%tmp2 = icmp slt i8 %y, 0
call void @use1(i1 %tmp2)
%tmp3 = xor i1 %tmp1, %tmp2
call void @use1(i1 %tmp3)
%tmp4 = xor i1 %tmp3, true
ret i1 %tmp4
}

; ============================================================================ ;
; Negative tests
; ============================================================================ ;

; Not easily invertible.
define i32 @negative(i32 %x, i32 %y) {
; CHECK-LABEL: @negative(
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[TMP1]], -1
; CHECK-NEXT: ret i32 [[TMP2]]
;
%tmp1 = xor i32 %x, %y
%tmp2 = xor i32 %tmp1, -1
ret i32 %tmp2
}