1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2025-01-31 12:41:49 +01:00
llvm-mirror/test/Transforms/InstCombine/icmp-xor-signbit.ll
Sanjay Patel 366e501988 [InstCombine] reverse bitcast + bitwise-logic canonicalization (PR33138)
There are 2 parts to this patch made simultaneously to avoid a regression.

We're reversing the canonicalization that moves bitwise vector ops before bitcasts. 
We're moving bitwise vector ops *after* bitcasts instead. That's the 1st and 3rd hunks 
of the patch. The motivation is that there's only one fold that currently depends on 
the existing canonicalization (see next), but there are many folds that would 
automatically benefit from the new canonicalization. 
PR33138 ( https://bugs.llvm.org/show_bug.cgi?id=33138 ) shows why/how we have these 
patterns in IR.

There's an or(and,andn) pattern that requires an adjustment in order to continue matching
to 'select' because the bitcast changes position. This match is unfortunately complicated 
because it requires 4 logic ops with optional bitcast and sext ops.

Test diffs:

  1. The bitcast.ll and bitcast-bigendian.ll changes show the most basic difference - 
     bitcast comes before logic.
  2. There are also tests with no diffs in bitcast.ll that verify that we're still doing 
     folds that were enabled by the previous canonicalization.
  3. icmp-xor-signbit.ll shows the payoff. We don't need to adjust existing icmp patterns 
     to look through bitcasts.
  4. logical-select.ll contains several tests for the or(and,andn) --> select fold to 
     verify that we are still handling those cases. The lone diff shows the movement of 
     the bitcast from the new canonicalization rule.

Differential Revision: https://reviews.llvm.org/D33517

llvm-svn: 306011
2017-06-22 15:46:54 +00:00

220 lines
5.9 KiB
LLVM

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
; icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
; Scalar: xor-ing both operands with the sign mask (128 = 0x80) flips the
; sign bit of each, which turns a signed compare into the equivalent
; unsigned compare, so 'slt' folds to 'ult' and both xors disappear.
define i1 @slt_to_ult(i8 %x, i8 %y) {
; CHECK-LABEL: @slt_to_ult(
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 %x, %y
; CHECK-NEXT: ret i1 [[CMP]]
;
%a = xor i8 %x, 128
%b = xor i8 %y, 128
%cmp = icmp slt i8 %a, %b
ret i1 %cmp
}
; PR33138 - https://bugs.llvm.org/show_bug.cgi?id=33138
; Splat-vector version of the same fold: xor with a signmask splat on both
; operands converts the signed compare to the unsigned one ('slt' -> 'ult').
define <2 x i1> @slt_to_ult_splat(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @slt_to_ult_splat(
; CHECK-NEXT: [[CMP:%.*]] = icmp ult <2 x i8> %x, %y
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%a = xor <2 x i8> %x, <i8 128, i8 128>
%b = xor <2 x i8> %y, <i8 128, i8 128>
%cmp = icmp slt <2 x i8> %a, %b
ret <2 x i1> %cmp
}
; Make sure that unsigned -> signed works too.
; Reverse direction: xor with the sign mask on both operands turns the
; unsigned compare into the signed one ('ult' -> 'slt').
define i1 @ult_to_slt(i8 %x, i8 %y) {
; CHECK-LABEL: @ult_to_slt(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 %x, %y
; CHECK-NEXT: ret i1 [[CMP]]
;
%a = xor i8 %x, 128
%b = xor i8 %y, 128
%cmp = icmp ult i8 %a, %b
ret i1 %cmp
}
; Splat-vector version of the unsigned -> signed direction ('ult' -> 'slt').
define <2 x i1> @ult_to_slt_splat(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @ult_to_slt_splat(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt <2 x i8> %x, %y
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%a = xor <2 x i8> %x, <i8 128, i8 128>
%b = xor <2 x i8> %y, <i8 128, i8 128>
%cmp = icmp ult <2 x i8> %a, %b
ret <2 x i1> %cmp
}
; icmp u/s (a ^ maxsignval), (b ^ maxsignval) --> icmp s/u' a, b
; xor with 127 (the maximum signed i8 value, i.e. ~signmask) on both sides
; both swaps signedness and reverses the ordering, so 'slt' folds to 'ugt'.
define i1 @slt_to_ugt(i8 %x, i8 %y) {
; CHECK-LABEL: @slt_to_ugt(
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8 %x, %y
; CHECK-NEXT: ret i1 [[CMP]]
;
%a = xor i8 %x, 127
%b = xor i8 %y, 127
%cmp = icmp slt i8 %a, %b
ret i1 %cmp
}
; Splat-vector version: xor with a maxsignval (127) splat reverses the
; ordering and the signedness, so 'slt' folds to 'ugt'.
define <2 x i1> @slt_to_ugt_splat(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @slt_to_ugt_splat(
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt <2 x i8> %x, %y
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%a = xor <2 x i8> %x, <i8 127, i8 127>
%b = xor <2 x i8> %y, <i8 127, i8 127>
%cmp = icmp slt <2 x i8> %a, %b
ret <2 x i1> %cmp
}
; Make sure that unsigned -> signed works too.
; Unsigned -> signed direction of the maxsignval fold: 'ult' of both
; operands xor'd with 127 becomes 'sgt' on the original values.
define i1 @ult_to_sgt(i8 %x, i8 %y) {
; CHECK-LABEL: @ult_to_sgt(
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 %x, %y
; CHECK-NEXT: ret i1 [[CMP]]
;
%a = xor i8 %x, 127
%b = xor i8 %y, 127
%cmp = icmp ult i8 %a, %b
ret i1 %cmp
}
; Splat-vector version of the unsigned -> signed maxsignval fold
; ('ult' -> 'sgt').
define <2 x i1> @ult_to_sgt_splat(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @ult_to_sgt_splat(
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <2 x i8> %x, %y
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%a = xor <2 x i8> %x, <i8 127, i8 127>
%b = xor <2 x i8> %y, <i8 127, i8 127>
%cmp = icmp ult <2 x i8> %a, %b
ret <2 x i1> %cmp
}
; icmp u/s (a ^ signmask), C --> icmp s/u a, C'
; One xor'd operand vs. a constant: the xor is folded into the constant
; (15 ^ 128 = 143), signedness flips, and 'uge 143' is canonicalized to
; 'ugt 142' (shown as the signed i8 value -114).
define i1 @sge_to_ugt(i8 %x) {
; CHECK-LABEL: @sge_to_ugt(
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8 %x, -114
; CHECK-NEXT: ret i1 [[CMP]]
;
%a = xor i8 %x, 128
%cmp = icmp sge i8 %a, 15
ret i1 %cmp
}
; Splat-vector version: signmask xor against a constant splat folds into
; the compare constant and flips signedness ('sge 15' -> 'ugt -114').
define <2 x i1> @sge_to_ugt_splat(<2 x i8> %x) {
; CHECK-LABEL: @sge_to_ugt_splat(
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt <2 x i8> %x, <i8 -114, i8 -114>
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%a = xor <2 x i8> %x, <i8 128, i8 128>
%cmp = icmp sge <2 x i8> %a, <i8 15, i8 15>
ret <2 x i1> %cmp
}
; Make sure that unsigned -> signed works too.
; Unsigned -> signed direction with a constant RHS: the signmask xor folds
; into the constant and 'uge' becomes the canonical 'sgt -114'.
define i1 @uge_to_sgt(i8 %x) {
; CHECK-LABEL: @uge_to_sgt(
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 %x, -114
; CHECK-NEXT: ret i1 [[CMP]]
;
%a = xor i8 %x, 128
%cmp = icmp uge i8 %a, 15
ret i1 %cmp
}
; Splat-vector version of the unsigned -> signed constant-RHS fold
; ('uge 15' -> 'sgt -114').
define <2 x i1> @uge_to_sgt_splat(<2 x i8> %x) {
; CHECK-LABEL: @uge_to_sgt_splat(
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <2 x i8> %x, <i8 -114, i8 -114>
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%a = xor <2 x i8> %x, <i8 128, i8 128>
%cmp = icmp uge <2 x i8> %a, <i8 15, i8 15>
ret <2 x i1> %cmp
}
; icmp u/s (a ^ maxsignval), C --> icmp s/u' a, C'
; Constant-RHS version of the maxsignval (127) fold: the xor is absorbed
; into the constant (15 ^ 127 = 112), the predicate direction reverses, and
; 'ule 112' is canonicalized to 'ult 113'.
define i1 @sge_to_ult(i8 %x) {
; CHECK-LABEL: @sge_to_ult(
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 %x, 113
; CHECK-NEXT: ret i1 [[CMP]]
;
%a = xor i8 %x, 127
%cmp = icmp sge i8 %a, 15
ret i1 %cmp
}
; Splat-vector version: maxsignval xor against a constant splat reverses
; the predicate and adjusts the constant ('sge 15' -> 'ult 113').
define <2 x i1> @sge_to_ult_splat(<2 x i8> %x) {
; CHECK-LABEL: @sge_to_ult_splat(
; CHECK-NEXT: [[CMP:%.*]] = icmp ult <2 x i8> %x, <i8 113, i8 113>
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%a = xor <2 x i8> %x, <i8 127, i8 127>
%cmp = icmp sge <2 x i8> %a, <i8 15, i8 15>
ret <2 x i1> %cmp
}
; Make sure that unsigned -> signed works too.
; Unsigned -> signed direction of the maxsignval constant-RHS fold
; ('uge 15' on x^127 -> 'slt 113' on x).
define i1 @uge_to_slt(i8 %x) {
; CHECK-LABEL: @uge_to_slt(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 %x, 113
; CHECK-NEXT: ret i1 [[CMP]]
;
%a = xor i8 %x, 127
%cmp = icmp uge i8 %a, 15
ret i1 %cmp
}
; Splat-vector version of the unsigned -> signed maxsignval constant-RHS
; fold ('uge 15' -> 'slt 113').
define <2 x i1> @uge_to_slt_splat(<2 x i8> %x) {
; CHECK-LABEL: @uge_to_slt_splat(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt <2 x i8> %x, <i8 113, i8 113>
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%a = xor <2 x i8> %x, <i8 127, i8 127>
%cmp = icmp uge <2 x i8> %a, <i8 15, i8 15>
ret <2 x i1> %cmp
}
; PR33138, part 2: https://bugs.llvm.org/show_bug.cgi?id=33138
; Bitcast canonicalization ensures that we recognize the signbit constant.
; PR33138: the xor constant 0x80808080 is not a signmask for <2 x i32>, but
; after the new canonicalization moves the bitcasts above the xors, it
; becomes a recognizable <8 x i8> signbit splat, so 'sgt' folds to 'ugt'
; and only the two bitcasts remain.
define <8 x i1> @sgt_to_ugt_bitcasted_splat(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @sgt_to_ugt_bitcasted_splat(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> %x to <8 x i8>
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i32> %y to <8 x i8>
; CHECK-NEXT: [[E:%.*]] = icmp ugt <8 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret <8 x i1> [[E]]
;
%a = xor <2 x i32> %x, <i32 2155905152, i32 2155905152> ; 0x80808080
%b = xor <2 x i32> %y, <i32 2155905152, i32 2155905152>
%c = bitcast <2 x i32> %a to <8 x i8>
%d = bitcast <2 x i32> %b to <8 x i8>
%e = icmp sgt <8 x i8> %c, %d
ret <8 x i1> %e
}
; Bitcast canonicalization also exposes total simplification: with the 'or'
; moved next to the icmp, the compare is known-false and folds to a constant.
; The 'or' guarantees the sign bit of every <2 x i16> element is set after
; the bitcast (128 lands in the high byte of each i16 lane — presumably
; assuming little-endian element layout), so 'icmp sgt ..., 0' can never be
; true and the whole expression folds to a false vector (zeroinitializer).
define <2 x i1> @negative_simplify_splat(<4 x i8> %x) {
; CHECK-LABEL: @negative_simplify_splat(
; CHECK-NEXT: ret <2 x i1> zeroinitializer
;
%a = or <4 x i8> %x, <i8 0, i8 128, i8 0, i8 128>
%b = bitcast <4 x i8> %a to <2 x i16>
%c = icmp sgt <2 x i16> %b, zeroinitializer
ret <2 x i1> %c
}