1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-23 19:23:23 +01:00

[InstCombine] look through bitcasts to find selects

There was concern that creating bitcasts for the simpler potential select pattern:

define <2 x i64> @vecBitcastOp1(<4 x i1> %cmp, <2 x i64> %a) {
  %a2 = add <2 x i64> %a, %a
  %sext = sext <4 x i1> %cmp to <4 x i32>
  %bc = bitcast <4 x i32> %sext to <2 x i64>
  %and = and <2 x i64> %a2, %bc
  ret <2 x i64> %and
}

might lead to worse code for some targets, so this patch is matching the larger
patterns seen in the test cases.

The motivating example for this patch is this IR produced via SSE intrinsics in C:

define <2 x i64> @gibson(<2 x i64> %a, <2 x i64> %b) {
  %t0 = bitcast <2 x i64> %a to <4 x i32>
  %t1 = bitcast <2 x i64> %b to <4 x i32>
  %cmp = icmp sgt <4 x i32> %t0, %t1
  %sext = sext <4 x i1> %cmp to <4 x i32>
  %t2 = bitcast <4 x i32> %sext to <2 x i64>
  %and = and <2 x i64> %t2, %a
  %neg = xor <4 x i32> %sext, <i32 -1, i32 -1, i32 -1, i32 -1>
  %neg2 = bitcast <4 x i32> %neg to <2 x i64>
  %and2 = and <2 x i64> %neg2, %b
  %or = or <2 x i64> %and, %and2
  ret <2 x i64> %or
}

For an AVX target, this is currently:

vpcmpgtd  %xmm1, %xmm0, %xmm2
vpand     %xmm0, %xmm2, %xmm0
vpandn    %xmm1, %xmm2, %xmm1
vpor      %xmm1, %xmm0, %xmm0
retq

With this patch, it becomes:

vpmaxsd   %xmm1, %xmm0, %xmm0

Differential Revision: http://reviews.llvm.org/D20774

llvm-svn: 271676
This commit is contained in:
Sanjay Patel 2016-06-03 14:42:07 +00:00
parent baaf0740cf
commit 500fe712ac
2 changed files with 66 additions and 47 deletions

View File

@ -1641,24 +1641,55 @@ Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
/// We have an expression of the form (A&C)|(B&D). Check if A is (cond?-1:0)
/// and either B or D is ~(cond?-1:0) or (cond?0:-1), then we can simplify this
/// expression to "cond ? C : D or B".
static Instruction *matchSelectFromAndOr(Value *A, Value *B, Value *C, Value *D,
                                         InstCombiner::BuilderTy &Builder) {
  // If A is a sign-extended i1 condition (scalar or vector), look for the
  // complementary masks directly.
  Value *Cond = nullptr;
  if (match(A, m_SExt(m_Value(Cond))) &&
      Cond->getType()->getScalarType()->isIntegerTy(1)) {
    // ((cond ? -1:0) & C) | (B & (cond ? 0:-1)) -> cond ? C : B.
    // The 'not' may appear before or after the sext, so match both forms.
    if (match(D, m_Not(m_SExt(m_Specific(Cond)))))
      return SelectInst::Create(Cond, C, B);
    if (match(D, m_SExt(m_Not(m_Specific(Cond)))))
      return SelectInst::Create(Cond, C, B);

    // ((cond ? -1:0) & C) | ((cond ? 0:-1) & D) -> cond ? C : D.
    if (match(B, m_Not(m_SExt(m_Specific(Cond)))))
      return SelectInst::Create(Cond, C, D);
    if (match(B, m_SExt(m_Not(m_Specific(Cond)))))
      return SelectInst::Create(Cond, C, D);
  }

  // TODO: Refactor the pattern matching above and below so there's less code.

  // The sign-extended boolean condition may be hiding behind a bitcast. In that
  // case, look for the same patterns as above. However, we need to bitcast the
  // input operands to the select and bitcast the output of the select to match
  // the expected types.
  if (match(A, m_BitCast(m_SExt(m_Value(Cond)))) &&
      Cond->getType()->getScalarType()->isIntegerTy(1)) {
    // Build the new select in the condition's source type; the result is
    // bitcast back so the replacement has the same type as the original 'or'.
    Type *SrcType = cast<BitCastInst>(A)->getSrcTy();
    // ((bc Cond) & C) | (B & (bc ~Cond)) --> bc (select Cond, (bc C), (bc B))
    if (match(D, m_CombineOr(m_BitCast(m_Not(m_SExt(m_Specific(Cond)))),
                             m_BitCast(m_SExt(m_Not(m_Specific(Cond))))))) {
      Value *BitcastC = Builder.CreateBitCast(C, SrcType);
      Value *BitcastB = Builder.CreateBitCast(B, SrcType);
      Value *Select = Builder.CreateSelect(Cond, BitcastC, BitcastB);
      return CastInst::Create(Instruction::BitCast, Select, A->getType());
    }
    // ((bc Cond) & C) | ((bc ~Cond) & D) --> bc (select Cond, (bc C), (bc D))
    if (match(B, m_CombineOr(m_BitCast(m_Not(m_SExt(m_Specific(Cond)))),
                             m_BitCast(m_SExt(m_Not(m_Specific(Cond))))))) {
      Value *BitcastC = Builder.CreateBitCast(C, SrcType);
      Value *BitcastD = Builder.CreateBitCast(D, SrcType);
      Value *Select = Builder.CreateSelect(Cond, BitcastC, BitcastD);
      return CastInst::Create(Instruction::BitCast, Select, A->getType());
    }
  }

  return nullptr;
}
@ -2256,13 +2287,13 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
}
// (A & (C0?-1:0)) | (B & ~(C0?-1:0)) -> C0 ? A : B, and commuted variants.
if (Instruction *Match = matchSelectFromAndOr(A, B, C, D))
if (Instruction *Match = matchSelectFromAndOr(A, B, C, D, *Builder))
return Match;
if (Instruction *Match = matchSelectFromAndOr(B, A, D, C))
if (Instruction *Match = matchSelectFromAndOr(B, A, D, C, *Builder))
return Match;
if (Instruction *Match = matchSelectFromAndOr(C, B, A, D))
if (Instruction *Match = matchSelectFromAndOr(C, B, A, D, *Builder))
return Match;
if (Instruction *Match = matchSelectFromAndOr(D, A, B, C))
if (Instruction *Match = matchSelectFromAndOr(D, A, B, C, *Builder))
return Match;
// ((A&~B)|(~A&B)) -> A^B

View File

@ -77,19 +77,16 @@ define i32 @par(i32 %a, i32 %b, i32 %c, i32 %d) {
ret i32 %t3
}
; FIXME: In the following tests, verify that a bitcast doesn't get in the way
; In the following tests, verify that a bitcast doesn't get in the way
; of a select transform. These bitcasts are common in SSE/AVX and possibly
; other vector code because of canonicalization to i64 elements for vectors.
define <2 x i64> @bitcast_select(<4 x i1> %cmp, <2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: @bitcast_select(
; CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> %cmp to <4 x i32>
; CHECK-NEXT: [[T2:%.*]] = bitcast <4 x i32> [[SEXT]] to <2 x i64>
; CHECK-NEXT: [[AND:%.*]] = and <2 x i64> [[T2]], %a
; CHECK-NEXT: [[NEG:%.*]] = xor <4 x i32> [[SEXT]], <i32 -1, i32 -1, i32 -1, i32 -1>
; CHECK-NEXT: [[NEG2:%.*]] = bitcast <4 x i32> [[NEG]] to <2 x i64>
; CHECK-NEXT: [[AND2:%.*]] = and <2 x i64> [[NEG2]], %b
; CHECK-NEXT: [[OR:%.*]] = or <2 x i64> [[AND]], [[AND2]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> %a to <4 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> %b to <4 x i32>
; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> %cmp, <4 x i32> [[TMP1]], <4 x i32> [[TMP2]]
; CHECK-NEXT: [[OR:%.*]] = bitcast <4 x i32> [[TMP3]] to <2 x i64>
; CHECK-NEXT: ret <2 x i64> [[OR]]
;
%sext = sext <4 x i1> %cmp to <4 x i32>
@ -104,13 +101,10 @@ define <2 x i64> @bitcast_select(<4 x i1> %cmp, <2 x i64> %a, <2 x i64> %b) {
define <2 x i64> @bitcast_select_swap_or_ops(<4 x i1> %cmp, <2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: @bitcast_select_swap_or_ops(
; CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> %cmp to <4 x i32>
; CHECK-NEXT: [[T2:%.*]] = bitcast <4 x i32> [[SEXT]] to <2 x i64>
; CHECK-NEXT: [[AND:%.*]] = and <2 x i64> [[T2]], %a
; CHECK-NEXT: [[NEG:%.*]] = xor <4 x i32> [[SEXT]], <i32 -1, i32 -1, i32 -1, i32 -1>
; CHECK-NEXT: [[NEG2:%.*]] = bitcast <4 x i32> [[NEG]] to <2 x i64>
; CHECK-NEXT: [[AND2:%.*]] = and <2 x i64> [[NEG2]], %b
; CHECK-NEXT: [[OR:%.*]] = or <2 x i64> [[AND2]], [[AND]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> %a to <4 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> %b to <4 x i32>
; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> %cmp, <4 x i32> [[TMP1]], <4 x i32> [[TMP2]]
; CHECK-NEXT: [[OR:%.*]] = bitcast <4 x i32> [[TMP3]] to <2 x i64>
; CHECK-NEXT: ret <2 x i64> [[OR]]
;
%sext = sext <4 x i1> %cmp to <4 x i32>
@ -125,13 +119,10 @@ define <2 x i64> @bitcast_select_swap_or_ops(<4 x i1> %cmp, <2 x i64> %a, <2 x i
define <2 x i64> @bitcast_select_swap_and_ops(<4 x i1> %cmp, <2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: @bitcast_select_swap_and_ops(
; CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> %cmp to <4 x i32>
; CHECK-NEXT: [[T2:%.*]] = bitcast <4 x i32> [[SEXT]] to <2 x i64>
; CHECK-NEXT: [[AND:%.*]] = and <2 x i64> [[T2]], %a
; CHECK-NEXT: [[NEG:%.*]] = xor <4 x i32> [[SEXT]], <i32 -1, i32 -1, i32 -1, i32 -1>
; CHECK-NEXT: [[NEG2:%.*]] = bitcast <4 x i32> [[NEG]] to <2 x i64>
; CHECK-NEXT: [[AND2:%.*]] = and <2 x i64> [[NEG2]], %b
; CHECK-NEXT: [[OR:%.*]] = or <2 x i64> [[AND]], [[AND2]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> %a to <4 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> %b to <4 x i32>
; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> %cmp, <4 x i32> [[TMP1]], <4 x i32> [[TMP2]]
; CHECK-NEXT: [[OR:%.*]] = bitcast <4 x i32> [[TMP3]] to <2 x i64>
; CHECK-NEXT: ret <2 x i64> [[OR]]
;
%sext = sext <4 x i1> %cmp to <4 x i32>
@ -146,13 +137,10 @@ define <2 x i64> @bitcast_select_swap_and_ops(<4 x i1> %cmp, <2 x i64> %a, <2 x
define <2 x i64> @bitcast_select_swap_and_ops2(<4 x i1> %cmp, <2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: @bitcast_select_swap_and_ops2(
; CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> %cmp to <4 x i32>
; CHECK-NEXT: [[T2:%.*]] = bitcast <4 x i32> [[SEXT]] to <2 x i64>
; CHECK-NEXT: [[AND:%.*]] = and <2 x i64> [[T2]], %a
; CHECK-NEXT: [[NEG:%.*]] = xor <4 x i32> [[SEXT]], <i32 -1, i32 -1, i32 -1, i32 -1>
; CHECK-NEXT: [[NEG2:%.*]] = bitcast <4 x i32> [[NEG]] to <2 x i64>
; CHECK-NEXT: [[AND2:%.*]] = and <2 x i64> [[NEG2]], %b
; CHECK-NEXT: [[OR:%.*]] = or <2 x i64> [[AND]], [[AND2]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> %a to <4 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> %b to <4 x i32>
; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> %cmp, <4 x i32> [[TMP1]], <4 x i32> [[TMP2]]
; CHECK-NEXT: [[OR:%.*]] = bitcast <4 x i32> [[TMP3]] to <2 x i64>
; CHECK-NEXT: ret <2 x i64> [[OR]]
;
%sext = sext <4 x i1> %cmp to <4 x i32>