
Revert "[InstCombine] Enable cast-folding in logic(cast(icmp), cast(icmp))"

Makes InstCombine enter an infinite loop when compiling v8.

This reverts commits r275989 and r276105.

llvm-svn: 276106
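
For reference, a minimal before/after sketch of the fold being reverted, reconstructed from the tests deleted below (the function names @before and @after are illustrative, not from the commit): the fold rewrites logic(zext(icmp), zext(icmp)) as zext(logic(icmp, icmp)), performing the bitwise operation at i1 before widening. The commit message does not spell out the cycle, but presumably this fold and another zext transform kept rewriting each other's output.

define i8 @before(i64 %a, i64 %b, i64 %c) {
  %1 = icmp sgt i64 %a, %b
  %2 = zext i1 %1 to i8
  %3 = icmp slt i64 %a, %c
  %4 = zext i1 %3 to i8
  %5 = and i8 %2, %4
  ret i8 %5
}

define i8 @after(i64 %a, i64 %b, i64 %c) {
  %1 = icmp sgt i64 %a, %b
  %2 = icmp slt i64 %a, %c
  %3 = and i1 %1, %2
  %4 = zext i1 %3 to i8
  ret i8 %4
}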
Benjamin Kramer 2016-07-20 11:40:16 +00:00
parent c7498938cb
commit 1dc4aabfd0
2 changed files with 2 additions and 78 deletions


@@ -1212,13 +1212,6 @@ bool InstCombiner::shouldOptimizeCast(CastInst *CI) {
       isa<CmpInst>(CastSrc) && CI->getDestTy()->isVectorTy())
     return false;
 
-  // Don't optimize the cast if it is a (zext icmp) that can already be
-  // eliminated.
-  if (auto *ZExt = dyn_cast<ZExtInst>(CI))
-    if (auto *ICmp = dyn_cast<ICmpInst>(CastSrc))
-      if (transformZExtICmp(ICmp, *ZExt, false))
-        return false;
-
   return true;
 }
 
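
The block deleted above made shouldOptimizeCast skip the cast fold whenever transformZExtICmp could already eliminate the (zext icmp) on its own. An illustrative instance of such a pattern, assuming transformZExtICmp's usual single-bit-test fold (the function name is hypothetical):

define i32 @single_bit_test(i32 %x) {
  %t = and i32 %x, 1
  %c = icmp ne i32 %t, 0
  ; transformZExtICmp can fold this zext+icmp directly to: and i32 %x, 1
  %z = zext i1 %c to i32
  ret i32 %z
}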
@@ -1267,7 +1260,8 @@ Instruction *InstCombiner::foldCastedBitwiseLogic(BinaryOperator &I) {
   Value *Cast1Src = Cast1->getOperand(0);
 
   // fold logic(cast(A), cast(B)) -> cast(logic(A, B))
-  if (shouldOptimizeCast(Cast0) && shouldOptimizeCast(Cast1)) {
+  if ((!isa<ICmpInst>(Cast0Src) || !isa<ICmpInst>(Cast1Src)) &&
+      shouldOptimizeCast(Cast0) && shouldOptimizeCast(Cast1)) {
     Value *NewOp = Builder->CreateBinOp(LogicOpc, Cast0Src, Cast1Src,
                                         I.getName());
     return CastInst::Create(CastOpcode, NewOp, DestTy);
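
With the isa<ICmpInst> guard restored, foldCastedBitwiseLogic still fires when neither cast source is an icmp. A minimal sketch of a case that remains foldable (the function name is hypothetical):

define i32 @and_of_zexts(i8 %x, i8 %y) {
  %x32 = zext i8 %x to i32
  %y32 = zext i8 %y to i32
  ; fold logic(cast(A), cast(B)) -> cast(logic(A, B)):
  ; this becomes zext (and i8 %x, %y) to i32
  %r = and i32 %x32, %y32
  ret i32 %r
}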


@@ -73,73 +73,3 @@ define <2 x i64> @fold_xor_zext_sandwich_vec(<2 x i1> %a) {
   ret <2 x i64> %zext2
 }
-
-; Assert that zexts in and(zext(icmp), zext(icmp)) can be folded
-; CHECK-LABEL: @fold_and_zext_icmp(
-; CHECK-NEXT: [[ICMP1:%.*]] = icmp sgt i64 %a, %b
-; CHECK-NEXT: [[ICMP2:%.*]] = icmp slt i64 %a, %c
-; CHECK-NEXT: [[AND:%.*]] = and i1 [[ICMP1]], [[ICMP2]]
-; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[AND]] to i8
-; CHECK-NEXT: ret i8 [[ZEXT]]
-define i8 @fold_and_zext_icmp(i64 %a, i64 %b, i64 %c) {
-  %1 = icmp sgt i64 %a, %b
-  %2 = zext i1 %1 to i8
-  %3 = icmp slt i64 %a, %c
-  %4 = zext i1 %3 to i8
-  %5 = and i8 %2, %4
-  ret i8 %5
-}
-
-; Assert that zexts in or(zext(icmp), zext(icmp)) can be folded
-; CHECK-LABEL: @fold_or_zext_icmp(
-; CHECK-NEXT: [[ICMP1:%.*]] = icmp sgt i64 %a, %b
-; CHECK-NEXT: [[ICMP2:%.*]] = icmp slt i64 %a, %c
-; CHECK-NEXT: [[OR:%.*]] = or i1 [[ICMP1]], [[ICMP2]]
-; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[OR]] to i8
-; CHECK-NEXT: ret i8 [[ZEXT]]
-define i8 @fold_or_zext_icmp(i64 %a, i64 %b, i64 %c) {
-  %1 = icmp sgt i64 %a, %b
-  %2 = zext i1 %1 to i8
-  %3 = icmp slt i64 %a, %c
-  %4 = zext i1 %3 to i8
-  %5 = or i8 %2, %4
-  ret i8 %5
-}
-
-; Assert that zexts in xor(zext(icmp), zext(icmp)) can be folded
-; CHECK-LABEL: @fold_xor_zext_icmp(
-; CHECK-NEXT: [[ICMP1:%.*]] = icmp sgt i64 %a, %b
-; CHECK-NEXT: [[ICMP2:%.*]] = icmp slt i64 %a, %c
-; CHECK-NEXT: [[XOR:%.*]] = xor i1 [[ICMP1]], [[ICMP2]]
-; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[XOR]] to i8
-; CHECK-NEXT: ret i8 [[ZEXT]]
-define i8 @fold_xor_zext_icmp(i64 %a, i64 %b, i64 %c) {
-  %1 = icmp sgt i64 %a, %b
-  %2 = zext i1 %1 to i8
-  %3 = icmp slt i64 %a, %c
-  %4 = zext i1 %3 to i8
-  %5 = xor i8 %2, %4
-  ret i8 %5
-}
-
-; Assert that zexts in logic(zext(icmp), zext(icmp)) are also folded across
-; nested logical operators.
-; CHECK-LABEL: @fold_nested_logic_zext_icmp(
-; CHECK-NEXT: [[ICMP1:%.*]] = icmp sgt i64 %a, %b
-; CHECK-NEXT: [[ICMP2:%.*]] = icmp slt i64 %a, %c
-; CHECK-NEXT: [[AND:%.*]] = and i1 [[ICMP1]], [[ICMP2]]
-; CHECK-NEXT: [[ICMP3:%.*]] = icmp eq i64 %a, %d
-; CHECK-NEXT: [[OR:%.*]] = or i1 [[AND]], [[ICMP3]]
-; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[OR]] to i8
-; CHECK-NEXT: ret i8 [[ZEXT]]
-define i8 @fold_nested_logic_zext_icmp(i64 %a, i64 %b, i64 %c, i64 %d) {
-  %1 = icmp sgt i64 %a, %b
-  %2 = zext i1 %1 to i8
-  %3 = icmp slt i64 %a, %c
-  %4 = zext i1 %3 to i8
-  %5 = and i8 %2, %4
-  %6 = icmp eq i64 %a, %d
-  %7 = zext i1 %6 to i8
-  %8 = or i8 %5, %7
-  ret i8 %8
-}