llvm-mirror/test/CodeGen/X86/shrink-compare.ll
Benjamin Kramer, commit 5d9e616519:
DAGCombiner: Don't drop extension behavior when shrinking a load when unsafe.
ReduceLoadWidth unconditionally drops extensions from loads. Limit it to the
case when all of the bits the extension would otherwise produce are dropped by
the shrink. It would be possible to shrink the load in more cases by merging
the extensions, but this isn't trivial and it is a very rare case. I left a
TODO for that case.

Fixes PR16551.

llvm-svn: 185755
2013-07-06 14:05:09 +00:00
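As a rough illustration of the distinction the commit draws, here is a minimal IR sketch. It is not part of shrink-compare.ll, and the function and value names (@narrow_is_safe, %p, %v, %lo, %c) are made up. Narrowing is fine when every bit an extending load would produce is masked away before use; test5 below is the case where the extension bits still matter.

; Sketch only (not in the test file): safe to narrow, because only the low
; 8 bits of the loaded value survive the mask, so an i8 load of the first
; byte gives the same answer and any extension bits are discarded anyway.
define i1 @narrow_is_safe(i32* %p) nounwind {
  %v = load i32* %p, align 4
  %lo = and i32 %v, 255
  %c = icmp eq i32 %lo, 47
  ret i1 %c
}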


; RUN: llc < %s -march=x86-64 | FileCheck %s

declare void @bar()

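; Masking a loaded i32 down to its low 8 bits before an equality compare
; should let the load and compare shrink to a single cmpb against memory
; through the pointer argument.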
define void @test1(i32* nocapture %X) nounwind {
entry:
  %tmp1 = load i32* %X, align 4
  %and = and i32 %tmp1, 255
  %cmp = icmp eq i32 %and, 47
  br i1 %cmp, label %if.then, label %if.end

if.then:
  tail call void @bar() nounwind
  br label %if.end

if.end:
  ret void
; CHECK: test1:
; CHECK: cmpb $47, (%{{rdi|rcx}})
}

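; The same mask-and-compare on an i32 passed in a register should become a
; cmpb against the argument's low byte.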
define void @test2(i32 %X) nounwind {
entry:
  %and = and i32 %X, 255
  %cmp = icmp eq i32 %and, 47
  br i1 %cmp, label %if.then, label %if.end

if.then:
  tail call void @bar() nounwind
  br label %if.end

if.end:
  ret void
; CHECK: test2:
; CHECK: cmpb $47, %{{dil|cl}}
}

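; Comparing the masked value against 255 should still shrink to a byte
; compare; 255 is -1 when reinterpreted as a signed 8-bit immediate, hence
; the expected cmpb $-1.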
define void @test3(i32 %X) nounwind {
entry:
  %and = and i32 %X, 255
  %cmp = icmp eq i32 %and, 255
  br i1 %cmp, label %if.then, label %if.end

if.then:
  tail call void @bar() nounwind
  br label %if.end

if.end:
  ret void
; CHECK: test3:
; CHECK: cmpb $-1, %{{dil|cl}}
}

; PR16083
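; Presumably a reduced regression test: there are no CHECK lines, so it only
; needs to make it through llc without crashing despite the degenerate
; 'and i64 0, %a'.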
define i1 @test4(i64 %a, i32 %b) {
entry:
  %tobool = icmp ne i32 %b, 0
  br i1 %tobool, label %lor.end, label %lor.rhs

lor.rhs:                                          ; preds = %entry
  %and = and i64 0, %a
  %tobool1 = icmp ne i64 %and, 0
  br label %lor.end

lor.end:                                          ; preds = %lor.rhs, %entry
  %p = phi i1 [ true, %entry ], [ %tobool1, %lor.rhs ]
  ret i1 %p
}

@x = global { i8, i8, i8, i8, i8, i8, i8, i8 } { i8 1, i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 1 }, align 4
; PR16551
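; The i56 load covers only the first seven bytes of @x. Shrinking the compare
; to a 32-bit load at x+4 would also read the eighth byte (1 in @x) and change
; the compared value, so no "cmpl $1, ...x+4" may appear. This is the unsafe
; case the commit above guards against.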
define void @test5(i32 %X) nounwind {
entry:
  %bf.load = load i56* bitcast ({ i8, i8, i8, i8, i8, i8, i8, i8 }* @x to i56*), align 4
  %bf.lshr = lshr i56 %bf.load, 32
  %bf.cast = trunc i56 %bf.lshr to i32
  %cmp = icmp ne i32 %bf.cast, 1
  br i1 %cmp, label %if.then, label %if.end

if.then:
  tail call void @bar() nounwind
  br label %if.end

if.end:
  ret void
; CHECK: test5:
; CHECK-NOT: cmpl $1,{{.*}}x+4
; CHECK: ret
}