Add a pass to lower is.constant and objectsize intrinsics

This pass lowers is.constant and objectsize intrinsics that were not simplified by earlier constant folding, i.e. if the given object is not constant or if the optimized pass chain is not being used. The result is recursively simplified and constant conditionals are pruned, so that dead blocks are removed even for -O0. This allows inline asm blocks with operand constraints to work all the time.

The new pass replaces the existing lowering in the CodeGenPrepare pass and the fallbacks in SDAG/GlobalISel and FastISel; the latter now assert on these intrinsics.

Differential Revision: https://reviews.llvm.org/D65280
llvm-svn: 374784
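As a rough, hypothetical illustration (the names and the exact IR shape are assumptions, not taken from the patch): code built around __builtin_constant_p reaches this pass as something along the lines of

    %c = call i1 @llvm.is.constant.i32(i32 %x)
    br i1 %c, label %imm, label %reg   ; %imm would hold, say, asm with an "i" constraint

and when %x cannot be proven constant, the pass lowers the call to false, folds the branch, and prunes the dead %imm block, so the constrained asm never reaches instruction selection.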
; RUN: opt -lower-constant-intrinsics -S < %s | FileCheck %s

;; Ensure that an unfoldable is.constant gets lowered reasonably in
;; optimized codegen, in particular, that the "true" branch is
;; eliminated.

;; Also ensure that any unfoldable objectsize is resolved in order.

;; CHECK-NOT: tail call i32 @subfun_1()
;; CHECK: tail call i32 @subfun_2()
;; CHECK-NOT: tail call i32 @subfun_1()

declare i1 @llvm.is.constant.i32(i32 %a) nounwind readnone
declare i1 @llvm.is.constant.i64(i64 %a) nounwind readnone
declare i1 @llvm.is.constant.i256(i256 %a) nounwind readnone
declare i1 @llvm.is.constant.v2i64(<2 x i64> %a) nounwind readnone
declare i1 @llvm.is.constant.f32(float %a) nounwind readnone
declare i1 @llvm.is.constant.sl_i32i32s({i32, i32} %a) nounwind readnone
declare i1 @llvm.is.constant.a2i64([2 x i64] %a) nounwind readnone
declare i1 @llvm.is.constant.p0i64(i64* %a) nounwind readnone

declare i64 @llvm.objectsize.i64.p0i8(i8*, i1, i1, i1) nounwind readnone

declare i32 @subfun_1()
declare i32 @subfun_2()

define i32 @test_branch(i32 %in) nounwind {
  %v = call i1 @llvm.is.constant.i32(i32 %in)
  br i1 %v, label %True, label %False

True:
  %call1 = tail call i32 @subfun_1()
  ret i32 %call1

False:
  %call2 = tail call i32 @subfun_2()
  ret i32 %call2
}
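;; A hedged sketch of the expected result (not matched verbatim by the checks
;; above): %in is not provably constant, so the is.constant call lowers to
;; false, the conditional branch is folded, and the dead True block is pruned,
;; leaving roughly:
;;
;;   define i32 @test_branch(i32 %in) nounwind {
;;     %call2 = tail call i32 @subfun_2()
;;     ret i32 %call2
;;   }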

;; llvm.objectsize is another tricky case which gets folded to -1 very
;; late in the game. We'd like to ensure that llvm.is.constant of
;; llvm.objectsize is true.
define i1 @test_objectsize(i8* %obj) nounwind {
;; CHECK-LABEL: test_objectsize
;; CHECK-NOT: llvm.objectsize
;; CHECK-NOT: llvm.is.constant
;; CHECK: ret i1 true
  %os = call i64 @llvm.objectsize.i64.p0i8(i8* %obj, i1 false, i1 false, i1 false)
  %os1 = add i64 %os, 1
  %v = call i1 @llvm.is.constant.i64(i64 %os1)
  ret i1 %v
}
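;; A hedged sketch of the expectation: with an unknown object and min == false,
;; the objectsize call is lowered to the "unknown" value of -1, %os1 therefore
;; folds to a constant, and is.constant of it folds to true, matching the
;; final "ret i1 true" required above.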

@test_phi_a = dso_local global i32 0, align 4
declare dso_local i32 @test_phi_b(...)
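
;; A hedged note on intent (not stated in the source): the is.constant of a
;; value loaded from @test_phi_a below cannot be folded, so it is expected to
;; lower to false and the entry branch to always take %cond.false; the test
;; then appears to exercise that the phi nodes in %cond.end are updated
;; correctly once the %entry edge into that block is pruned.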
; Function Attrs: nounwind uwtable
define dso_local i32 @test_phi() {
entry:
  %0 = load i32, i32* @test_phi_a, align 4
  %1 = tail call i1 @llvm.is.constant.i32(i32 %0)
  br i1 %1, label %cond.end, label %cond.false

cond.false:                                       ; preds = %entry
  %call = tail call i32 bitcast (i32 (...)* @test_phi_b to i32 ()*)() #3
  %.pre = load i32, i32* @test_phi_a, align 4
  br label %cond.end

cond.end:                                         ; preds = %entry, %cond.false
  %2 = phi i32 [ %.pre, %cond.false ], [ %0, %entry ]
  %cond = phi i32 [ %call, %cond.false ], [ 1, %entry ]
  %cmp = icmp eq i32 %cond, %2
  br i1 %cmp, label %cond.true1, label %cond.end4

cond.true1:                                       ; preds = %cond.end
  %call2 = tail call i32 bitcast (i32 (...)* @test_phi_b to i32 ()*)() #3
  br label %cond.end4

cond.end4:                                        ; preds = %cond.end, %cond.true1
  ret i32 undef
}
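
;; @test_various_types below hands llvm.is.constant both non-constant
;; arguments (%v1..%v6) and literal constants (%c1..%c6) of assorted types;
;; per the negative check inside, the expectation is simply that no
;; is.constant call survives lowering, whichever operand type is used.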
define i1 @test_various_types(i256 %int, float %float, <2 x i64> %vec, {i32, i32} %struct, [2 x i64] %arr, i64* %ptr) #0 {
; CHECK-LABEL: @test_various_types(
; CHECK-NOT: llvm.is.constant
  %v1 = call i1 @llvm.is.constant.i256(i256 %int)
  %v2 = call i1 @llvm.is.constant.f32(float %float)
  %v3 = call i1 @llvm.is.constant.v2i64(<2 x i64> %vec)
  %v4 = call i1 @llvm.is.constant.sl_i32i32s({i32, i32} %struct)
  %v5 = call i1 @llvm.is.constant.a2i64([2 x i64] %arr)
  %v6 = call i1 @llvm.is.constant.p0i64(i64* %ptr)

  %c1 = call i1 @llvm.is.constant.i256(i256 -1)
  %c2 = call i1 @llvm.is.constant.f32(float 17.0)
  %c3 = call i1 @llvm.is.constant.v2i64(<2 x i64> <i64 -1, i64 44>)
  %c4 = call i1 @llvm.is.constant.sl_i32i32s({i32, i32} {i32 -1, i32 32})
  %c5 = call i1 @llvm.is.constant.a2i64([2 x i64] [i64 -1, i64 32])
  %c6 = call i1 @llvm.is.constant.p0i64(i64* inttoptr (i32 42 to i64*))

  %x1 = add i1 %v1, %c1
  %x2 = add i1 %v2, %c2
  %x3 = add i1 %v3, %c3
  %x4 = add i1 %v4, %c4
  %x5 = add i1 %v5, %c5
  %x6 = add i1 %v6, %c6

  %res2 = add i1 %x1, %x2
  %res3 = add i1 %res2, %x3
  %res4 = add i1 %res3, %x4
  %res5 = add i1 %res4, %x5
  %res6 = add i1 %res5, %x6

  ret i1 %res6
}