
Avoid infinite looping in AllGlobalLoadUsesSimpleEnoughForHeapSRA(). This can happen when PHI uses are recursively dependent on each other.

llvm-svn: 72710
Author: Evan Cheng
Date:   2009-06-02 00:56:07 +00:00
Commit: 7875093e82 (parent: fe3b3add52)

2 changed files with 133 additions and 6 deletions
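The fix threads a second visited set through the recursive analysis. As a rough illustration, here is a small self-contained C++ sketch of the same two-set idiom over a toy use graph. It is not LLVM code: the names (UseGraph, analyzeUses) are invented, `analyzed` plays the role of LoadUsingPHIs, and `thisWalk` plays the role of LoadUsingPHIsPerLoad.

// Minimal sketch of the two-set pattern the patch introduces.
// Node 0 stands in for the load; nodes 1 and 2 stand in for PHIs
// that use each other (the recursive dependence from the commit message).
#include <cstdio>
#include <set>
#include <vector>

typedef std::vector<std::vector<int> > UseGraph;   // node -> its users

static bool analyzeUses(const UseGraph &G, int N,
                        std::set<int> &analyzed,   // ~ LoadUsingPHIs
                        std::set<int> &thisWalk) { // ~ LoadUsingPHIsPerLoad
  for (size_t i = 0, e = G[N].size(); i != e; ++i) {
    int User = G[N][i];
    if (!thisWalk.insert(User).second)
      return false;   // Revisited within this walk: mutually dependent "PHIs".
    if (!analyzed.insert(User).second)
      continue;       // Already proven safe on an earlier walk.
    if (!analyzeUses(G, User, analyzed, thisWalk))
      return false;
  }
  return true;
}

int main() {
  UseGraph G;
  G.push_back(std::vector<int>(1, 1));  // users of 0: {1}
  G.push_back(std::vector<int>(1, 2));  // users of 1: {2}
  G.push_back(std::vector<int>(1, 1));  // users of 2: {1}  <- the cycle
  std::set<int> analyzed, thisWalk;
  bool OK = analyzeUses(G, 0, analyzed, thisWalk);
  std::printf("simple enough: %s\n", OK ? "yes" : "no"); // prints "no"
  return 0;
}

Without the per-walk set, a walker with no memoization would recurse through 1 and 2 forever; with it, the cycle is detected and the analysis bails out.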

--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp

@@ -1020,7 +1020,8 @@ static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
 /// of a load) are simple enough to perform heap SRA on.  This permits GEP's
 /// that index through the array and struct field, icmps of null, and PHIs.
 static bool LoadUsesSimpleEnoughForHeapSRA(Value *V,
-                                           SmallPtrSet<PHINode*, 32> &LoadUsingPHIs) {
+                              SmallPtrSet<PHINode*, 32> &LoadUsingPHIs,
+                              SmallPtrSet<PHINode*, 32> &LoadUsingPHIsPerLoad) {
   // We permit two users of the load: setcc comparing against the null
   // pointer, and a getelementptr of a specific form.
   for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
@@ -1044,12 +1045,17 @@ static bool LoadUsesSimpleEnoughForHeapSRA(Value *V,
     }
 
     if (PHINode *PN = dyn_cast<PHINode>(User)) {
-      // If we have already recursively analyzed this PHI, then it is safe.
-      if (LoadUsingPHIs.insert(PN))
+      if (!LoadUsingPHIsPerLoad.insert(PN))
+        // This means some phi nodes are dependent on each other.
+        // Avoid infinite looping!
+        return false;
+      if (!LoadUsingPHIs.insert(PN))
+        // If we have already analyzed this PHI, then it is safe.
+        continue;
+
       // Make sure all uses of the PHI are simple enough to transform.
-      if (!LoadUsesSimpleEnoughForHeapSRA(PN, LoadUsingPHIs))
+      if (!LoadUsesSimpleEnoughForHeapSRA(PN,
+                                          LoadUsingPHIs, LoadUsingPHIsPerLoad))
         return false;
 
       continue;
@@ -1068,11 +1074,15 @@ static bool LoadUsesSimpleEnoughForHeapSRA(Value *V,
 static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(GlobalVariable *GV,
                                                     MallocInst *MI) {
   SmallPtrSet<PHINode*, 32> LoadUsingPHIs;
+  SmallPtrSet<PHINode*, 32> LoadUsingPHIsPerLoad;
   for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;
        ++UI)
-    if (LoadInst *LI = dyn_cast<LoadInst>(*UI))
-      if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs))
+    if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
+      if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
+                                          LoadUsingPHIsPerLoad))
         return false;
+      LoadUsingPHIsPerLoad.clear();
+    }
 
   // If we reach here, we know that all uses of the loads and transitive uses
   // (through PHI nodes) are simple enough to transform.  However, we don't know
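Note the division of labor between the two sets: LoadUsingPHIsPerLoad is cleared after each load and exists only to detect, within the analysis of a single load, a PHI that is reached twice (i.e. PHIs that are recursively dependent on each other), in which case the analysis conservatively returns false. LoadUsingPHIs, by contrast, persists across all loads of the global, so a PHI web already proven simple enough is not reanalyzed for a later load.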

--- /dev/null
+++ b/test/Transforms/GlobalOpt/2009-06-01-RecursivePHI.ll

@@ -0,0 +1,117 @@
+; RUN: llvm-as < %s | opt -globalopt
+
+%struct.s_annealing_sched = type { i32, float, float, float, float }
+%struct.s_bb = type { i32, i32, i32, i32 }
+%struct.s_net = type { i8*, i32, i32*, float, float }
+%struct.s_placer_opts = type { i32, float, i32, i32, i8*, i32, i32 }
+
+@net = internal global %struct.s_net* null ; <%struct.s_net**> [#uses=4]
+
+define fastcc void @alloc_and_load_placement_structs(i32 %place_cost_type, i32 %num_regions, float %place_cost_exp, float*** nocapture %old_region_occ_x, float*** nocapture %old_region_occ_y) nounwind ssp {
+entry:
+  br i1 undef, label %bb.i, label %my_malloc.exit
+
+bb.i: ; preds = %entry
+  unreachable
+
+my_malloc.exit: ; preds = %entry
+  br i1 undef, label %bb.i81, label %my_malloc.exit83
+
+bb.i81: ; preds = %my_malloc.exit
+  unreachable
+
+my_malloc.exit83: ; preds = %my_malloc.exit
+  br i1 undef, label %bb.i.i57, label %my_calloc.exit.i
+
+bb.i.i57: ; preds = %my_malloc.exit83
+  unreachable
+
+my_calloc.exit.i: ; preds = %my_malloc.exit83
+  br i1 undef, label %bb.i4.i, label %my_calloc.exit5.i
+
+bb.i4.i: ; preds = %my_calloc.exit.i
+  unreachable
+
+my_calloc.exit5.i: ; preds = %my_calloc.exit.i
+  %.pre.i58 = load %struct.s_net** @net, align 4 ; <%struct.s_net*> [#uses=1]
+  br label %bb17.i78
+
+bb1.i61: ; preds = %bb4.preheader.i, %bb1.i61
+  br i1 undef, label %bb1.i61, label %bb5.i62
+
+bb5.i62: ; preds = %bb1.i61
+  br i1 undef, label %bb6.i64, label %bb15.preheader.i
+
+bb15.preheader.i: ; preds = %bb4.preheader.i, %bb5.i62
+  br label %bb16.i77
+
+bb6.i64: ; preds = %bb5.i62
+  br i1 undef, label %bb7.i65, label %bb8.i67
+
+bb7.i65: ; preds = %bb6.i64
+  unreachable
+
+bb8.i67: ; preds = %bb6.i64
+  br i1 undef, label %bb.i1.i68, label %my_malloc.exit.i70
+
+bb.i1.i68: ; preds = %bb8.i67
+  unreachable
+
+my_malloc.exit.i70: ; preds = %bb8.i67
+  %0 = load %struct.s_net** @net, align 4 ; <%struct.s_net*> [#uses=1]
+  br i1 undef, label %bb9.i71, label %bb16.i77
+
+bb9.i71: ; preds = %bb9.i71, %my_malloc.exit.i70
+  %1 = load %struct.s_net** @net, align 4 ; <%struct.s_net*> [#uses=1]
+  br i1 undef, label %bb9.i71, label %bb16.i77
+
+bb16.i77: ; preds = %bb9.i71, %my_malloc.exit.i70, %bb15.preheader.i
+  %.pre41.i.rle244 = phi %struct.s_net* [ %.pre41.i, %bb15.preheader.i ], [ %0, %my_malloc.exit.i70 ], [ %1, %bb9.i71 ] ; <%struct.s_net*> [#uses=1]
+  br label %bb17.i78
+
+bb17.i78: ; preds = %bb16.i77, %my_calloc.exit5.i
+  %.pre41.i = phi %struct.s_net* [ %.pre41.i.rle244, %bb16.i77 ], [ %.pre.i58, %my_calloc.exit5.i ] ; <%struct.s_net*> [#uses=1]
+  br i1 undef, label %bb4.preheader.i, label %alloc_and_load_unique_pin_list.exit
+
+bb4.preheader.i: ; preds = %bb17.i78
+  br i1 undef, label %bb1.i61, label %bb15.preheader.i
+
+alloc_and_load_unique_pin_list.exit: ; preds = %bb17.i78
+  ret void
+}
+
+define void @read_net(i8* %net_file) nounwind ssp {
+entry:
+  br i1 undef, label %bb3.us.us.i, label %bb6.preheader
+
+bb6.preheader: ; preds = %entry
+  br i1 undef, label %bb7, label %bb
+
+bb3.us.us.i: ; preds = %entry
+  unreachable
+
+bb: ; preds = %bb6.preheader
+  br i1 undef, label %bb.i34, label %bb1.i38
+
+bb.i34: ; preds = %bb
+  unreachable
+
+bb1.i38: ; preds = %bb
+  %0 = malloc %struct.s_net, i32 undef ; <%struct.s_net*> [#uses=1]
+  br i1 undef, label %bb.i1.i39, label %my_malloc.exit2.i
+
+bb.i1.i39: ; preds = %bb1.i38
+  unreachable
+
+my_malloc.exit2.i: ; preds = %bb1.i38
+  store %struct.s_net* %0, %struct.s_net** @net, align 4
+  br i1 undef, label %bb.i7.i40, label %my_malloc.exit8.i
+
+bb.i7.i40: ; preds = %my_malloc.exit2.i
+  unreachable
+
+my_malloc.exit8.i: ; preds = %my_malloc.exit2.i
+  unreachable
+
+bb7: ; preds = %bb6.preheader
+  unreachable
+}
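The test reduces the failure case to its essentials: in @alloc_and_load_placement_structs, the PHIs %.pre41.i.rle244 (in %bb16.i77) and %.pre41.i (in %bb17.i78) are incoming values of each other, and both are fed by loads of @net, which @read_net initializes from a malloc. This is exactly the recursive PHI dependence described in the commit message; with the fix, -globalopt detects the cycle and declines to perform heap SRA instead of looping.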