Fix PR3784: if the source of a phi comes from a bb that ends with an invoke, make sure the copy is inserted before the try range (unless it is used as an input to the invoke, in which case insert it after the last use), not at the end of the bb.

Also re-apply r66140, which had been disabled as a workaround.

llvm-svn: 66976
parent 2b3f72d6d7
commit cda58e565f
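For orientation before the diff: the rule the new WalkPassEHTryRange hook implements can be stated on its own. The copy for a PHI source is normally inserted at the predecessor's first terminator, but when that point sits right after an EH_LABEL-delimited try range, it is walked back to just before the range, stopping early just after the last in-block use of the source register (typically the invoke itself). The motivation, as the commit message implies, is that the unwind edge is taken from inside the try range, so a copy placed after the closing EH_LABEL would never execute when the invoke throws. The sketch below is a minimal, self-contained illustration of that rule, not the LLVM API; Instr, Op, and adjustInsertPos are made-up stand-ins for MachineInstr, the EH_LABEL opcode, and WalkPassEHTryRange.

#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical stand-ins for machine instructions and opcodes (not LLVM types).
enum class Op { EH_LABEL, INVOKE, OTHER };

struct Instr {
  Op op;
  bool usesSrcReg = false; // does this instruction read the PHI source register?
};

// Given the default insertion point (index of the block's first terminator, or
// block.size() if there is none), walk backwards past an EH try range: stop at
// the label that opens the range, or just after the last in-block use of the
// source register, whichever comes first.
static std::size_t adjustInsertPos(const std::vector<Instr> &block,
                                   std::size_t insertPos) {
  if (insertPos == 0)
    return insertPos;
  std::size_t pi = insertPos - 1;
  // Only adjust when the insertion point sits right after a try range.
  if (block[pi].op != Op::EH_LABEL)
    return insertPos;
  while (pi > 0) {
    --pi;
    if (block[pi].op == Op::EH_LABEL)
      return pi;      // insert before the whole try range
    if (block[pi].usesSrcReg)
      return pi + 1;  // insert after the last use (e.g. the invoke's argument)
  }
  return insertPos;
}

int main() {
  // Block shape: def, EH_LABEL, invoke(SrcReg), EH_LABEL -- terminator at end.
  std::vector<Instr> bb = {
      {Op::OTHER, false},  // 0: some def
      {Op::EH_LABEL},      // 1: try range begins
      {Op::INVOKE, true},  // 2: the invoke reads SrcReg
      {Op::EH_LABEL},      // 3: try range ends
  };
  // SrcReg is used inside the range: the copy goes right after the invoke.
  assert(adjustInsertPos(bb, bb.size()) == 3);

  // No use inside the range: the copy goes before the opening EH_LABEL.
  bb[2].usesSrcReg = false;
  assert(adjustInsertPos(bb, bb.size()) == 1);
  return 0;
}

Any C++14 compiler builds this; the two asserts correspond to the two cases named in the commit message.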
@@ -31,6 +31,7 @@
 using namespace llvm;
 
 STATISTIC(NumAtomic, "Number of atomic phis lowered");
+STATISTIC(NumEH, "Number of EH try blocks skipped");
 
 namespace {
   class VISIBILITY_HIDDEN PNE : public MachineFunctionPass {
@@ -65,6 +66,9 @@ namespace {
     ///
     void analyzePHINodes(const MachineFunction& Fn);
 
+    void WalkPassEHTryRange(MachineBasicBlock &MBB,
+                            MachineBasicBlock::iterator &I, unsigned SrcReg);
+
     typedef std::pair<const MachineBasicBlock*, unsigned> BBVRegPair;
     typedef std::map<BBVRegPair, unsigned> VRegPHIUse;
 
@@ -140,6 +144,39 @@ static bool isSourceDefinedByImplicitDef(const MachineInstr *MPhi,
   return true;
 }
 
+void PNE::WalkPassEHTryRange(MachineBasicBlock &MBB,
+                             MachineBasicBlock::iterator &I, unsigned SrcReg) {
+  if (I == MBB.begin())
+    return;
+  MachineBasicBlock::iterator PI = prior(I);
+  if (PI->getOpcode() != TargetInstrInfo::EH_LABEL)
+    return;
+
+  // Trying to walk pass the EH try range. If we run into a use instruction,
+  // we want to insert the copy there.
+  SmallPtrSet<MachineInstr*, 4> UsesInMBB;
+  for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(SrcReg),
+         UE = MRI->use_end(); UI != UE; ++UI) {
+    MachineInstr *UseMI = &*UI;
+    if (UseMI->getParent() == &MBB)
+      UsesInMBB.insert(UseMI);
+  }
+
+  while (PI != MBB.begin()) {
+    --PI;
+    if (PI->getOpcode() == TargetInstrInfo::EH_LABEL) {
+      ++NumEH;
+      I = PI;
+      return;
+    } else if (UsesInMBB.count(&*PI)) {
+      ++NumEH;
+      I = next(PI);
+      return;
+    }
+  }
+  return;
+}
+
 /// LowerAtomicPHINode - Lower the PHI node at the top of the specified block,
 /// under the assuption that it needs to be lowered in a way that supports
 /// atomic execution of PHIs. This lowering method is always correct all of the
@@ -238,6 +275,9 @@ void PNE::LowerAtomicPHINode(MachineBasicBlock &MBB,
     // in the block (or end()).
     MachineBasicBlock::iterator InsertPos = opBlock.getFirstTerminator();
 
+    // Walk pass EH try range if needed.
+    WalkPassEHTryRange(opBlock, InsertPos, SrcReg);
+
     // Insert the copy.
     TII->copyRegToReg(opBlock, InsertPos, IncomingReg, SrcReg, RC, RC);
 
@@ -327,6 +327,12 @@ static void SplitEdgeNicely(TerminatorInst *TI, unsigned SuccNum,
   assert(isa<PHINode>(Dest->begin()) &&
          "This should only be called if Dest has a PHI!");
 
+  // Do not split edges to EH landing pads.
+  if (InvokeInst *Invoke = dyn_cast<InvokeInst>(TI)) {
+    if (Invoke->getSuccessor(1) == Dest)
+      return;
+  }
+
   // As a hack, never split backedges of loops. Even though the copy for any
   // PHIs inserted on the backedge would be dead for exits from the loop, we
   // assume that the cost of *splitting* the backedge would be too high.
test/CodeGen/X86/2009-03-13-PHIElimBug.ll (new file, 37 lines added)
@@ -0,0 +1,37 @@
+; RUN: llvm-as < %s | llc -mtriple=i386-pc-linux-gnu -stats |& grep phielim | grep {Number of EH try blocks skipped} | grep 4
+; PR3784
+
+%struct.c38002a__arr___XUB = type { i32, i32 }
+%struct.c38002a__arr_name = type { [0 x i32]*, %struct.c38002a__arr___XUB* }
+%struct.c38002a__rec = type { i32, %struct.c38002a__arr_name }
+
+define void @_ada_c38002a() {
+entry:
+  %0 = invoke i8* @__gnat_malloc(i32 12)
+      to label %invcont unwind label %lpad    ; <i8*> [#uses=0]
+
+invcont:    ; preds = %entry
+  %1 = invoke i8* @__gnat_malloc(i32 20)
+      to label %invcont1 unwind label %lpad    ; <i8*> [#uses=0]
+
+invcont1:    ; preds = %invcont
+  %2 = invoke i32 @report__ident_int(i32 2)
+      to label %.noexc unwind label %lpad    ; <i32> [#uses=0]
+
+.noexc:    ; preds = %invcont1
+  %3 = invoke i32 @report__ident_int(i32 3)
+      to label %.noexc88 unwind label %lpad    ; <i32> [#uses=0]
+
+.noexc88:    ; preds = %.noexc
+  unreachable
+
+lpad:    ; preds = %.noexc, %invcont1, %invcont, %entry
+  %r.0 = phi %struct.c38002a__rec* [ null, %entry ], [ null, %invcont ], [ null, %invcont1 ], [ null, %.noexc ]    ; <%struct.c38002a__rec*> [#uses=1]
+  %4 = getelementptr %struct.c38002a__rec* %r.0, i32 0, i32 0    ; <i32*> [#uses=1]
+  %5 = load i32* %4, align 4    ; <i32> [#uses=0]
+  ret void
+}
+
+declare i32 @report__ident_int(i32)
+
+declare i8* @__gnat_malloc(i32)
@@ -1,4 +1,38 @@
 ; RUN: llvm-as < %s | llc -mtriple=x86_64-apple-darwin | not grep jmp
 ; rdar://6647639
-; XFAIL: *
-
+
+%struct.FetchPlanHeader = type { i8*, i8*, i32, i8*, i8*, i8*, i8*, i8*, %struct.NSObject* (%struct.NSObject*, %struct.objc_selector*, ...)*, %struct.__attributeDescriptionFlags }
+%struct.NSArray = type { %struct.NSObject }
+%struct.NSAutoreleasePool = type { %struct.NSObject, i8*, i8*, i8*, i8* }
+%struct.NSObject = type { %struct.NSObject* }
+%struct.__attributeDescriptionFlags = type <{ i32 }>
+%struct._message_ref_t = type { %struct.NSObject* (%struct.NSObject*, %struct._message_ref_t*, ...)*, %struct.objc_selector* }
+%struct.objc_selector = type opaque
+@"\01l_objc_msgSend_fixup_alloc" = external global %struct._message_ref_t, align 16    ; <%struct._message_ref_t*> [#uses=2]
+
+define %struct.NSArray* @newFetchedRowsForFetchPlan_MT(%struct.FetchPlanHeader* %fetchPlan, %struct.objc_selector* %selectionMethod, %struct.NSObject* %selectionParameter) ssp {
+entry:
+  %0 = invoke %struct.NSObject* null(%struct.NSObject* null, %struct._message_ref_t* @"\01l_objc_msgSend_fixup_alloc")
+      to label %invcont unwind label %lpad    ; <%struct.NSObject*> [#uses=1]
+
+invcont:    ; preds = %entry
+  %1 = invoke %struct.NSObject* (%struct.NSObject*, %struct.objc_selector*, ...)* @objc_msgSend(%struct.NSObject* %0, %struct.objc_selector* null)
+      to label %invcont26 unwind label %lpad    ; <%struct.NSObject*> [#uses=0]
+
+invcont26:    ; preds = %invcont
+  %2 = invoke %struct.NSObject* null(%struct.NSObject* null, %struct._message_ref_t* @"\01l_objc_msgSend_fixup_alloc")
+      to label %invcont27 unwind label %lpad    ; <%struct.NSObject*> [#uses=0]
+
+invcont27:    ; preds = %invcont26
+  unreachable
+
+lpad:    ; preds = %invcont26, %invcont, %entry
+  %pool.1 = phi %struct.NSAutoreleasePool* [ null, %entry ], [ null, %invcont ], [ null, %invcont26 ]    ; <%struct.NSAutoreleasePool*> [#uses=0]
+  unreachable
+}
+
+declare %struct.NSObject* @objc_msgSend(%struct.NSObject*, %struct.objc_selector*, ...)