[WinEH] Demote values and phis live across exception handlers up front
In particular, this handles SSA values that are live *out* of a handler. The existing code only handles values that are live *in* to a handler. It also handles phi nodes in the block where normal control should resume after the end of a catch handler.

When EH return points have phi nodes, we need to split the return edge. It is impossible for phi elimination to emit copies in the previous block if that block gets outlined. The indirectbr that we leave in the function is only notional, and is eliminated from the MachineFunction CFG early on.

Reviewers: majnemer, andrew.w.kaylor

Differential Revision: http://reviews.llvm.org/D9158

llvm-svn: 235545
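As a rough sketch of what the demotion does (this fragment is not taken from the patch itself; it mirrors the cppeh-demote-liveout.ll test added below, and the .reg2mem/.reload names are only illustrative), an SSA value that is live out of a catch handler is replaced by a stack slot in the parent frame:

  ; before demotion: %val.lpad is defined in the handler and used at the
  ; normal resume point, so it would be live across the outlining boundary
  catchit:
    %val.lpad = add i32 %val.entry, 1
    br label %ret

  ; after demotion: the handler stores to an alloca and the resume point
  ; reloads from it, so no SSA value crosses into or out of the handler
  catchit:
    %val.lpad = add i32 %val.entry, 1
    store i32 %val.lpad, i32* %val.lpad.reg2mem
    br label %ret
  ret:
    %val.lpad.reload = load i32, i32* %val.lpad.reg2mem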
@@ -18,6 +18,7 @@
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/LibCallSemantics.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
@@ -86,6 +87,8 @@ private:
  bool prepareExceptionHandlers(Function &F,
                                SmallVectorImpl<LandingPadInst *> &LPads);
  void promoteLandingPadValues(LandingPadInst *LPad);
  void demoteValuesLiveAcrossHandlers(Function &F,
                                      SmallVectorImpl<LandingPadInst *> &LPads);
  void completeNestedLandingPad(Function *ParentFn,
                                LandingPadInst *OutlinedLPad,
                                const LandingPadInst *OriginalLPad,
@@ -326,6 +329,10 @@ static cl::opt<bool>
    cl::desc("Prepare functions with SEH personalities"));

bool WinEHPrepare::runOnFunction(Function &Fn) {
  // No need to prepare outlined handlers.
  if (Fn.hasFnAttribute("wineh-parent"))
    return false;

  SmallVector<LandingPadInst *, 4> LPads;
  SmallVector<ResumeInst *, 4> Resumes;
  for (BasicBlock &BB : Fn) {
@@ -369,8 +376,258 @@ void WinEHPrepare::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<DominatorTreeWrapperPass>();
}

static bool isSelectorDispatch(BasicBlock *BB, BasicBlock *&CatchHandler,
                               Constant *&Selector, BasicBlock *&NextBB);

// Finds blocks reachable from the starting set Worklist. Does not follow unwind
// edges or blocks listed in StopPoints.
static void findReachableBlocks(SmallPtrSetImpl<BasicBlock *> &ReachableBBs,
                                SetVector<BasicBlock *> &Worklist,
                                const SetVector<BasicBlock *> *StopPoints) {
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // Don't cross blocks that we should stop at.
    if (StopPoints && StopPoints->count(BB))
      continue;

    if (!ReachableBBs.insert(BB).second)
      continue; // Already visited.

    // Don't follow unwind edges of invokes.
    if (auto *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
      Worklist.insert(II->getNormalDest());
      continue;
    }

    // Otherwise, follow all successors.
    Worklist.insert(succ_begin(BB), succ_end(BB));
  }
}

/// Find all points where exceptional control rejoins normal control flow via
/// llvm.eh.endcatch. Add them to the normal bb reachability worklist.
static void findCXXEHReturnPoints(Function &F,
                                  SetVector<BasicBlock *> &EHReturnBlocks) {
  for (auto BBI = F.begin(), BBE = F.end(); BBI != BBE; ++BBI) {
    BasicBlock *BB = BBI;
    for (Instruction &I : *BB) {
      if (match(&I, m_Intrinsic<Intrinsic::eh_endcatch>())) {
        // Split the block after the call to llvm.eh.endcatch if there is
        // anything other than an unconditional branch, or if the successor
        // starts with a phi.
        auto *Br = dyn_cast<BranchInst>(I.getNextNode());
        if (!Br || !Br->isUnconditional() ||
            isa<PHINode>(Br->getSuccessor(0)->begin())) {
          DEBUG(dbgs() << "splitting block " << BB->getName()
                       << " with llvm.eh.endcatch\n");
          BBI = BB->splitBasicBlock(I.getNextNode(), "ehreturn");
        }
        // The next BB is normal control flow.
        EHReturnBlocks.insert(BB->getTerminator()->getSuccessor(0));
        break;
      }
    }
  }
}

static bool isCatchAllLandingPad(const BasicBlock *BB) {
  const LandingPadInst *LP = BB->getLandingPadInst();
  if (!LP)
    return false;
  unsigned N = LP->getNumClauses();
  return (N > 0 && LP->isCatch(N - 1) &&
          isa<ConstantPointerNull>(LP->getClause(N - 1)));
}

/// Find all points where exceptions control rejoins normal control flow via
/// selector dispatch.
static void findSEHEHReturnPoints(Function &F,
                                  SetVector<BasicBlock *> &EHReturnBlocks) {
  for (auto BBI = F.begin(), BBE = F.end(); BBI != BBE; ++BBI) {
    BasicBlock *BB = BBI;
    // If the landingpad is a catch-all, treat the whole lpad as if it is
    // reachable from normal control flow.
    // FIXME: This is imprecise. We need a better way of identifying where a
    // catch-all starts and cleanups stop. As far as LLVM is concerned, there
    // is no difference.
    if (isCatchAllLandingPad(BB)) {
      EHReturnBlocks.insert(BB);
      continue;
    }

    BasicBlock *CatchHandler;
    BasicBlock *NextBB;
    Constant *Selector;
    if (isSelectorDispatch(BB, CatchHandler, Selector, NextBB)) {
      // Split the edge if there is a phi node. Returning from EH to a phi node
      // is just as impossible as having a phi after an indirectbr.
      if (isa<PHINode>(CatchHandler->begin())) {
        DEBUG(dbgs() << "splitting EH return edge from " << BB->getName()
                     << " to " << CatchHandler->getName() << '\n');
        BBI = CatchHandler = SplitCriticalEdge(
            BB, std::find(succ_begin(BB), succ_end(BB), CatchHandler));
      }
      EHReturnBlocks.insert(CatchHandler);
    }
  }
}

/// Ensure that all values live into and out of exception handlers are stored
/// in memory.
/// FIXME: This falls down when values are defined in one handler and live into
/// another handler. For example, a cleanup defines a value used only by a
/// catch handler.
void WinEHPrepare::demoteValuesLiveAcrossHandlers(
    Function &F, SmallVectorImpl<LandingPadInst *> &LPads) {
  DEBUG(dbgs() << "Demoting values live across exception handlers in function "
               << F.getName() << '\n');

  // Build a set of all non-exceptional blocks and exceptional blocks.
  // - Non-exceptional blocks are blocks reachable from the entry block while
  //   not following invoke unwind edges.
  // - Exceptional blocks are blocks reachable from landingpads. Analysis does
  //   not follow llvm.eh.endcatch blocks, which mark a transition from
  //   exceptional to normal control.
  SmallPtrSet<BasicBlock *, 4> NormalBlocks;
  SmallPtrSet<BasicBlock *, 4> EHBlocks;
  SetVector<BasicBlock *> EHReturnBlocks;
  SetVector<BasicBlock *> Worklist;

  if (Personality == EHPersonality::MSVC_CXX)
    findCXXEHReturnPoints(F, EHReturnBlocks);
  else
    findSEHEHReturnPoints(F, EHReturnBlocks);

  DEBUG({
    dbgs() << "identified the following blocks as EH return points:\n";
    for (BasicBlock *BB : EHReturnBlocks)
      dbgs() << " " << BB->getName() << '\n';
  });

  // Join points should not have phis at this point, unless they are a
  // landingpad, in which case we will demote their phis later.
#ifndef NDEBUG
  for (BasicBlock *BB : EHReturnBlocks)
    assert((BB->isLandingPad() || !isa<PHINode>(BB->begin())) &&
           "non-lpad EH return block has phi");
#endif

  // Normal blocks are the blocks reachable from the entry block and all EH
  // return points.
  Worklist = EHReturnBlocks;
  Worklist.insert(&F.getEntryBlock());
  findReachableBlocks(NormalBlocks, Worklist, nullptr);
  DEBUG({
    dbgs() << "marked the following blocks as normal:\n";
    for (BasicBlock *BB : NormalBlocks)
      dbgs() << " " << BB->getName() << '\n';
  });

  // Exceptional blocks are the blocks reachable from landingpads that don't
  // cross EH return points.
  Worklist.clear();
  for (auto *LPI : LPads)
    Worklist.insert(LPI->getParent());
  findReachableBlocks(EHBlocks, Worklist, &EHReturnBlocks);
  DEBUG({
    dbgs() << "marked the following blocks as exceptional:\n";
    for (BasicBlock *BB : EHBlocks)
      dbgs() << " " << BB->getName() << '\n';
  });

  SetVector<Argument *> ArgsToDemote;
  SetVector<Instruction *> InstrsToDemote;
  for (BasicBlock &BB : F) {
    bool IsNormalBB = NormalBlocks.count(&BB);
    bool IsEHBB = EHBlocks.count(&BB);
    if (!IsNormalBB && !IsEHBB)
      continue; // Blocks that are neither normal nor EH are unreachable.
    for (Instruction &I : BB) {
      for (Value *Op : I.operands()) {
        // Don't demote static allocas, constants, and labels.
        if (isa<Constant>(Op) || isa<BasicBlock>(Op) || isa<InlineAsm>(Op))
          continue;
        auto *AI = dyn_cast<AllocaInst>(Op);
        if (AI && AI->isStaticAlloca())
          continue;

        if (auto *Arg = dyn_cast<Argument>(Op)) {
          if (IsEHBB) {
            DEBUG(dbgs() << "Demoting argument " << *Arg
                         << " used by EH instr: " << I << "\n");
            ArgsToDemote.insert(Arg);
          }
          continue;
        }

        auto *OpI = cast<Instruction>(Op);
        BasicBlock *OpBB = OpI->getParent();
        // If a value is produced and consumed in the same BB, we don't need to
        // demote it.
        if (OpBB == &BB)
          continue;
        bool IsOpNormalBB = NormalBlocks.count(OpBB);
        bool IsOpEHBB = EHBlocks.count(OpBB);
        if (IsNormalBB != IsOpNormalBB || IsEHBB != IsOpEHBB) {
          DEBUG({
            dbgs() << "Demoting instruction live in-out from EH:\n";
            dbgs() << "Instr: " << *OpI << '\n';
            dbgs() << "User: " << I << '\n';
          });
          InstrsToDemote.insert(OpI);
        }
      }
    }
  }

  // Demote values live into and out of handlers.
  // FIXME: This demotion is inefficient. We should insert spills at the point
  // of definition, insert one reload in each handler that uses the value, and
  // insert reloads in the BB used to rejoin normal control flow.
  Instruction *AllocaInsertPt = F.getEntryBlock().getFirstInsertionPt();
  for (Instruction *I : InstrsToDemote)
    DemoteRegToStack(*I, false, AllocaInsertPt);

  // Demote arguments separately, and only for uses in EH blocks.
  for (Argument *Arg : ArgsToDemote) {
    auto *Slot = new AllocaInst(Arg->getType(), nullptr,
                                Arg->getName() + ".reg2mem", AllocaInsertPt);
    SmallVector<User *, 4> Users(Arg->user_begin(), Arg->user_end());
    for (User *U : Users) {
      auto *I = dyn_cast<Instruction>(U);
      if (I && EHBlocks.count(I->getParent())) {
        auto *Reload = new LoadInst(Slot, Arg->getName() + ".reload", false, I);
        U->replaceUsesOfWith(Arg, Reload);
      }
    }
    new StoreInst(Arg, Slot, AllocaInsertPt);
  }

  // Demote landingpad phis, as the landingpad will be removed from the machine
  // CFG.
  for (LandingPadInst *LPI : LPads) {
    BasicBlock *BB = LPI->getParent();
    while (auto *Phi = dyn_cast<PHINode>(BB->begin()))
      DemotePHIToStack(Phi, AllocaInsertPt);
  }

  DEBUG(dbgs() << "Demoted " << InstrsToDemote.size() << " instructions and "
               << ArgsToDemote.size() << " arguments for WinEHPrepare\n\n");
}

bool WinEHPrepare::prepareExceptionHandlers(
    Function &F, SmallVectorImpl<LandingPadInst *> &LPads) {
  // Don't run on functions that are already prepared.
  for (LandingPadInst *LPad : LPads) {
    BasicBlock *LPadBB = LPad->getParent();
    for (Instruction &Inst : *LPadBB)
      if (match(&Inst, m_Intrinsic<Intrinsic::eh_actions>()))
        return false;
  }

  demoteValuesLiveAcrossHandlers(F, LPads);

  // These containers are used to re-map frame variables that are used in
  // outlined catch and cleanup handlers. They will be populated as the
  // handlers are outlined.
@@ -391,11 +648,9 @@ bool WinEHPrepare::prepareExceptionHandlers(
    bool LPadHasActionList = false;
    BasicBlock *LPadBB = LPad->getParent();
    for (Instruction &Inst : *LPadBB) {
      if (auto *IntrinCall = dyn_cast<IntrinsicInst>(&Inst)) {
        if (IntrinCall->getIntrinsicID() == Intrinsic::eh_actions) {
          LPadHasActionList = true;
          break;
        }
      if (match(&Inst, m_Intrinsic<Intrinsic::eh_actions>())) {
        LPadHasActionList = true;
        break;
      }
    // FIXME: This is here to help with the development of nested landing pad
    // outlining. It should be removed when that is finished.
@@ -493,14 +748,17 @@ bool WinEHPrepare::prepareExceptionHandlers(
    CallInst::Create(ActionIntrin, ActionArgs, "recover", NewLPadBB);

    // Add an indirect branch listing possible successors of the catch handlers.
    IndirectBrInst *Branch = IndirectBrInst::Create(Recover, 0, NewLPadBB);
    SetVector<BasicBlock *> ReturnTargets;
    for (ActionHandler *Action : Actions) {
      if (auto *CatchAction = dyn_cast<CatchHandler>(Action)) {
        for (auto *Target : CatchAction->getReturnTargets()) {
          Branch->addDestination(Target);
        }
        const auto &CatchTargets = CatchAction->getReturnTargets();
        ReturnTargets.insert(CatchTargets.begin(), CatchTargets.end());
      }
    }
    IndirectBrInst *Branch =
        IndirectBrInst::Create(Recover, ReturnTargets.size(), NewLPadBB);
    for (BasicBlock *Target : ReturnTargets)
      Branch->addDestination(Target);
  } // End for each landingpad

  // If nothing got outlined, there is no more processing to be done.
@@ -543,51 +801,10 @@ bool WinEHPrepare::prepareExceptionHandlers(

  // Finally, replace all of the temporary allocas for frame variables used in
  // the outlined handlers with calls to llvm.framerecover.
  BasicBlock::iterator II = Entry->getFirstInsertionPt();
  Instruction *AllocaInsertPt = II;
  for (auto &VarInfoEntry : FrameVarInfo) {
    Value *ParentVal = VarInfoEntry.first;
    TinyPtrVector<AllocaInst *> &Allocas = VarInfoEntry.second;

    // If the mapped value isn't already an alloca, we need to spill it if it
    // is a computed value or copy it if it is an argument.
    AllocaInst *ParentAlloca = dyn_cast<AllocaInst>(ParentVal);
    if (!ParentAlloca) {
      if (auto *Arg = dyn_cast<Argument>(ParentVal)) {
        // Lower this argument to a copy and then demote that to the stack.
        // We can't just use the argument location because the handler needs
        // it to be in the frame allocation block.
        // Use 'select i8 true, %arg, undef' to simulate a 'no-op' instruction.
        Value *TrueValue = ConstantInt::getTrue(Context);
        Value *UndefValue = UndefValue::get(Arg->getType());
        Instruction *SI =
            SelectInst::Create(TrueValue, Arg, UndefValue,
                               Arg->getName() + ".tmp", AllocaInsertPt);
        Arg->replaceAllUsesWith(SI);
        // Reset the select operand, because it was clobbered by the RAUW above.
        SI->setOperand(1, Arg);
        ParentAlloca = DemoteRegToStack(*SI, true, SI);
      } else if (auto *PN = dyn_cast<PHINode>(ParentVal)) {
        ParentAlloca = DemotePHIToStack(PN, AllocaInsertPt);
      } else {
        Instruction *ParentInst = cast<Instruction>(ParentVal);
        // FIXME: This is a work-around to temporarily handle the case where an
        // instruction that is only used in handlers is not sunk.
        // Without uses, DemoteRegToStack would just eliminate the value.
        // This will fail if ParentInst is an invoke.
        if (ParentInst->getNumUses() == 0) {
          BasicBlock::iterator InsertPt = ParentInst;
          ++InsertPt;
          ParentAlloca =
              new AllocaInst(ParentInst->getType(), nullptr,
                             ParentInst->getName() + ".reg2mem",
                             AllocaInsertPt);
          new StoreInst(ParentInst, ParentAlloca, InsertPt);
        } else {
          ParentAlloca = DemoteRegToStack(*ParentInst, true, AllocaInsertPt);
        }
      }
    }
    AllocaInst *ParentAlloca = cast<AllocaInst>(ParentVal);

    // FIXME: We should try to sink unescaped allocas from the parent frame into
    // the child frame. If the alloca is escaped, we have to use the lifetime
@@ -1357,31 +1574,23 @@ WinEHFrameVariableMaterializer::WinEHFrameVariableMaterializer(
}

Value *WinEHFrameVariableMaterializer::materializeValueFor(Value *V) {
  // If we're asked to materialize a value that is an instruction, we
  // temporarily create an alloca in the outlined function and add this
  // to the FrameVarInfo map. When all the outlining is complete, we'll
  // collect these into a structure, spilling non-alloca values in the
  // parent frame as necessary, and replace these temporary allocas with
  // GEPs referencing the frame allocation block.

  // If the value is an alloca, the mapping is direct.
  // If we're asked to materialize a static alloca, we temporarily create an
  // alloca in the outlined function and add this to the FrameVarInfo map. When
  // all the outlining is complete, we'll replace these temporary allocas with
  // calls to llvm.framerecover.
  if (auto *AV = dyn_cast<AllocaInst>(V)) {
    assert(AV->isStaticAlloca() &&
           "cannot materialize un-demoted dynamic alloca");
    AllocaInst *NewAlloca = dyn_cast<AllocaInst>(AV->clone());
    Builder.Insert(NewAlloca, AV->getName());
    FrameVarInfo[AV].push_back(NewAlloca);
    return NewAlloca;
  }

  // For other types of instructions or arguments, we need an alloca based on
  // the value's type and a load of the alloca. The alloca will be replaced
  // by a GEP, but the load will stay. In the parent function, the value will
  // be spilled to a location in the frame allocation block.
  if (isa<Instruction>(V) || isa<Argument>(V)) {
    AllocaInst *NewAlloca =
        Builder.CreateAlloca(V->getType(), nullptr, "eh.temp.alloca");
    FrameVarInfo[V].push_back(NewAlloca);
    LoadInst *NewLoad = Builder.CreateLoad(NewAlloca, V->getName() + ".reload");
    return NewLoad;
    errs() << "Failed to demote instruction used in exception handler:\n";
    errs() << " " << *V << '\n';
    report_fatal_error("WinEHPrepare failed to demote instruction");
  }

  // Don't materialize other values.
test/CodeGen/WinEH/cppeh-demote-liveout.ll (new file, 72 lines)
@@ -0,0 +1,72 @@
; RUN: opt -winehprepare -S < %s | FileCheck %s

; Notionally based on this C++ source:
; int liveout_catch(int p) {
;   int val = p + 1;
;   try {
;     might_throw();
;   } catch (int) {
;     val++;
;   }
;   return val;
; }

declare void @llvm.eh.begincatch(i8*, i8*)
declare void @llvm.eh.endcatch()
declare void @might_throw()
declare i32 @__CxxFrameHandler3(...)
declare i32 @llvm.eh.typeid.for(i8*)

@typeinfo.int = external global i32

define i32 @liveout_catch(i32 %p) {
entry:
  %val.entry = add i32 %p, 1
  invoke void @might_throw()
      to label %ret unwind label %lpad

lpad:
  %ehvals = landingpad { i8*, i32 } personality i32 (...)* @__CxxFrameHandler3
      cleanup
      catch i32* @typeinfo.int
  %ehptr = extractvalue { i8*, i32 } %ehvals, 0
  %sel = extractvalue { i8*, i32 } %ehvals, 1
  %int_sel = call i32 @llvm.eh.typeid.for(i8* bitcast (i32* @typeinfo.int to i8*))
  %match = icmp eq i32 %sel, %int_sel
  br i1 %match, label %catchit, label %resume

catchit:
  call void @llvm.eh.begincatch(i8* %ehptr, i8* null)
  %val.lpad = add i32 %val.entry, 1
  call void @llvm.eh.endcatch()
  br label %ret

ret:
  %rv = phi i32 [%val.entry, %entry], [%val.lpad, %catchit]
  ret i32 %rv

resume:
  resume {i8*, i32} %ehvals
}

; CHECK-LABEL: define i32 @liveout_catch(i32 %p)
; CHECK: %val.entry = add i32 %p, 1
; CHECK-NEXT: store i32 %val.entry, i32* %val.entry.reg2mem
; CHECK: invoke void @might_throw()
;
; CHECK: landingpad
; CHECK: indirectbr i8* {{.*}}, [label %ehreturn]
;
; CHECK: ehreturn:
; CHECK: load i32, i32* %val.lpad.reg2mem
; CHECK: br label %ret
;
; CHECK: ret:
; CHECK: %rv = phi i32 [ {{.*}}, %entry ], [ {{.*}}, %ehreturn ]
; CHECK: ret i32

; CHECK-LABEL: define internal i8* @liveout_catch.catch(i8*, i8*)
; CHECK: %[[val:[^ ]*]] = load i32, i32*
; CHECK-NEXT: %[[val_lpad:[^ ]*]] = add i32 %[[val]], 1
; CHECK-NEXT: store i32 %[[val_lpad]], i32*
; CHECK: ret i8* blockaddress(@liveout_catch, %ehreturn)
@@ -39,8 +39,7 @@ $"\01??_R0H@8" = comdat any
; CHECK: define i32 @"\01?test@@YAHUA@@@Z"(<{ %struct.A }>* inalloca)
; CHECK: entry:
; CHECK: [[TMP_REGMEM:\%.+]] = alloca <{ %struct.A }>*
; CHECK: [[TMP:\%.+]] = select i1 true, <{ %struct.A }>* %0, <{ %struct.A }>* undef
; CHECK: store <{ %struct.A }>* [[TMP]], <{ %struct.A }>** [[TMP_REGMEM]]
; CHECK: store <{ %struct.A }>* %0, <{ %struct.A }>** [[TMP_REGMEM]]
; CHECK: [[RETVAL:\%.+]] = alloca i32, align 4
; CHECK: [[E_PTR:\%.+]] = alloca i32, align 4
; CHECK: [[CLEANUP_SLOT:\%.+]] = alloca i32
@@ -109,10 +108,8 @@ try.cont: ; preds = %invoke.cont

; The cleanup block should be re-written like this.
; CHECK: cleanup:{{[ ]+}}; preds = %[[LPAD_LABEL]], %try.cont
; CHECK-NOT: %a2 = getelementptr inbounds <{ %struct.A }>, <{ %struct.A }>* %0, i32 0, i32 0
; CHECK: [[TMP_RELOAD:\%.+]] = load volatile <{ %struct.A }>*, <{ %struct.A }>** [[TMP_REGMEM]]
; CHECK: [[A2:\%.+]] = getelementptr inbounds <{ %struct.A }>, <{ %struct.A }>* [[TMP_RELOAD]], i32 0, i32 0
; CHECK: call x86_thiscallcc void @"\01??1A@@QAE@XZ"(%struct.A* [[A2]])
; CHECK: %a2 = getelementptr inbounds <{ %struct.A }>, <{ %struct.A }>* %0, i32 0, i32 0
; CHECK: call x86_thiscallcc void @"\01??1A@@QAE@XZ"(%struct.A* %a2)
; CHECK: [[TMP1:\%.+]] = load i32, i32* [[RETVAL]]
; CHECK: ret i32 [[TMP1]]

@@ -148,12 +145,12 @@ eh.resume: ; preds = %ehcleanup
; CHECK: [[E_PTR:\%.+]] = bitcast i8* [[RECOVER_E]] to i32*
; CHECK: [[RECOVER_EH_TEMP:\%.+]] = call i8* @llvm.framerecover(i8* bitcast (i32 (<{ %struct.A }>*)* @"\01?test@@YAHUA@@@Z" to i8*), i8* %1, i32 1)
; CHECK: [[EH_TEMP:\%.+]] = bitcast i8* [[RECOVER_EH_TEMP]] to <{ %struct.A }>**
; CHECK: [[TMP_RELOAD:\%.+]] = load <{ %struct.A }>*, <{ %struct.A }>** [[EH_TEMP]]
; CHECK: [[RECOVER_RETVAL:\%.+]] = call i8* @llvm.framerecover(i8* bitcast (i32 (<{ %struct.A }>*)* @"\01?test@@YAHUA@@@Z" to i8*), i8* %1, i32 2)
; CHECK: [[RETVAL1:\%.+]] = bitcast i8* [[RECOVER_RETVAL]] to i32*
; CHECK: [[RECOVER_CLEANUPSLOT:\%.+]] = call i8* @llvm.framerecover(i8* bitcast (i32 (<{ %struct.A }>*)* @"\01?test@@YAHUA@@@Z" to i8*), i8* %1, i32 3)
; CHECK: [[CLEANUPSLOT1:\%.+]] = bitcast i8* [[RECOVER_CLEANUPSLOT]] to i32*
; CHECK: [[E_I8PTR:\%.+]] = bitcast i32* [[E_PTR]] to i8*
; CHECK: [[TMP_RELOAD:\%.+]] = load <{ %struct.A }>*, <{ %struct.A }>** [[EH_TEMP]]
; CHECK: [[RECOVER_A:\%.+]] = getelementptr inbounds <{ %struct.A }>, <{ %struct.A }>* [[TMP_RELOAD]], i32 0, i32 0
; CHECK: [[A1:\%.+]] = getelementptr inbounds %struct.A, %struct.A* [[RECOVER_A]], i32 0, i32 0
; CHECK: [[TMP2:\%.+]] = load i32, i32* [[A1]], align 4
@@ -79,7 +79,7 @@ invoke.cont: ; preds = %entry
; CHECK-SAME: i32 1, i8* bitcast (%eh.HandlerMapEntry* @llvm.eh.handlermapentry._J to i8*), i32 1, i8* (i8*, i8*)* @"\01?test@@YAXXZ.catch1",
; CHECK-SAME: i32 1, i8* bitcast (%eh.HandlerMapEntry* @"llvm.eh.handlermapentry.reference.?AVSomeClass@@" to i8*), i32 2, i8* (i8*, i8*)* @"\01?test@@YAXXZ.catch2",
; CHECK-SAME: i32 1, i8* null, i32 -1, i8* (i8*, i8*)* @"\01?test@@YAXXZ.catch3")
; CHECK-NEXT: indirectbr i8* [[RECOVER]], [label %catch14.split, label %catch10.split, label %catch6.split, label %catch.split]
; CHECK-NEXT: indirectbr i8* [[RECOVER]], [label %ret]

lpad: ; preds = %entry
  %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*)
@@ -100,8 +100,11 @@ catch.dispatch: ; preds = %lpad
  %matches = icmp eq i32 %sel, %3
  br i1 %matches, label %catch14, label %catch.fallthrough

ret:
  ret void

; CHECK-NOT: catch14:
; CHECK: catch14.split:
; CHECK: ret:
; CHECK-NEXT: ret void
catch14: ; preds = %catch.dispatch
  %exn15 = load i8*, i8** %exn.slot
@@ -110,10 +113,10 @@ catch14: ; preds = %catch.dispatch
  %5 = load i32, i32* %i, align 4
  call void @"\01?handle_int@@YAXH@Z"(i32 %5)
  call void @llvm.eh.endcatch() #3
  ret void
  br label %ret

try.cont: ; preds = %invoke.cont
  ret void
  br label %ret

; CHECK-NOT: catch.fallthrough:
catch.fallthrough: ; preds = %catch.dispatch
@@ -122,8 +125,6 @@ catch.fallthrough: ; preds = %catch.dispatch
  br i1 %matches1, label %catch10, label %catch.fallthrough2

; CHECK-NOT: catch10:
; CHECK: catch10.split:
; CHECK-NEXT: ret void
catch10: ; preds = %catch.fallthrough
  %exn11 = load i8*, i8** %exn.slot
  %7 = bitcast i64* %ll to i8*
@@ -131,7 +132,7 @@ catch10: ; preds = %catch.fallthrough
  %8 = load i64, i64* %ll, align 8
  call void @"\01?handle_long_long@@YAX_J@Z"(i64 %8)
  call void @llvm.eh.endcatch() #3
  ret void
  br label %ret

; CHECK-NOT: catch.fallthrough2:
catch.fallthrough2: ; preds = %catch.fallthrough
@@ -140,8 +141,6 @@ catch.fallthrough2: ; preds = %catch.fallthrough
  br i1 %matches3, label %catch6, label %catch

; CHECK-NOT: catch6:
; CHECK: catch6.split:
; CHECK-NEXT: ret void
catch6: ; preds = %catch.fallthrough2
  %exn7 = load i8*, i8** %exn.slot
  %10 = bitcast %class.SomeClass** %obj to i8*
@@ -149,16 +148,14 @@ catch6: ; preds = %catch.fallthrough2
  %11 = load %class.SomeClass*, %class.SomeClass** %obj, align 8
  call void @"\01?handle_obj@@YAXPEAVSomeClass@@@Z"(%class.SomeClass* %11)
  call void @llvm.eh.endcatch() #3
  ret void
  br label %ret

; CHECK-NOT: catch:
; CHECK: catch.split:
; CHECK-NEXT: ret void
catch: ; preds = %catch.fallthrough2
  %exn = load i8*, i8** %exn.slot
  call void @llvm.eh.begincatch(i8* %exn, i8* null) #3
  call void @"\01?handle_exception@@YAXXZ"()
  call void @llvm.eh.endcatch() #3
  ret void
  br label %ret
; CHECK: }
}

@@ -168,7 +165,7 @@ catch: ; preds = %catch.fallthrough2
; CHECK: [[I_PTR:\%.+]] = bitcast i8* [[RECOVER_I]] to i32*
; CHECK: [[TMP1:\%.+]] = load i32, i32* [[I_PTR]], align 4
; CHECK: call void @"\01?handle_int@@YAXH@Z"(i32 [[TMP1]])
; CHECK: ret i8* blockaddress(@"\01?test@@YAXXZ", %catch14.split)
; CHECK: ret i8* blockaddress(@"\01?test@@YAXXZ", %ret)
; CHECK: }

; CHECK-LABEL: define internal i8* @"\01?test@@YAXXZ.catch1"(i8*, i8*)
@@ -177,7 +174,7 @@ catch: ; preds = %catch.fallthrough2
; CHECK: [[LL_PTR:\%.+]] = bitcast i8* [[RECOVER_LL]] to i64*
; CHECK: [[TMP2:\%.+]] = load i64, i64* [[LL_PTR]], align 8
; CHECK: call void @"\01?handle_long_long@@YAX_J@Z"(i64 [[TMP2]])
; CHECK: ret i8* blockaddress(@"\01?test@@YAXXZ", %catch10.split)
; CHECK: ret i8* blockaddress(@"\01?test@@YAXXZ", %ret)
; CHECK: }

; CHECK-LABEL: define internal i8* @"\01?test@@YAXXZ.catch2"(i8*, i8*)
@@ -186,13 +183,13 @@ catch: ; preds = %catch.fallthrough2
; CHECK: [[OBJ_PTR:\%.+]] = bitcast i8* [[RECOVER_OBJ]] to %class.SomeClass**
; CHECK: [[TMP3:\%.+]] = load %class.SomeClass*, %class.SomeClass** [[OBJ_PTR]], align 8
; CHECK: call void @"\01?handle_obj@@YAXPEAVSomeClass@@@Z"(%class.SomeClass* [[TMP3]])
; CHECK: ret i8* blockaddress(@"\01?test@@YAXXZ", %catch6.split)
; CHECK: ret i8* blockaddress(@"\01?test@@YAXXZ", %ret)
; CHECK: }

; CHECK-LABEL: define internal i8* @"\01?test@@YAXXZ.catch3"(i8*, i8*)
; CHECK: entry:
; CHECK: call void @"\01?handle_exception@@YAXXZ"()
; CHECK: ret i8* blockaddress(@"\01?test@@YAXXZ", %catch.split)
; CHECK: ret i8* blockaddress(@"\01?test@@YAXXZ", %ret)
; CHECK: }

@@ -55,8 +55,8 @@ $"\01??_R0H@8" = comdat any
; CHECK: entry:
; CHECK: [[NUMEXCEPTIONS_REGMEM:\%.+]] = alloca i32
; CHECK: [[I_REGMEM:\%.+]] = alloca i32
; CHECK: [[A_REGMEM:\%.+]] = alloca i32*
; CHECK: [[B_REGMEM:\%.+]] = alloca i32*
; CHECK: [[A_REGMEM:\%.+]] = alloca i32*
; CHECK: [[E_PTR:\%.+]] = alloca i32, align 4
; CHECK: [[EXCEPTIONVAL:\%.+]] = alloca [10 x i32], align 16
; CHECK: [[DATA_PTR:\%.+]] = alloca i64, align 8
@@ -68,9 +68,7 @@ $"\01??_R0H@8" = comdat any
; CHECK: store i32* [[A_PTR]], i32** [[A_REGMEM]]
; CHECK: [[B_PTR:\%.+]] = getelementptr inbounds %struct.SomeData, %struct.SomeData* [[TMPCAST]], i64 0, i32 1
; CHECK: store i32* [[B_PTR]], i32** [[B_REGMEM]]
; CHECK: store i32 0, i32* [[NUMEXCEPTIONS_REGMEM]]
; CHECK: store i32 0, i32* [[I_REGMEM]]
; CHECK: call void (...) @llvm.frameescape(i32* %e, i32* %NumExceptions.020.reg2mem, [10 x i32]* [[EXCEPTIONVAL]], i32* [[I_REGMEM]], i32** [[A_REGMEM]], i32** [[B_REGMEM]])
; CHECK: call void (...) @llvm.frameescape(i32* %e, i32* %NumExceptions.020.reg2mem, [10 x i32]* [[EXCEPTIONVAL]], i32* %inc.reg2mem, i32* [[I_REGMEM]], i32** [[A_REGMEM]], i32** [[B_REGMEM]])
; CHECK: br label %for.body

; Function Attrs: uwtable
@@ -88,10 +86,11 @@ entry:
  br label %for.body

; CHECK: for.body:
; CHECK-NOT: phi i32 [ 0, %entry ], [ {{\%NumExceptions.*}}, %try.cont ]
; CHECK-NOT: phi i32 [ 0, %entry ], [ {{\%inc.*}}, %try.cont ]
; CHECK: [[I_RELOAD:\%.+]] = load i32, i32* [[I_REGMEM]]
; CHECK: [[NUMEXCEPTIONS_RELOAD:\%.+]] = load i32, i32* [[NUMEXCEPTIONS_REGMEM]]
; CHECK: [[NUMEXCEPTIONS_PHI:\%.*]] = phi i32 [ 0, %entry ], [ {{\%NumExceptions.*}}, %try.cont ]
; CHECK: [[I_PHI:\%.*]] = phi i32 [ 0, %entry ], [ {{\%inc.*}}, %try.cont ]
; CHECK: store i32 [[I_PHI]], i32* [[I_REGMEM]]
; CHECK: store i32 [[NUMEXCEPTIONS_PHI]], i32* [[NUMEXCEPTIONS_REGMEM]]
; CHECK: invoke void @"\01?may_throw@@YAXXZ"()
for.body: ; preds = %entry, %try.cont
  %NumExceptions.020 = phi i32 [ 0, %entry ], [ %NumExceptions.1, %try.cont ]
  %i.019 = phi i32 [ 0, %entry ], [ %inc5, %try.cont ]
@@ -99,11 +98,12 @@ for.body: ; preds = %entry, %try.cont
      to label %invoke.cont unwind label %lpad

; CHECK: invoke.cont: ; preds = %for.body
; CHECK: [[A_RELOAD:\%.+]] = load volatile i32*, i32** [[A_REGMEM]]
; CHECK: [[A_RELOAD:\%.+]] = load i32*, i32** [[A_REGMEM]]
; CHECK: [[TMP1:\%.+]] = load i32, i32* [[A_RELOAD]], align 8
; CHECK: [[I_RELOAD:\%.+]] = load i32, i32* [[I_REGMEM]]
; CHECK: [[ADD:\%.+]] = add nsw i32 [[TMP1]], [[I_RELOAD]]
; CHECK: [[A_RELOAD1:\%.+]] = load volatile i32*, i32** [[A_REGMEM]]
; CHECK: store i32 [[ADD]], i32* [[A_RELOAD1]], align 8
; CHECK: [[A_RELOAD1:\%.+]] = load i32*, i32** [[A_REGMEM]]
; CHECK: [[NUMEXCEPTIONS_RELOAD:\%.+]] = load i32, i32* [[NUMEXCEPTIONS_REGMEM]]
; CHECK: br label %try.cont
invoke.cont: ; preds = %for.body
  %1 = load i32, i32* %a, align 8, !tbaa !2
@@ -115,7 +115,7 @@ invoke.cont: ; preds = %for.body
; CHECK: landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*)
; CHECK-NEXT: catch i8* bitcast (%rtti.TypeDescriptor2* @"\01??_R0H@8" to i8*)
; CHECK-NEXT: [[RECOVER:\%.+]] = call i8* (...) @llvm.eh.actions(i32 1, i8* bitcast (%rtti.TypeDescriptor2* @"\01??_R0H@8" to i8*), i32 0, i8* (i8*, i8*)* @"\01?test@@YAXXZ.catch")
; CHECK-NEXT: indirectbr i8* [[RECOVER]], [label %try.cont]
; CHECK-NEXT: indirectbr i8* [[RECOVER]], [label %[[SPLIT_RECOVER_BB:.*]]]

lpad: ; preds = %for.body
  %2 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*)
@@ -139,8 +139,6 @@ catch: ; preds = %lpad
  %cmp1 = icmp eq i32 %tmp8, %i.019
  br i1 %cmp1, label %if.then, label %if.else

; CHECK-NOT: if.then:

if.then: ; preds = %catch
  %tmp9 = load i32, i32* %b, align 4, !tbaa !8
  %add2 = add nsw i32 %tmp9, %i.019
@@ -156,18 +154,20 @@ if.else: ; preds = %catch
  br label %if.end

; CHECK-NOT: if.end:
; CHECK: [[SPLIT_RECOVER_BB]]:
; CHECK: [[INC_RELOAD:\%.*]] = load i32, i32*
; CHECK: br label %try.cont

if.end: ; preds = %if.else, %if.then
  tail call void @llvm.eh.endcatch() #1
  br label %try.cont

; CHECK: try.cont:{{[ ]+}}; preds = %[[LPAD_LABEL]], %invoke.cont
; CHECK-NOT: phi i32
; CHECK: tail call void @"\01?does_not_throw@@YAXH@Z"(i32 [[NUMEXCEPTIONS_RELOAD]])
; CHECK: try.cont:{{[ ]+}}; preds = %[[SPLIT_RECOVER_BB]], %invoke.cont
; CHECK: [[NUMEXCEPTIONS_PHI:\%.*]] = phi i32 [ [[NUMEXCEPTIONS_RELOAD]], %invoke.cont ], [ [[INC_RELOAD]], %[[SPLIT_RECOVER_BB]] ]
; CHECK: tail call void @"\01?does_not_throw@@YAXH@Z"(i32 [[NUMEXCEPTIONS_PHI]])
; CHECK: [[I_RELOAD:\%.+]] = load i32, i32* [[I_REGMEM]]
; CHECK: [[INC:\%.+]] = add nuw nsw i32 [[I_RELOAD]], 1
; CHECK: [[CMP:\%.+]] = icmp slt i32 [[INC]], 10
; CHECK: store i32 [[NUMEXCEPTIONS_RELOAD]], i32* [[NUMEXCEPTIONS_REGMEM]]
; CHECK: store i32 [[INC]], i32* [[I_REGMEM]]
; CHECK: br i1 [[CMP]], label %for.body, label %for.end

try.cont: ; preds = %if.end, %invoke.cont
@@ -194,43 +194,48 @@ eh.resume: ; preds = %lpad
; CHECK: entry:
; CHECK: [[RECOVER_E:\%.+]] = call i8* @llvm.framerecover(i8* bitcast (void ()* @"\01?test@@YAXXZ" to i8*), i8* %1, i32 0)
; CHECK: [[E_PTR:\%.+]] = bitcast i8* [[RECOVER_E]] to i32*
; CHECK: [[RECOVER_EH_TEMP:\%.+]] = call i8* @llvm.framerecover(i8* bitcast (void ()* @"\01?test@@YAXXZ" to i8*), i8* %1, i32 1)
; CHECK: [[EH_TEMP:\%.+]] = bitcast i8* [[RECOVER_EH_TEMP]] to i32*
; CHECK: [[NUMEXCEPTIONS_RELOAD:\%.+]] = load i32, i32* [[EH_TEMP]]
; CHECK: [[RECOVER_NUMEXCEPTIONS:\%.+]] = call i8* @llvm.framerecover(i8* bitcast (void ()* @"\01?test@@YAXXZ" to i8*), i8* %1, i32 1)
; CHECK: [[NUMEXCEPTIONS_REGMEM:\%.+]] = bitcast i8* [[RECOVER_NUMEXCEPTIONS]] to i32*
; CHECK: [[RECOVER_EXCEPTIONVAL:\%.+]] = call i8* @llvm.framerecover(i8* bitcast (void ()* @"\01?test@@YAXXZ" to i8*), i8* %1, i32 2)
; CHECK: [[EXCEPTIONVAL:\%.+]] = bitcast i8* [[RECOVER_EXCEPTIONVAL]] to [10 x i32]*
; CHECK: [[RECOVER_EH_TEMP1:\%.+]] = call i8* @llvm.framerecover(i8* bitcast (void ()* @"\01?test@@YAXXZ" to i8*), i8* %1, i32 3)
; CHECK: [[EH_TEMP1:\%.+]] = bitcast i8* [[RECOVER_EH_TEMP1]] to i32*
; CHECK: [[I_RELOAD:\%.+]] = load i32, i32* [[EH_TEMP1]]
; CHECK: [[RECOVER_EH_TEMP2:\%.+]] = call i8* @llvm.framerecover(i8* bitcast (void ()* @"\01?test@@YAXXZ" to i8*), i8* %1, i32 4)
; CHECK: [[EH_TEMP2:\%.+]] = bitcast i8* [[RECOVER_EH_TEMP2]] to i32**
; CHECK: [[A_RELOAD:\%.+]] = load i32*, i32** [[EH_TEMP2]]
; CHECK: [[RECOVER_EH_TEMP3:\%.+]] = call i8* @llvm.framerecover(i8* bitcast (void ()* @"\01?test@@YAXXZ" to i8*), i8* %1, i32 5)
; CHECK: [[EH_TEMP3:\%.+]] = bitcast i8* [[RECOVER_EH_TEMP3]] to i32**
; CHECK: [[B_RELOAD:\%.+]] = load i32*, i32** [[EH_TEMP3]]
; CHECK: [[RECOVER_INC:\%.+]] = call i8* @llvm.framerecover(i8* bitcast (void ()* @"\01?test@@YAXXZ" to i8*), i8* %1, i32 3)
; CHECK: [[INC_REGMEM:\%.+]] = bitcast i8* [[RECOVER_INC]] to i32*
; CHECK: [[RECOVER_I:\%.+]] = call i8* @llvm.framerecover(i8* bitcast (void ()* @"\01?test@@YAXXZ" to i8*), i8* %1, i32 4)
; CHECK: [[I_REGMEM:\%.+]] = bitcast i8* [[RECOVER_I]] to i32*
; CHECK: [[RECOVER_A:\%.+]] = call i8* @llvm.framerecover(i8* bitcast (void ()* @"\01?test@@YAXXZ" to i8*), i8* %1, i32 5)
; CHECK: [[A_REGMEM:\%.+]] = bitcast i8* [[RECOVER_A]] to i32**
; CHECK: [[RECOVER_B:\%.+]] = call i8* @llvm.framerecover(i8* bitcast (void ()* @"\01?test@@YAXXZ" to i8*), i8* %1, i32 6)
; CHECK: [[B_REGMEM:\%.+]] = bitcast i8* [[RECOVER_B]] to i32**
; CHECK: [[E_I8PTR:\%.+]] = bitcast i32* [[E_PTR]] to i8*
; CHECK: [[TMP:\%.+]] = load i32, i32* [[E_PTR]], align 4
; CHECK: [[NUMEXCEPTIONS_RELOAD:\%.+]] = load i32, i32* [[NUMEXCEPTIONS_REGMEM]]
; CHECK: [[IDXPROM:\%.+]] = sext i32 [[NUMEXCEPTIONS_RELOAD]] to i64
; CHECK: [[ARRAYIDX:\%.+]] = getelementptr inbounds [10 x i32], [10 x i32]* [[EXCEPTIONVAL]], i64 0, i64 [[IDXPROM]]
; CHECK: store i32 [[TMP]], i32* [[ARRAYIDX]], align 4
; CHECK: [[NUMEXCEPTIONS_RELOAD:\%.+]] = load i32, i32* [[NUMEXCEPTIONS_REGMEM]]
; CHECK: [[INC:\%.+]] = add nsw i32 [[NUMEXCEPTIONS_RELOAD]], 1
; CHECK: [[CMP:\%.+]] = icmp eq i32 [[TMP]], [[I_RELOAD]]
; CHECK: br i1 [[CMP]], label %if.then, label %if.else
;
; CHECK: if.then:{{[ ]+}}; preds = %entry
; CHECK: [[B_RELOAD:\%.+]] = load i32*, i32** [[B_REGMEM]]
; CHECK: [[TMP1:\%.+]] = load i32, i32* [[B_RELOAD]], align 4
; CHECK: [[I_RELOAD:\%.+]] = load i32, i32* [[I_REGMEM]]
; CHECK: [[ADD:\%.+]] = add nsw i32 [[TMP1]], [[I_RELOAD]]
; CHECK: [[B_RELOAD:\%.+]] = load i32*, i32** [[B_REGMEM]]
; CHECK: store i32 [[ADD]], i32* [[B_RELOAD]], align 4
; CHECK: br label %if.end
;
; CHECK: if.else:{{[ ]+}}; preds = %entry
; CHECK: [[A_RELOAD:\%.+]] = load i32*, i32** [[A_REGMEM]]
; CHECK: [[TMP2:\%.+]] = load i32, i32* [[A_RELOAD]], align 8
; CHECK: [[ADD2:\%.+]] = add nsw i32 [[TMP2]], [[TMP]]
; CHECK: [[A_RELOAD:\%.+]] = load i32*, i32** [[A_REGMEM]]
; CHECK: store i32 [[ADD2]], i32* [[A_RELOAD]], align 8
; CHECK: br label %if.end
;
; CHECK: if.end:{{[ ]+}}; preds = %if.else, %if.then
; CHECK: ret i8* blockaddress(@"\01?test@@YAXXZ", %try.cont)
; CHECK: ret i8* blockaddress(@"\01?test@@YAXXZ", %[[SPLIT_RECOVER_BB]])
; CHECK: }

; Function Attrs: nounwind
@@ -94,11 +94,60 @@ eh.resume:
; CHECK-LABEL: define i32 @except_phi()
; CHECK: landingpad { i8*, i32 }
; CHECK-NEXT: catch i32 ()* @filt
; CHECK-NEXT: call i8* (...) @llvm.eh.actions(i32 1, i8* bitcast (i32 ()* @filt to i8*), i32 -1, i8* blockaddress(@except_phi, %return))
; CHECK-NEXT: indirectbr {{.*}} [label %return]
; CHECK-NEXT: call i8* (...) @llvm.eh.actions(i32 1, i8* bitcast (i32 ()* @filt to i8*), i32 -1, i8* blockaddress(@except_phi, %lpad.return_crit_edge))
; CHECK-NEXT: indirectbr {{.*}} [label %lpad.return_crit_edge]
;
; CHECK: lpad.return_crit_edge:
; CHECK: br label %return
;
; CHECK: return:
; CHECK-NEXT: %r = phi i32 [ 0, %entry ], [ 1, %lpad1 ]
; CHECK-NEXT: %r = phi i32 [ 0, %entry ], [ 1, %lpad.return_crit_edge ]
; CHECK-NEXT: ret i32 %r

define i32 @lpad_phi() {
entry:
  invoke void @might_crash()
      to label %cont unwind label %lpad

cont:
  invoke void @might_crash()
      to label %return unwind label %lpad

lpad:
  %ncalls.1 = phi i32 [ 0, %entry ], [ 1, %cont ]
  %ehvals = landingpad { i8*, i32 } personality i32 (...)* @__C_specific_handler
      catch i32 ()* @filt
  %sel = extractvalue { i8*, i32 } %ehvals, 1
  %filt_sel = tail call i32 @llvm.eh.typeid.for(i8* bitcast (i32 ()* @filt to i8*))
  %matches = icmp eq i32 %sel, %filt_sel
  br i1 %matches, label %return, label %eh.resume

return:
  %r = phi i32 [2, %cont], [%ncalls.1, %lpad]
  ret i32 %r

eh.resume:
  resume { i8*, i32 } %ehvals
}

; CHECK-LABEL: define i32 @lpad_phi()
; CHECK: alloca i32
; CHECK: store i32 0, i32*
; CHECK: invoke void @might_crash()
; CHECK: store i32 1, i32*
; CHECK: invoke void @might_crash()
; CHECK: landingpad { i8*, i32 }
; CHECK-NEXT: cleanup
; CHECK-NEXT: catch i32 ()* @filt
; CHECK-NEXT: call i8* (...) @llvm.eh.actions(i32 0, void (i8*, i8*)* @lpad_phi.cleanup, i32 1, i8* bitcast (i32 ()* @filt to i8*), i32 -1, i8* blockaddress(@lpad_phi, %lpad.return_crit_edge))
; CHECK-NEXT: indirectbr {{.*}} [label %lpad.return_crit_edge]
;
; CHECK: lpad.return_crit_edge:
; CHECK: load i32, i32*
; CHECK: br label %return
;
; CHECK: return:
; CHECK-NEXT: %r = phi i32 [ 2, %cont ], [ %{{.*}}, %lpad.return_crit_edge ]
; CHECK-NEXT: ret i32 %r

define i32 @cleanup_and_except() {
@@ -130,9 +179,17 @@ eh.resume:
; CHECK-NEXT: catch i32 ()* @filt
; CHECK-NEXT: call i8* (...) @llvm.eh.actions(
; CHECK: i32 0, void (i8*, i8*)* @cleanup_and_except.cleanup,
; CHECK: i32 1, i8* bitcast (i32 ()* @filt to i8*), i32 -1, i8* blockaddress(@cleanup_and_except, %return))
; CHECK-NEXT: indirectbr {{.*}} [label %return]
; CHECK: i32 1, i8* bitcast (i32 ()* @filt to i8*), i32 -1, i8* blockaddress(@cleanup_and_except, %lpad.return_crit_edge))
; CHECK-NEXT: indirectbr {{.*}} [label %lpad.return_crit_edge]
;
; CHECK: lpad.return_crit_edge:
; CHECK: br label %return
;
; CHECK: return:
; CHECK-NEXT: %r = phi i32 [ 0, %entry ], [ 1, %lpad1 ]
; CHECK-NEXT: %r = phi i32 [ 0, %entry ], [ 1, %lpad.return_crit_edge ]
; CHECK-NEXT: ret i32 %r

; FIXME: This cleanup is an artifact of bad demotion.
; CHECK-LABEL: define internal void @lpad_phi.cleanup(i8*, i8*)
; CHECK: load i32
; CHECK: store i32 %{{.*}}, i32*