
[SEH] Remove CATCHPAD SDNode and X86::EH_RESTORE MachineInstr

The CATCHPAD node mostly existed to be selected into the EH_RESTORE
instruction, which sets the frame back up when 32-bit Windows exceptions
return to the parent function. However, creating this MachineInstr early
increases the risk that other passes will come along and insert
instructions that use the stack before ESP and EBP are restored. That
happened in PR44697.

Instead of representing the frame restore in the instruction stream early,
delay it until PEI. Mark the blocks where this needs to happen as EHPads,
but not funclet entry blocks. Passes after PEI already have to be careful
not to hoist instructions that use the stack across frame setup
instructions, so this should be relatively reliable.
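
Concretely, the check PEI relies on is a simple predicate over machine
basic blocks. A minimal sketch of the restore loop, mirroring the
X86FrameLowering::restoreWinEHStackPointersInParent hunk in this diff
(MF, IsSEH, and restoreWin32EHStackPointers are as in that hunk):

  for (MachineBasicBlock &MBB : MF) {
    // Any EH pad that is not a funclet entry is a point where control
    // re-enters the parent frame, so the frame registers (EBP, plus ESP
    // for SEH) are re-established there.
    if (MBB.isEHPad() && !MBB.isEHFuncletEntry())
      restoreWin32EHStackPointers(MBB, MBB.begin(), DebugLoc(),
                                  /*RestoreSP=*/IsSEH);
  }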

Fixes PR44697

Reviewed By: hans

Differential Revision: https://reviews.llvm.org/D73752
Reid Kleckner 2020-01-30 15:15:57 -08:00
parent dccd6cdc3a
commit 4849f968cb
13 changed files with 101 additions and 68 deletions


@@ -738,9 +738,6 @@ namespace ISD {
/// past it.
ANNOTATION_LABEL,
/// CATCHPAD - Represents a catchpad instruction.
CATCHPAD,
/// CATCHRET - Represents a return from a catch block funclet. Used for
/// MSVC compatible exception handling. Takes a chain operand and a
/// destination basic block operand.


@@ -564,8 +564,6 @@ def br : SDNode<"ISD::BR" , SDTBr, [SDNPHasChain]>;
def catchret : SDNode<"ISD::CATCHRET" , SDTCatchret,
[SDNPHasChain, SDNPSideEffect]>;
def cleanupret : SDNode<"ISD::CLEANUPRET" , SDTNone, [SDNPHasChain]>;
def catchpad : SDNode<"ISD::CATCHPAD" , SDTNone,
[SDNPHasChain, SDNPSideEffect]>;
def trap : SDNode<"ISD::TRAP" , SDTNone,
[SDNPHasChain, SDNPSideEffect]>;


@@ -1615,17 +1615,12 @@ void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
bool IsSEH = isAsynchronousEHPersonality(Pers);
bool IsWasmCXX = Pers == EHPersonality::Wasm_CXX;
MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
if (!IsSEH)
CatchPadMBB->setIsEHScopeEntry();
// In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
if (IsMSVCCXX || IsCoreCLR)
CatchPadMBB->setIsEHFuncletEntry();
// Wasm does not need catchpads anymore
if (!IsWasmCXX)
DAG.setRoot(DAG.getNode(ISD::CATCHPAD, getCurSDLoc(), MVT::Other,
getControlRoot()));
}
void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {


@@ -1497,12 +1497,6 @@ MachineBasicBlock *AArch64TargetLowering::EmitLoweredCatchRet(
return BB;
}
MachineBasicBlock *AArch64TargetLowering::EmitLoweredCatchPad(
MachineInstr &MI, MachineBasicBlock *BB) const {
MI.eraseFromParent();
return BB;
}
MachineBasicBlock *AArch64TargetLowering::EmitInstrWithCustomInserter(
MachineInstr &MI, MachineBasicBlock *BB) const {
switch (MI.getOpcode()) {
@@ -1521,8 +1515,6 @@ MachineBasicBlock *AArch64TargetLowering::EmitInstrWithCustomInserter(
case AArch64::CATCHRET:
return EmitLoweredCatchRet(MI, BB);
case AArch64::CATCHPAD:
return EmitLoweredCatchPad(MI, BB);
}
}


@@ -386,9 +386,6 @@ public:
MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
MachineBasicBlock *BB) const;
MachineBasicBlock *EmitLoweredCatchPad(MachineInstr &MI,
MachineBasicBlock *BB) const;
MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *MBB) const override;


@@ -3628,10 +3628,6 @@ let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
Sched<[]>;
}
let hasSideEffects = 1, hasCtrlDep = 1, isCodeGenOnly = 1,
usesCustomInserter = 1 in
def CATCHPAD : Pseudo<(outs), (ins), [(catchpad)]>, Sched<[]>;
//===----------------------------------------------------------------------===//
// Floating point immediate move.
//===----------------------------------------------------------------------===//


@@ -331,14 +331,6 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
MBB.erase(MBBI);
return true;
}
case X86::EH_RESTORE: {
// Restore ESP and EBP, and optionally ESI if required.
bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(
MBB.getParent()->getFunction().getPersonalityFn()));
X86FL->restoreWin32EHStackPointers(MBB, MBBI, DL, /*RestoreSP=*/IsSEH);
MBBI->eraseFromParent();
return true;
}
case X86::LCMPXCHG8B_SAVE_EBX:
case X86::LCMPXCHG16B_SAVE_RBX: {
// Perform the following transformation.


@@ -3219,3 +3219,24 @@ void X86FrameLowering::processFunctionBeforeFrameFinalized(
UnwindHelpFI)
.addImm(-2);
}
void X86FrameLowering::processFunctionBeforeFrameIndicesReplaced(
MachineFunction &MF, RegScavenger *RS) const {
if (STI.is32Bit() && MF.hasEHFunclets())
restoreWinEHStackPointersInParent(MF);
}
void X86FrameLowering::restoreWinEHStackPointersInParent(
MachineFunction &MF) const {
// 32-bit functions have to restore stack pointers when control is transferred
// back to the parent function. These blocks are identified as eh pads that
// are not funclet entries.
bool IsSEH = isAsynchronousEHPersonality(
classifyEHPersonality(MF.getFunction().getPersonalityFn()));
for (MachineBasicBlock &MBB : MF) {
bool NeedsRestore = MBB.isEHPad() && !MBB.isEHFuncletEntry();
if (NeedsRestore)
restoreWin32EHStackPointers(MBB, MBB.begin(), DebugLoc(),
/*RestoreSP=*/IsSEH);
}
}


@@ -116,6 +116,10 @@ public:
void processFunctionBeforeFrameFinalized(MachineFunction &MF,
RegScavenger *RS) const override;
void
processFunctionBeforeFrameIndicesReplaced(MachineFunction &MF,
RegScavenger *RS) const override;
/// Check the instruction before/after the passed instruction. If
/// it is an ADD/SUB/LEA instruction it is deleted argument and the
/// stack adjustment is returned as a positive value for ADD/LEA and
@@ -169,6 +173,8 @@ public:
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, bool RestoreSP = false) const;
void restoreWinEHStackPointersInParent(MachineFunction &MF) const;
int getInitialCFAOffset(const MachineFunction &MF) const override;
unsigned getInitialCFARegister(const MachineFunction &MF) const override;


@@ -31273,25 +31273,12 @@ X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
   BB->addSuccessor(RestoreMBB);
   MI.getOperand(0).setMBB(RestoreMBB);
-  auto RestoreMBBI = RestoreMBB->begin();
-  BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::EH_RESTORE));
-  BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB);
-  return BB;
-}
+  // Marking this as an EH pad but not a funclet entry block causes PEI to
+  // restore stack pointers in the block.
+  RestoreMBB->setIsEHPad(true);
-MachineBasicBlock *
-X86TargetLowering::EmitLoweredCatchPad(MachineInstr &MI,
-                                       MachineBasicBlock *BB) const {
-  MachineFunction *MF = BB->getParent();
-  const Constant *PerFn = MF->getFunction().getPersonalityFn();
-  bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(PerFn));
-  // Only 32-bit SEH requires special handling for catchpad.
-  if (IsSEH && Subtarget.is32Bit()) {
-    const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
-    DebugLoc DL = MI.getDebugLoc();
-    BuildMI(*BB, MI, DL, TII.get(X86::EH_RESTORE));
-  }
-  MI.eraseFromParent();
+  auto RestoreMBBI = RestoreMBB->begin();
+  BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB);
   return BB;
 }
@@ -32283,8 +32270,6 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
return EmitLoweredRetpoline(MI, BB);
case X86::CATCHRET:
return EmitLoweredCatchRet(MI, BB);
case X86::CATCHPAD:
return EmitLoweredCatchPad(MI, BB);
case X86::SEG_ALLOCA_32:
case X86::SEG_ALLOCA_64:
return EmitLoweredSegAlloca(MI, BB);


@@ -1462,9 +1462,6 @@ namespace llvm {
MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
MachineBasicBlock *BB) const;
MachineBasicBlock *EmitLoweredCatchPad(MachineInstr &MI,
MachineBasicBlock *BB) const;
MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr &MI,
MachineBasicBlock *BB) const;


@@ -177,18 +177,6 @@ let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
[(catchret bb:$dst, bb:$from)]>;
}
let hasSideEffects = 1, hasCtrlDep = 1, isCodeGenOnly = 1,
usesCustomInserter = 1 in
def CATCHPAD : I<0, Pseudo, (outs), (ins), "# CATCHPAD", [(catchpad)]>;
// This instruction is responsible for re-establishing stack pointers after an
// exception has been caught and we are rejoining normal control flow in the
// parent function or funclet. It generally sets ESP and EBP, and optionally
// ESI. It is only needed for 32-bit WinEH, as the runtime restores CSRs for us
// elsewhere.
let hasSideEffects = 1, hasCtrlDep = 1, isCodeGenOnly = 1 in
def EH_RESTORE : I<0, Pseudo, (outs), (ins), "# EH_RESTORE", []>;
let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
usesCustomInserter = 1 in {
def EH_SjLj_SetJmp32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),


@@ -0,0 +1,69 @@
; RUN: llc < %s | FileCheck %s
; In PR44697, the register allocator inserted loads into the __except block
; before the instructions that restore EBP and ESP back to what they should be.
; Make sure they are the first instructions in the __except block.
; ModuleID = 't.cpp'
source_filename = "t.cpp"
target datalayout = "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:32-n8:16:32-a:0:32-S32"
target triple = "i386-pc-windows-msvc19.24.28315"
declare i8* @llvm.frameaddress.p0i8(i32 immarg)
declare i8* @llvm.eh.recoverfp(i8*, i8*)
declare i8* @llvm.localrecover(i8*, i8*, i32 immarg)
declare dso_local i32 @_except_handler3(...)
declare void @llvm.localescape(...)
define dso_local zeroext i1 @invokewrapper(
void ()* nocapture %Fn,
i1 zeroext %DumpStackAndCleanup,
i32* nocapture dereferenceable(4) %RetCode)
personality i32 (...)* @_except_handler3 {
entry:
%__exception_code = alloca i32, align 4
call void (...) @llvm.localescape(i32* nonnull %__exception_code)
invoke void %Fn()
to label %return unwind label %catch.dispatch
catch.dispatch: ; preds = %entry
%0 = catchswitch within none [label %__except.ret] unwind to caller
__except.ret: ; preds = %catch.dispatch
%1 = catchpad within %0 [i8* bitcast (i32 ()* @filter to i8*)]
catchret from %1 to label %__except
__except: ; preds = %__except.ret
%2 = load i32, i32* %__exception_code, align 4
store i32 %2, i32* %RetCode, align 4
br label %return
return: ; preds = %entry, %__except
%retval.0 = phi i1 [ false, %__except ], [ true, %entry ]
ret i1 %retval.0
}
; CHECK-LABEL: _invokewrapper: # @invokewrapper
; CHECK: calll *8(%ebp)
; CHECK: LBB0_2: # %return
; CHECK: LBB0_1: # %__except.ret
; CHECK-NEXT: movl -24(%ebp), %esp
; CHECK-NEXT: addl $12, %ebp
; Function Attrs: nofree nounwind
define internal i32 @filter() {
entry:
%0 = tail call i8* @llvm.frameaddress.p0i8(i32 1)
%1 = tail call i8* @llvm.eh.recoverfp(i8* bitcast (i1 (void ()*, i1, i32*)* @invokewrapper to i8*), i8* %0)
%2 = tail call i8* @llvm.localrecover(i8* bitcast (i1 (void ()*, i1, i32*)* @invokewrapper to i8*), i8* %1, i32 0)
%__exception_code = bitcast i8* %2 to i32*
%3 = getelementptr inbounds i8, i8* %0, i32 -20
%4 = bitcast i8* %3 to { i32*, i8* }**
%5 = load { i32*, i8* }*, { i32*, i8* }** %4, align 4
%6 = getelementptr inbounds { i32*, i8* }, { i32*, i8* }* %5, i32 0, i32 0
%7 = load i32*, i32** %6, align 4
%8 = load i32, i32* %7, align 4
store i32 %8, i32* %__exception_code, align 4
ret i32 1
}