
Delete x86_64 ShadowCallStack support

Summary:
ShadowCallStack on x86_64 suffered from the same racy security issues as
Return Flow Guard and had performance overhead as high as 13%, depending
on the benchmark. x86_64 ShadowCallStack was always an experimental
feature and never shipped the runtime required to support it, so there
are no expected downstream users.

Reviewers: pcc

Reviewed By: pcc

Subscribers: mgorny, javed.absar, hiraditya, jdoerfert, cfe-commits, #sanitizers, llvm-commits

Tags: #clang, #sanitizers, #llvm

Differential Revision: https://reviews.llvm.org/D59034

llvm-svn: 355624
Author: Vlad Tsyrklevich
Date:   2019-03-07 18:56:36 +00:00
Commit: 5004401bf0
Parent: cf77187603
8 changed files with 0 additions and 545 deletions


@@ -22,7 +22,6 @@ endif()
add_public_tablegen_target(X86CommonTableGen)
set(sources
ShadowCallStack.cpp
X86AsmPrinter.cpp
X86CallFrameOptimization.cpp
X86CallingConv.cpp


@@ -1,321 +0,0 @@
//===------- ShadowCallStack.cpp - Shadow Call Stack pass -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The ShadowCallStack pass instruments function prologs/epilogs to check that
// the return address has not been corrupted during the execution of the
// function. The return address is stored in a 'shadow call stack' addressed
// using the %gs segment register.
//
//===----------------------------------------------------------------------===//
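//
// In the scheme implemented below, gs:[0] holds the byte offset of the current
// top of the shadow call stack, and each instrumented frame saves its return
// address at that offset. A condensed sketch of the sequences built by
// addProlog/addEpilog (see those functions for the authoritative builders):
//
//   prolog:                            epilog:
//     mov r10, [rsp]    ; ret addr       xor r11, r11
//     xor r11, r11                       mov r10, [gs:r11]  ; offset
//     add [gs:r11], 8   ; push slot      mov r10, [gs:r10]  ; saved ret addr
//     mov r11, [gs:r11] ; new offset     sub [gs:r11], 8    ; pop slot
//     mov [gs:r11], r10 ; save addr      cmp [rsp], r10
//                                        jne trap
//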
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
namespace {
class ShadowCallStack : public MachineFunctionPass {
public:
static char ID;
ShadowCallStack() : MachineFunctionPass(ID) {
initializeShadowCallStackPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
MachineFunctionPass::getAnalysisUsage(AU);
}
bool runOnMachineFunction(MachineFunction &Fn) override;
private:
// Do not instrument leaf functions with this many or fewer instructions. The
// shadow call stack instrumented prolog/epilog are slightly race-y reading
// and checking the saved return address, so it is better to not instrument
// functions that have fewer instructions than the instrumented prolog/epilog
// race.
static const size_t SkipLeafInstructions = 3;
};
char ShadowCallStack::ID = 0;
} // end anonymous namespace.
static void addProlog(MachineFunction &Fn, const TargetInstrInfo *TII,
MachineBasicBlock &MBB, const DebugLoc &DL);
static void addPrologLeaf(MachineFunction &Fn, const TargetInstrInfo *TII,
MachineBasicBlock &MBB, const DebugLoc &DL,
MCPhysReg FreeRegister);
static void addEpilog(const TargetInstrInfo *TII, MachineBasicBlock &MBB,
MachineInstr &MI, MachineBasicBlock &TrapBB);
static void addEpilogLeaf(const TargetInstrInfo *TII, MachineBasicBlock &MBB,
MachineInstr &MI, MachineBasicBlock &TrapBB,
MCPhysReg FreeRegister);
// Generate a longer epilog that only uses r10 when a tailcall branches to r11.
static void addEpilogOnlyR10(const TargetInstrInfo *TII, MachineBasicBlock &MBB,
MachineInstr &MI, MachineBasicBlock &TrapBB);
// Helper function to add ModR/M references for [Seg: Reg + Offset] memory
// accesses
static inline const MachineInstrBuilder &
addSegmentedMem(const MachineInstrBuilder &MIB, MCPhysReg Seg, MCPhysReg Reg,
int Offset = 0) {
return MIB.addReg(Reg).addImm(1).addReg(0).addImm(Offset).addReg(Seg);
}
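// For example, addSegmentedMem(MIB, X86::GS, X86::R11, 8) appends the operands
//   base=$r11, scale=1, index=$noreg, disp=8, segment=$gs
// i.e. the memory reference [gs: r11 + 8], in the operand order expected by
// the X86 memory-form instructions used below.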
static void addProlog(MachineFunction &Fn, const TargetInstrInfo *TII,
MachineBasicBlock &MBB, const DebugLoc &DL) {
const MCPhysReg ReturnReg = X86::R10;
const MCPhysReg OffsetReg = X86::R11;
auto MBBI = MBB.begin();
// mov r10, [rsp]
addDirectMem(BuildMI(MBB, MBBI, DL, TII->get(X86::MOV64rm)).addDef(ReturnReg),
X86::RSP);
// xor r11, r11
BuildMI(MBB, MBBI, DL, TII->get(X86::XOR64rr))
.addDef(OffsetReg)
.addReg(OffsetReg, RegState::Undef)
.addReg(OffsetReg, RegState::Undef);
// add QWORD [gs:r11], 8
addSegmentedMem(BuildMI(MBB, MBBI, DL, TII->get(X86::ADD64mi8)), X86::GS,
OffsetReg)
.addImm(8);
// mov r11, [gs:r11]
addSegmentedMem(
BuildMI(MBB, MBBI, DL, TII->get(X86::MOV64rm)).addDef(OffsetReg), X86::GS,
OffsetReg);
// mov [gs:r11], r10
addSegmentedMem(BuildMI(MBB, MBBI, DL, TII->get(X86::MOV64mr)), X86::GS,
OffsetReg)
.addReg(ReturnReg);
}
static void addPrologLeaf(MachineFunction &Fn, const TargetInstrInfo *TII,
MachineBasicBlock &MBB, const DebugLoc &DL,
MCPhysReg FreeRegister) {
// mov REG, [rsp]
addDirectMem(BuildMI(MBB, MBB.begin(), DL, TII->get(X86::MOV64rm))
.addDef(FreeRegister),
X86::RSP);
}
static void addEpilog(const TargetInstrInfo *TII, MachineBasicBlock &MBB,
MachineInstr &MI, MachineBasicBlock &TrapBB) {
const DebugLoc &DL = MI.getDebugLoc();
// xor r11, r11
BuildMI(MBB, MI, DL, TII->get(X86::XOR64rr))
.addDef(X86::R11)
.addReg(X86::R11, RegState::Undef)
.addReg(X86::R11, RegState::Undef);
// mov r10, [gs:r11]
addSegmentedMem(BuildMI(MBB, MI, DL, TII->get(X86::MOV64rm)).addDef(X86::R10),
X86::GS, X86::R11);
// mov r10, [gs:r10]
addSegmentedMem(BuildMI(MBB, MI, DL, TII->get(X86::MOV64rm)).addDef(X86::R10),
X86::GS, X86::R10);
// sub QWORD [gs:r11], 8
// This instruction should not be moved up to avoid a signal race.
addSegmentedMem(BuildMI(MBB, MI, DL, TII->get(X86::SUB64mi8)),
X86::GS, X86::R11)
.addImm(8);
// cmp [rsp], r10
addDirectMem(BuildMI(MBB, MI, DL, TII->get(X86::CMP64mr)), X86::RSP)
.addReg(X86::R10);
// jne trap
BuildMI(MBB, MI, DL, TII->get(X86::JNE_1)).addMBB(&TrapBB);
MBB.addSuccessor(&TrapBB);
}
static void addEpilogLeaf(const TargetInstrInfo *TII, MachineBasicBlock &MBB,
MachineInstr &MI, MachineBasicBlock &TrapBB,
MCPhysReg FreeRegister) {
const DebugLoc &DL = MI.getDebugLoc();
// cmp [rsp], REG
addDirectMem(BuildMI(MBB, MI, DL, TII->get(X86::CMP64mr)), X86::RSP)
.addReg(FreeRegister);
// jne trap
BuildMI(MBB, MI, DL, TII->get(X86::JNE_1)).addMBB(&TrapBB);
MBB.addSuccessor(&TrapBB);
}
static void addEpilogOnlyR10(const TargetInstrInfo *TII, MachineBasicBlock &MBB,
MachineInstr &MI, MachineBasicBlock &TrapBB) {
const DebugLoc &DL = MI.getDebugLoc();
// xor r10, r10
BuildMI(MBB, MI, DL, TII->get(X86::XOR64rr))
.addDef(X86::R10)
.addReg(X86::R10, RegState::Undef)
.addReg(X86::R10, RegState::Undef);
// mov r10, [gs:r10]
addSegmentedMem(BuildMI(MBB, MI, DL, TII->get(X86::MOV64rm)).addDef(X86::R10),
X86::GS, X86::R10);
// mov r10, [gs:r10]
addSegmentedMem(BuildMI(MBB, MI, DL, TII->get(X86::MOV64rm)).addDef(X86::R10),
X86::GS, X86::R10);
// sub QWORD [gs:0], 8
// This instruction should not be moved up to avoid a signal race.
addSegmentedMem(BuildMI(MBB, MI, DL, TII->get(X86::SUB64mi8)), X86::GS, 0)
.addImm(8);
// cmp [rsp], r10
addDirectMem(BuildMI(MBB, MI, DL, TII->get(X86::CMP64mr)), X86::RSP)
.addReg(X86::R10);
// jne trap
BuildMI(MBB, MI, DL, TII->get(X86::JNE_1)).addMBB(&TrapBB);
MBB.addSuccessor(&TrapBB);
}
bool ShadowCallStack::runOnMachineFunction(MachineFunction &Fn) {
if (!Fn.getFunction().hasFnAttribute(Attribute::ShadowCallStack) ||
Fn.getFunction().hasFnAttribute(Attribute::Naked))
return false;
if (Fn.empty() || !Fn.getRegInfo().tracksLiveness())
return false;
// FIXME: Skip functions that have r10 or r11 live on entry (r10 can be live
// on entry for parameters with the nest attribute.)
if (Fn.front().isLiveIn(X86::R10) || Fn.front().isLiveIn(X86::R11))
return false;
// FIXME: Skip functions with conditional and r10 tail calls for now.
bool HasReturn = false;
for (auto &MBB : Fn) {
if (MBB.empty())
continue;
const MachineInstr &MI = MBB.instr_back();
if (MI.isReturn())
HasReturn = true;
if (MI.isReturn() && MI.isCall()) {
if (MI.findRegisterUseOperand(X86::EFLAGS))
return false;
// This should only be possible on Windows 64 (see GR64_TC versus
// GR64_TCW64.)
if (MI.findRegisterUseOperand(X86::R10) ||
MI.hasRegisterImplicitUseOperand(X86::R10))
return false;
}
}
if (!HasReturn)
return false;
// For leaf functions:
// 1. Do not instrument very short functions where it would not improve that
// function's security.
// 2. Detect if there is an unused caller-saved register we can reserve to
// hold the return address instead of writing/reading it from the shadow
// call stack.
MCPhysReg LeafFuncRegister = X86::NoRegister;
if (!Fn.getFrameInfo().adjustsStack()) {
size_t InstructionCount = 0;
std::bitset<X86::NUM_TARGET_REGS> UsedRegs;
for (auto &MBB : Fn) {
for (auto &LiveIn : MBB.liveins())
UsedRegs.set(LiveIn.PhysReg);
for (auto &MI : MBB) {
if (!MI.isDebugValue() && !MI.isCFIInstruction() && !MI.isLabel())
InstructionCount++;
for (auto &Op : MI.operands())
if (Op.isReg() && Op.isDef())
UsedRegs.set(Op.getReg());
}
}
if (InstructionCount <= SkipLeafInstructions)
return false;
std::bitset<X86::NUM_TARGET_REGS> CalleeSavedRegs;
const MCPhysReg *CSRegs = Fn.getRegInfo().getCalleeSavedRegs();
for (size_t i = 0; CSRegs[i]; i++)
CalleeSavedRegs.set(CSRegs[i]);
const TargetRegisterInfo *TRI = Fn.getSubtarget().getRegisterInfo();
for (auto &Reg : X86::GR64_NOSPRegClass.getRegisters()) {
// FIXME: Optimization opportunity: spill/restore a callee-saved register
// if a caller-saved register is unavailable.
if (CalleeSavedRegs.test(Reg))
continue;
bool Used = false;
for (MCSubRegIterator SR(Reg, TRI, true); SR.isValid(); ++SR)
if ((Used = UsedRegs.test(*SR)))
break;
if (!Used) {
LeafFuncRegister = Reg;
break;
}
}
}
const bool LeafFuncOptimization = LeafFuncRegister != X86::NoRegister;
if (LeafFuncOptimization)
// Mark the leaf function register live-in for all MBBs except the entry MBB
for (auto I = ++Fn.begin(), E = Fn.end(); I != E; ++I)
I->addLiveIn(LeafFuncRegister);
MachineBasicBlock &MBB = Fn.front();
const MachineBasicBlock *NonEmpty = MBB.empty() ? MBB.getFallThrough() : &MBB;
const DebugLoc &DL = NonEmpty->front().getDebugLoc();
const TargetInstrInfo *TII = Fn.getSubtarget().getInstrInfo();
if (LeafFuncOptimization)
addPrologLeaf(Fn, TII, MBB, DL, LeafFuncRegister);
else
addProlog(Fn, TII, MBB, DL);
MachineBasicBlock *Trap = nullptr;
for (auto &MBB : Fn) {
if (MBB.empty())
continue;
MachineInstr &MI = MBB.instr_back();
if (MI.isReturn()) {
if (!Trap) {
Trap = Fn.CreateMachineBasicBlock();
BuildMI(Trap, MI.getDebugLoc(), TII->get(X86::TRAP));
Fn.push_back(Trap);
}
if (LeafFuncOptimization)
addEpilogLeaf(TII, MBB, MI, *Trap, LeafFuncRegister);
else if (MI.findRegisterUseOperand(X86::R11))
addEpilogOnlyR10(TII, MBB, MI, *Trap);
else
addEpilog(TII, MBB, MI, *Trap);
}
}
return true;
}
INITIALIZE_PASS(ShadowCallStack, "shadow-call-stack", "Shadow Call Stack",
false, false)
FunctionPass *llvm::createShadowCallStackPass() {
return new ShadowCallStack();
}


@@ -49,11 +49,6 @@ FunctionPass *createX86FloatingPointStackifierPass();
/// transition penalty between functions encoded with AVX and SSE.
FunctionPass *createX86IssueVZeroUpperPass();
/// This pass instruments the function prolog to save the return address to a
/// 'shadow call stack' and the function epilog to check that the return address
/// did not change during function execution.
FunctionPass *createShadowCallStackPass();
/// This pass inserts ENDBR instructions before indirect jump/call
/// destinations as part of CET IBT mechanism.
FunctionPass *createX86IndirectBranchTrackingPass();
@@ -137,7 +132,6 @@ FunctionPass *createX86SpeculativeLoadHardeningPass();
void initializeEvexToVexInstPassPass(PassRegistry &);
void initializeFixupBWInstPassPass(PassRegistry &);
void initializeFixupLEAPassPass(PassRegistry &);
void initializeShadowCallStackPass(PassRegistry &);
void initializeWinEHStatePassPass(PassRegistry &);
void initializeX86AvoidSFBPassPass(PassRegistry &);
void initializeX86CallFrameOptimizationPass(PassRegistry &);


@@ -69,7 +69,6 @@ extern "C" void LLVMInitializeX86Target() {
initializeFixupBWInstPassPass(PR);
initializeEvexToVexInstPassPass(PR);
initializeFixupLEAPassPass(PR);
initializeShadowCallStackPass(PR);
initializeX86CallFrameOptimizationPass(PR);
initializeX86CmovConverterPassPass(PR);
initializeX86ExecutionDomainFixPass(PR);
@@ -489,7 +488,6 @@ void X86PassConfig::addPreEmitPass() {
addPass(createBreakFalseDeps());
}
addPass(createShadowCallStackPass());
addPass(createX86IndirectBranchTrackingPass());
if (UseVZeroUpper)


@@ -55,7 +55,6 @@
; CHECK-NEXT: Post-RA pseudo instruction expansion pass
; CHECK-NEXT: X86 pseudo instruction expansion pass
; CHECK-NEXT: Analyze Machine Code For Garbage Collection
; CHECK-NEXT: Shadow Call Stack
; CHECK-NEXT: X86 Indirect Branch Tracking
; CHECK-NEXT: X86 vzeroupper inserter
; CHECK-NEXT: X86 Discriminate Memory Operands


@@ -150,7 +150,6 @@
; CHECK-NEXT: ReachingDefAnalysis
; CHECK-NEXT: X86 Execution Dependency Fix
; CHECK-NEXT: BreakFalseDeps
; CHECK-NEXT: Shadow Call Stack
; CHECK-NEXT: X86 Indirect Branch Tracking
; CHECK-NEXT: X86 vzeroupper inserter
; CHECK-NEXT: MachineDominator Tree Construction


@@ -1,212 +0,0 @@
# RUN: llc -mtriple=x86_64-unknown-linux-gnu -run-pass shadow-call-stack -verify-machineinstrs -o - %s | FileCheck %s
--- |
define void @no_return() #0 { ret void }
define void @normal_return() #0 { ret void }
define void @normal_return_leaf_func() #0 { ret void }
define void @short_leaf_func() #0 { ret void }
define void @normal_tail_call() #0 { ret void }
define void @r11_tail_call() #0 { ret void }
define void @conditional_tail_call() #0 { ret void }
define void @r10_live_in() #0 { ret void }
attributes #0 = { shadowcallstack }
...
---
# CHECK-LABEL: name: no_return
name: no_return
tracksRegLiveness: true
frameInfo:
adjustsStack: true # not a leaf function
body: |
; CHECK: bb.0:
bb.0:
; CHECK-NEXT: $eax = MOV32ri 13
$eax = MOV32ri 13
...
---
# CHECK-LABEL: name: normal_return
name: normal_return
tracksRegLiveness: true
frameInfo:
adjustsStack: true # not a leaf function
body: |
; CHECK: bb.0:
bb.0:
; CHECK: $r10 = MOV64rm $rsp, 1, $noreg, 0, $noreg
; CHECK-NEXT: $r11 = XOR64rr undef $r11, undef $r11, implicit-def $eflags
; CHECK-NEXT: ADD64mi8 $r11, 1, $noreg, 0, $gs, 8, implicit-def $eflags
; CHECK-NEXT: $r11 = MOV64rm $r11, 1, $noreg, 0, $gs
; CHECK-NEXT: MOV64mr $r11, 1, $noreg, 0, $gs, $r10
; CHECK-NEXT: $eax = MOV32ri 13
$eax = MOV32ri 13
; CHECK-NEXT: $r11 = XOR64rr undef $r11, undef $r11, implicit-def $eflags
; CHECK-NEXT: $r10 = MOV64rm $r11, 1, $noreg, 0, $gs
; CHECK-NEXT: $r10 = MOV64rm $r10, 1, $noreg, 0, $gs
; CHECK-NEXT: SUB64mi8 $r11, 1, $noreg, 0, $gs, 8, implicit-def $eflags
; CHECK-NEXT: CMP64mr $rsp, 1, $noreg, 0, $noreg, $r10, implicit-def $eflags
; CHECK-NEXT: JNE_1 %bb.1, implicit $eflags
; CHECK-NEXT: RETQ $eax
RETQ $eax
; CHECK: bb.1:
; CHECK-NEXT: TRAP
...
---
# CHECK-LABEL: name: normal_return_leaf_func
name: normal_return_leaf_func
tracksRegLiveness: true
frameInfo:
adjustsStack: false # leaf function
body: |
; CHECK: bb.0:
; CHECK: liveins: $rcx
bb.0:
liveins: $rcx
; CHECK: $rdx = MOV64rm $rsp, 1, $noreg, 0, $noreg
; CHECK-NEXT: $eax = MOV32ri 0
$eax = MOV32ri 0
; CHECK-NEXT: CMP64ri8 $rcx, 5, implicit-def $eflags
CMP64ri8 $rcx, 5, implicit-def $eflags
; CHECK-NEXT: JA_1 %bb.1, implicit $eflags
JA_1 %bb.1, implicit $eflags
; CHECK-NEXT: JMP_1 %bb.2
JMP_1 %bb.2
; CHECK: bb.1
; CHECK: liveins: $eax, $rdx
bb.1:
liveins: $eax
; CHECK: $eax = MOV32ri 1
$eax = MOV32ri 1
; CHECK: bb.2
; CHECK: liveins: $eax, $rdx
bb.2:
liveins: $eax
; CHECK: CMP64mr $rsp, 1, $noreg, 0, $noreg, $rdx, implicit-def $eflags
; CHECK-NEXT: JNE_1 %bb.3, implicit $eflags
; CHECK-NEXT: RETQ $eax
RETQ $eax
; CHECK: bb.3:
; CHECK-NEXT: TRAP
...
---
# CHECK-LABEL: name: short_leaf_func
name: short_leaf_func
tracksRegLiveness: true
frameInfo:
adjustsStack: false # leaf function
body: |
; CHECK: bb.0:
bb.0:
; Ensure these are not counted as machine instructions
CFI_INSTRUCTION 0
CFI_INSTRUCTION 0
CFI_INSTRUCTION 0
DBG_VALUE 0
DBG_VALUE 0
DBG_VALUE 0
; CHECK: $eax = MOV32ri 13
$eax = MOV32ri 13
; CHECK-NEXT: RETQ $eax
RETQ $eax
...
---
# CHECK-LABEL: name: normal_tail_call
name: normal_tail_call
tracksRegLiveness: true
frameInfo:
adjustsStack: true # not a leaf function
body: |
; CHECK: bb.0:
bb.0:
; CHECK: $r10 = MOV64rm $rsp, 1, $noreg, 0, $noreg
; CHECK-NEXT: $r11 = XOR64rr undef $r11, undef $r11, implicit-def $eflags
; CHECK-NEXT: ADD64mi8 $r11, 1, $noreg, 0, $gs, 8, implicit-def $eflags
; CHECK-NEXT: $r11 = MOV64rm $r11, 1, $noreg, 0, $gs
; CHECK-NEXT: MOV64mr $r11, 1, $noreg, 0, $gs, $r10
; CHECK-NEXT: $eax = MOV32ri 13
$eax = MOV32ri 13
; CHECK-NEXT: $r11 = XOR64rr undef $r11, undef $r11, implicit-def $eflags
; CHECK-NEXT: $r10 = MOV64rm $r11, 1, $noreg, 0, $gs
; CHECK-NEXT: $r10 = MOV64rm $r10, 1, $noreg, 0, $gs
; CHECK-NEXT: SUB64mi8 $r11, 1, $noreg, 0, $gs, 8, implicit-def $eflags
; CHECK-NEXT: CMP64mr $rsp, 1, $noreg, 0, $noreg, $r10, implicit-def $eflags
; CHECK-NEXT: JNE_1 %bb.1, implicit $eflags
; CHECK-NEXT: TAILJMPr64 $rax
TAILJMPr64 $rax
; CHECK: bb.1:
; CHECK-NEXT: TRAP
...
---
# CHECK-LABEL: name: r11_tail_call
name: r11_tail_call
tracksRegLiveness: true
frameInfo:
adjustsStack: true # not a leaf function
body: |
; CHECK: bb.0:
bb.0:
; CHECK: $r10 = MOV64rm $rsp, 1, $noreg, 0, $noreg
; CHECK-NEXT: $r11 = XOR64rr undef $r11, undef $r11, implicit-def $eflags
; CHECK-NEXT: ADD64mi8 $r11, 1, $noreg, 0, $gs, 8, implicit-def $eflags
; CHECK-NEXT: $r11 = MOV64rm $r11, 1, $noreg, 0, $gs
; CHECK-NEXT: MOV64mr $r11, 1, $noreg, 0, $gs, $r10
; CHECK-NEXT: $eax = MOV32ri 13
$eax = MOV32ri 13
; CHECK-NEXT: $r10 = XOR64rr undef $r10, undef $r10, implicit-def $eflags
; CHECK-NEXT: $r10 = MOV64rm $r10, 1, $noreg, 0, $gs
; CHECK-NEXT: $r10 = MOV64rm $r10, 1, $noreg, 0, $gs
; CHECK-NEXT: SUB64mi8 $noreg, 1, $noreg, 0, $gs, 8, implicit-def $eflags
; CHECK-NEXT: CMP64mr $rsp, 1, $noreg, 0, $noreg, $r10, implicit-def $eflags
; CHECK-NEXT: JNE_1 %bb.1, implicit $eflags
; CHECK-NEXT: TAILJMPr64 undef $r11
TAILJMPr64 undef $r11
; CHECK: bb.1:
; CHECK-NEXT: TRAP
...
---
# CHECK-LABEL: name: conditional_tail_call
name: conditional_tail_call
tracksRegLiveness: true
frameInfo:
adjustsStack: true # not a leaf function
body: |
; CHECK: bb.0:
bb.0:
; CHECK: $eax = MOV32ri 13
$eax = MOV32ri 13
; CHECK-NEXT: TAILJMPd64_CC @conditional_tail_call, undef $eflags
TAILJMPd64_CC @conditional_tail_call, undef $eflags
...
---
# CHECK-LABEL: name: r10_live_in
name: r10_live_in
tracksRegLiveness: true
frameInfo:
adjustsStack: true # not a leaf function
body: |
; CHECK: bb.0:
; CHECK: liveins: $r10
bb.0:
liveins: $r10
; CHECK: $eax = MOV32ri 13
$eax = MOV32ri 13
; CHECK-NEXT: RETQ $eax
RETQ $eax
...


@@ -76,7 +76,6 @@ static_library("LLVMX86CodeGen") {
deps += [ ":X86GenFoldTables" ]
}
sources = [
"ShadowCallStack.cpp",
"X86AsmPrinter.cpp",
"X86AvoidStoreForwardingBlocks.cpp",
"X86CallFrameOptimization.cpp",