
[SSP] Remove llvm.stackprotectorcheck.

This is a cleanup patch for SSP support in LLVM. There is no functional change.
llvm.stackprotectorcheck is not needed, because SelectionDAG isn't
actually lowering it in SelectBasicBlock; rather, it adds check code in
FinishBasicBlock, ignoring the position where the intrinsic is inserted
(See FindSplitPointForStackProtector()).

llvm-svn: 265851
Tim Shen 2016-04-08 21:26:31 +00:00
parent 9fcd4fe510
commit 8cac1d5c28
15 changed files with 144 additions and 177 deletions
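For orientation, here is a minimal sketch of the IR shape this patch retires. The function below is hypothetical; the llvm.stackprotector prologue intrinsic stays, and only the check intrinsic goes away:

    @__stack_chk_guard = external global i8*
    declare void @llvm.stackprotectorcheck(i8**)

    define void @f() sspreq {
    entry:
      ; ... function body ...
      ; The position of this call was ignored: SelectionDAG emitted the actual
      ; guard comparison in FinishBasicBlock, splitting the block on its own.
      call void @llvm.stackprotectorcheck(i8** @__stack_chk_guard)
      ret void
    }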


@@ -11946,44 +11946,6 @@ checked against the original guard by ``llvm.stackprotectorcheck``. If they are
different, then ``llvm.stackprotectorcheck`` causes the program to abort by
calling the ``__stack_chk_fail()`` function.
'``llvm.stackprotectorcheck``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
::
declare void @llvm.stackprotectorcheck(i8** <guard>)
Overview:
"""""""""
The ``llvm.stackprotectorcheck`` intrinsic compares ``guard`` against an already
created stack protector and if they are not equal calls the
``__stack_chk_fail()`` function.
Arguments:
""""""""""
The ``llvm.stackprotectorcheck`` intrinsic requires one pointer argument, the
variable ``@__stack_chk_guard``.
Semantics:
""""""""""
This intrinsic is provided to perform the stack protector check by comparing
``guard`` with the stack slot created by ``llvm.stackprotector``; if the
values do not match, it calls the ``__stack_chk_fail()`` function.
The reason to provide this as an IR level intrinsic instead of implementing it
via other IR operations is that in order to perform this operation at the IR
level without an intrinsic, one would need to create additional basic blocks to
handle the success/failure cases. This makes it difficult to stop the stack
protector check from disrupting sibling tail calls in Codegen. With this
intrinsic, we are able to generate the stack protector basic blocks late in
codegen after the tail call decision has occurred.
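For comparison, the IR-level expansion that would otherwise be required looks
roughly like the following (a sketch only: block names follow the conventions
of the ``StackProtector`` pass, and ``%StackGuardSlot`` is assumed to be the
slot created by ``llvm.stackprotector``)::

      %1 = load i8*, i8** @__stack_chk_guard
      %2 = load i8*, i8** %StackGuardSlot
      %3 = icmp eq i8* %1, %2
      br i1 %3, label %SP_return, label %CallStackCheckFailBlk

    SP_return:
      ret void

    CallStackCheckFailBlk:
      call void @__stack_chk_fail()
      unreachable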
'``llvm.objectsize``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^


@@ -75,6 +75,12 @@ private:
/// times.
SmallPtrSet<const PHINode *, 16> VisitedPHIs;
// A prologue is generated.
bool HasPrologue = false;
// IR checking code is generated.
bool HasIRCheck = false;
/// InsertStackProtectors - Insert code into the prologue and epilogue of
/// the function.
///
@@ -120,6 +126,10 @@
}
SSPLayoutKind getSSPLayout(const AllocaInst *AI) const;
// Return true if StackProtector is supposed to be handled by SelectionDAG.
bool shouldEmitSDCheck(const BasicBlock &BB) const;
void adjustForColoring(const AllocaInst *From, const AllocaInst *To);
bool runOnFunction(Function &Fn) override;


@@ -324,8 +324,6 @@ def int_assume : Intrinsic<[], [llvm_i1_ty], []>;
// Stack Protector Intrinsic - The stackprotector intrinsic writes the stack
// guard to the correct place on the stack frame.
def int_stackprotector : Intrinsic<[], [llvm_ptr_ty, llvm_ptrptr_ty], []>;
def int_stackprotectorcheck : Intrinsic<[], [llvm_ptrptr_ty],
[IntrReadWriteArgMem]>;
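For reference, the llvm_ptr_ty and llvm_ptrptr_ty parameters of the remaining int_stackprotector definition correspond to the IR signature documented in the language reference:

    declare void @llvm.stackprotector(i8* <guard>, i8** <slot>)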
// A counter increment for instrumentation based profiling.
def int_instrprof_increment : Intrinsic<[],


@@ -1011,11 +1011,18 @@ public:
return PrefLoopAlignment;
}
/// If the target has a standard location for the stack protector cookie,
/// If the target has a standard location for the stack protector guard,
/// returns the address of that location. Otherwise, returns nullptr.
virtual Value *getStackCookieLocation(IRBuilder<> &IRB) const {
return nullptr;
}
virtual Value *getIRStackGuard(IRBuilder<> &IRB) const;
/// Inserts necessary declarations for SSP purposes. Should be used only when
/// getIRStackGuard returns nullptr.
virtual void insertSSPDeclarations(Module &M) const;
/// Return the variable that's previously inserted by insertSSPDeclarations,
/// if any, otherwise return nullptr. Should be used only when
/// getIRStackGuard returns nullptr.
virtual Value *getSDStackGuard(const Module &M) const;
/// If the target has a standard location for the unsafe stack pointer,
/// returns the address of that location. Otherwise, returns nullptr.


@@ -2014,7 +2014,10 @@ void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
MachineFrameInfo *MFI = ParentBB->getParent()->getFrameInfo();
int FI = MFI->getStackProtectorIndex();
const Value *IRGuard = SPD.getGuard();
const Module &M = *ParentBB->getParent()->getFunction()->getParent();
const Value *IRGuard = TLI.getSDStackGuard(M);
assert(IRGuard && "Currently there must be an IR guard in order to use "
"SelectionDAG SSP");
SDValue GuardPtr = getValue(IRGuard);
SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
@@ -5517,18 +5520,6 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::invariant_end:
// Discard region information.
return nullptr;
case Intrinsic::stackprotectorcheck: {
// Do not actually emit anything for this basic block. Instead we initialize
// the stack protector descriptor and export the guard variable so we can
// access it in FinishBasicBlock.
const BasicBlock *BB = I.getParent();
SPDescriptor.initialize(BB, FuncInfo.MBBMap[BB], I);
ExportFromCurrentBlock(SPDescriptor.getGuard());
// Flush our exports since we are going to process a terminator.
(void)getControlRoot();
return nullptr;
}
case Intrinsic::clear_cache:
return TLI.getClearCacheBuiltinName();
case Intrinsic::donothing:


@@ -464,29 +464,25 @@ private:
/// the same function, use the same failure basic block).
class StackProtectorDescriptor {
public:
StackProtectorDescriptor() : ParentMBB(nullptr), SuccessMBB(nullptr),
FailureMBB(nullptr), Guard(nullptr),
GuardReg(0) { }
StackProtectorDescriptor()
: ParentMBB(nullptr), SuccessMBB(nullptr), FailureMBB(nullptr),
GuardReg(0) {}
/// Returns true if all fields of the stack protector descriptor are
/// initialized implying that we should/are ready to emit a stack protector.
bool shouldEmitStackProtector() const {
return ParentMBB && SuccessMBB && FailureMBB && Guard;
return ParentMBB && SuccessMBB && FailureMBB;
}
/// Initialize the stack protector descriptor structure for a new basic
/// block.
void initialize(const BasicBlock *BB,
MachineBasicBlock *MBB,
const CallInst &StackProtCheckCall) {
void initialize(const BasicBlock *BB, MachineBasicBlock *MBB) {
// Make sure we are not initialized yet.
assert(!shouldEmitStackProtector() && "Stack Protector Descriptor is "
"already initialized!");
ParentMBB = MBB;
SuccessMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ true);
FailureMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ false, FailureMBB);
if (!Guard)
Guard = StackProtCheckCall.getArgOperand(0);
}
/// Reset state that changes when we handle different basic blocks.
@@ -515,14 +511,12 @@ private:
/// always the same.
void resetPerFunctionState() {
FailureMBB = nullptr;
Guard = nullptr;
GuardReg = 0;
}
MachineBasicBlock *getParentMBB() { return ParentMBB; }
MachineBasicBlock *getSuccessMBB() { return SuccessMBB; }
MachineBasicBlock *getFailureMBB() { return FailureMBB; }
const Value *getGuard() { return Guard; }
unsigned getGuardReg() const { return GuardReg; }
void setGuardReg(unsigned R) { GuardReg = R; }
@@ -545,10 +539,6 @@ private:
/// contain a call to __stack_chk_fail().
MachineBasicBlock *FailureMBB;
/// The guard variable which we will compare against the stored value in the
/// stack protector stack slot.
const Value *Guard;
/// The virtual register holding the stack guard value.
unsigned GuardReg;


@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "ScheduleDAGSDNodes.h"
#include "SelectionDAGBuilder.h"
#include "llvm/ADT/PostOrderIterator.h"
@@ -25,6 +25,7 @@
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -32,8 +33,8 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
@@ -377,6 +378,8 @@ SelectionDAGISel::~SelectionDAGISel() {
void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AAResultsWrapperPass>();
AU.addRequired<GCModuleInfo>();
AU.addRequired<StackProtector>();
AU.addPreserved<StackProtector>();
AU.addPreserved<GCModuleInfo>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
if (UseMBPI && OptLevel != CodeGenOpt::None)
@@ -1476,6 +1479,8 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
LowerArguments(Fn);
}
}
if (getAnalysis<StackProtector>().shouldEmitSDCheck(*LLVMBB))
SDB->SPDescriptor.initialize(LLVMBB, FuncInfo->MBBMap[LLVMBB]);
if (Begin != BI)
++NumDAGBlocks;


@@ -89,6 +89,8 @@ bool StackProtector::runOnFunction(Function &Fn) {
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DT = DTWP ? &DTWP->getDomTree() : nullptr;
TLI = TM->getSubtargetImpl(Fn)->getTargetLowering();
HasPrologue = false;
HasIRCheck = false;
Attribute Attr = Fn.getFnAttribute("stack-protector-buffer-size");
if (Attr.isStringAttribute() &&
@@ -200,11 +202,21 @@ bool StackProtector::HasAddressTaken(const Instruction *AI) {
bool StackProtector::RequiresStackProtector() {
bool Strong = false;
bool NeedsProtector = false;
for (const BasicBlock &BB : *F)
for (const Instruction &I : BB)
if (const CallInst *CI = dyn_cast<CallInst>(&I))
if (CI->getCalledFunction() ==
Intrinsic::getDeclaration(F->getParent(),
Intrinsic::stackprotector))
HasPrologue = true;
if (F->hasFnAttribute(Attribute::StackProtectReq)) {
NeedsProtector = true;
Strong = true; // Use the same heuristic as strong to determine SSPLayout
} else if (F->hasFnAttribute(Attribute::StackProtectStrong))
Strong = true;
else if (HasPrologue)
NeedsProtector = true;
else if (!F->hasFnAttribute(Attribute::StackProtect))
return false;
@@ -256,68 +268,6 @@ bool StackProtector::RequiresStackProtector() {
return NeedsProtector;
}
static bool InstructionWillNotHaveChain(const Instruction *I) {
return !I->mayHaveSideEffects() && !I->mayReadFromMemory() &&
isSafeToSpeculativelyExecute(I);
}
/// Identify if RI has a previous instruction in the "Tail Position" and return
/// it. Otherwise return 0.
///
/// This is based off of the code in llvm::isInTailCallPosition. The difference
/// is that it inverts the first part of llvm::isInTailCallPosition since
/// isInTailCallPosition is checking if a call is in a tail call position, and
/// we are searching for an unknown tail call that might be in the tail call
/// position. Once we find the call though, the code uses the same refactored
/// code, returnTypeIsEligibleForTailCall.
static CallInst *FindPotentialTailCall(BasicBlock *BB, ReturnInst *RI,
const TargetLoweringBase *TLI) {
// Establish a reasonable upper bound on the maximum amount of instructions we
// will look through to find a tail call.
unsigned SearchCounter = 0;
const unsigned MaxSearch = 4;
bool NoInterposingChain = true;
for (BasicBlock::reverse_iterator I = std::next(BB->rbegin()), E = BB->rend();
I != E && SearchCounter < MaxSearch; ++I) {
Instruction *Inst = &*I;
// Skip over debug intrinsics and do not allow them to affect our MaxSearch
// counter.
if (isa<DbgInfoIntrinsic>(Inst))
continue;
// If we find a call and the following conditions are satisfied, then we
// have found a tail call that satisfies at least the target independent
// requirements of a tail call:
//
// 1. The call site has the tail marker.
//
// 2. The call site either will not cause the creation of a chain or if a
// chain is necessary there are no instructions in between the callsite and
// the call which would create an interposing chain.
//
// 3. The return type of the function does not impede tail call
// optimization.
if (CallInst *CI = dyn_cast<CallInst>(Inst)) {
if (CI->isTailCall() &&
(InstructionWillNotHaveChain(CI) || NoInterposingChain) &&
returnTypeIsEligibleForTailCall(BB->getParent(), CI, RI, *TLI))
return CI;
}
// If we did not find a call see if we have an instruction that may create
// an interposing chain.
NoInterposingChain =
NoInterposingChain && InstructionWillNotHaveChain(Inst);
// Increment max search.
SearchCounter++;
}
return nullptr;
}
/// Insert code into the entry block that stores the __stack_chk_guard
/// variable onto the stack:
///
@@ -329,29 +279,25 @@ static CallInst *FindPotentialTailCall(BasicBlock *BB, ReturnInst *RI,
/// Returns true if the platform/triple supports the stackprotectorcreate pseudo
/// node.
static bool CreatePrologue(Function *F, Module *M, ReturnInst *RI,
const TargetLoweringBase *TLI, const Triple &TT,
AllocaInst *&AI, Value *&StackGuardVar) {
const TargetLoweringBase *TLI, AllocaInst *&AI,
Value *&StackGuardVar) {
bool SupportsSelectionDAGSP = false;
PointerType *PtrTy = Type::getInt8PtrTy(RI->getContext());
IRBuilder<> B(&F->getEntryBlock().front());
StackGuardVar = TLI->getStackCookieLocation(B);
StackGuardVar = TLI->getIRStackGuard(B);
if (!StackGuardVar) {
if (TT.isOSOpenBSD()) {
StackGuardVar = M->getOrInsertGlobal("__guard_local", PtrTy);
cast<GlobalValue>(StackGuardVar)
->setVisibility(GlobalValue::HiddenVisibility);
} else {
SupportsSelectionDAGSP = true;
StackGuardVar = M->getOrInsertGlobal("__stack_chk_guard", PtrTy);
}
/// Use SelectionDAG SSP handling, since there isn't an IR guard.
SupportsSelectionDAGSP = true;
TLI->insertSSPDeclarations(*M);
StackGuardVar = TLI->getSDStackGuard(*M);
}
assert(StackGuardVar && "Must have stack guard available");
PointerType *PtrTy = Type::getInt8PtrTy(RI->getContext());
AI = B.CreateAlloca(PtrTy, nullptr, "StackGuardSlot");
LoadInst *LI = B.CreateLoad(StackGuardVar, "StackGuard");
B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackprotector),
{LI, AI});
return SupportsSelectionDAGSP;
}
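Once emitted into the entry block, the prologue built above looks roughly like this IR (value names as created by the code; the guard variable is target-dependent, e.g. @__stack_chk_guard here, or @__guard_local on OpenBSD):

    entry:
      %StackGuardSlot = alloca i8*
      %StackGuard = load i8*, i8** @__stack_chk_guard
      call void @llvm.stackprotector(i8* %StackGuard, i8** %StackGuardSlot)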
@@ -362,7 +308,6 @@ static bool CreatePrologue(Function *F, Module *M, ReturnInst *RI,
/// - The epilogue checks the value stored in the prologue against the original
/// value. It calls __stack_chk_fail if they differ.
bool StackProtector::InsertStackProtectors() {
bool HasPrologue = false;
bool SupportsSelectionDAGSP =
EnableSelectionDAGSP && !TM->Options.EnableFastISel;
AllocaInst *AI = nullptr; // Place on stack that stores the stack guard.
@@ -377,27 +322,10 @@ bool StackProtector::InsertStackProtectors() {
if (!HasPrologue) {
HasPrologue = true;
SupportsSelectionDAGSP &=
CreatePrologue(F, M, RI, TLI, Trip, AI, StackGuardVar);
CreatePrologue(F, M, RI, TLI, AI, StackGuardVar);
}
if (SupportsSelectionDAGSP) {
// Since we have a potential tail call, insert the special stack check
// intrinsic.
Instruction *InsertionPt = nullptr;
if (CallInst *CI = FindPotentialTailCall(BB, RI, TLI)) {
InsertionPt = CI;
} else {
InsertionPt = RI;
// At this point we know that BB has a return statement so it *DOES*
// have a terminator.
assert(InsertionPt != nullptr &&
"BB must have a terminator instruction at this point.");
}
Function *Intrinsic =
Intrinsic::getDeclaration(M, Intrinsic::stackprotectorcheck);
CallInst::Create(Intrinsic, StackGuardVar, "", InsertionPt);
} else {
if (!SupportsSelectionDAGSP) {
// If we do not support SelectionDAG based tail calls, generate IR level
// tail calls.
//
@@ -428,6 +356,10 @@
// fail BB generated by the stack protector pseudo instruction.
BasicBlock *FailBB = CreateFailBB();
// Set HasIRCheck to true, so that SelectionDAG will not generate its own
// version.
HasIRCheck = true;
// Split the basic block before the return instruction.
BasicBlock *NewBB = BB->splitBasicBlock(RI->getIterator(), "SP_return");
@@ -487,3 +419,7 @@ BasicBlock *StackProtector::CreateFailBB() {
B.CreateUnreachable();
return FailBB;
}
bool StackProtector::shouldEmitSDCheck(const BasicBlock &BB) const {
return HasPrologue && !HasIRCheck && dyn_cast<ReturnInst>(BB.getTerminator());
}


@@ -1746,3 +1746,32 @@ bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
return true;
}
//===----------------------------------------------------------------------===//
// Stack Protector
//===----------------------------------------------------------------------===//
// For OpenBSD return its special guard variable. Otherwise return nullptr,
// so that SelectionDAG handles SSP.
Value *TargetLoweringBase::getIRStackGuard(IRBuilder<> &IRB) const {
if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
PointerType *PtrTy = Type::getInt8PtrTy(M.getContext());
auto Guard = cast<GlobalValue>(M.getOrInsertGlobal("__guard_local", PtrTy));
Guard->setVisibility(GlobalValue::HiddenVisibility);
return Guard;
}
return nullptr;
}
// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
M.getOrInsertGlobal("__stack_chk_guard", Type::getInt8PtrTy(M.getContext()));
}
// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
Value *TargetLoweringBase::getSDStackGuard(const Module &M) const {
return M.getGlobalVariable("__stack_chk_guard");
}
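In other words, the two paths materialize different guard symbols; roughly:

    ; IR path (currently OpenBSD only): getIRStackGuard returns this hidden global
    @__guard_local = external hidden global i8*

    ; SelectionDAG path: insertSSPDeclarations creates the declaration that
    ; getSDStackGuard later looks up
    @__stack_chk_guard = external global i8*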


@@ -159,6 +159,12 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
}
break;
case 's':
if (Name == "stackprotectorcheck") {
NewFn = nullptr;
return true;
}
case 'x': {
if (Name.startswith("x86.sse2.pcmpeq.") ||
Name.startswith("x86.sse2.pcmpgt.") ||
@@ -645,6 +651,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Value *UndefV = UndefValue::get(Op0->getType());
Rep = Builder.CreateShuffleVector(Op0, UndefV, ConstantVector::get(Idxs));
} else if (Name == "llvm.stackprotectorcheck") {
Rep = nullptr;
} else {
bool PD128 = false, PD256 = false, PS128 = false, PS256 = false;
if (Name == "llvm.x86.avx.vpermil.pd.256")
@@ -684,7 +692,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
}
}
CI->replaceAllUsesWith(Rep);
if (Rep)
CI->replaceAllUsesWith(Rep);
CI->eraseFromParent();
return;
}


@@ -10212,9 +10212,9 @@ bool AArch64TargetLowering::shouldNormalizeToSelectSequence(LLVMContext &,
return false;
}
Value *AArch64TargetLowering::getStackCookieLocation(IRBuilder<> &IRB) const {
Value *AArch64TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
if (!Subtarget->isTargetAndroid())
return TargetLowering::getStackCookieLocation(IRB);
return TargetLowering::getIRStackGuard(IRB);
// Android provides a fixed TLS slot for the stack cookie. See the definition
// of TLS_SLOT_STACK_GUARD in


@@ -360,7 +360,7 @@
/// If the target has a standard location for the stack protector cookie,
/// returns the address of that location. Otherwise, returns nullptr.
Value *getStackCookieLocation(IRBuilder<> &IRB) const override;
Value *getIRStackGuard(IRBuilder<> &IRB) const override;
/// If the target has a standard location for the unsafe stack pointer,
/// returns the address of that location. Otherwise, returns nullptr.


@@ -2193,9 +2193,9 @@ unsigned X86TargetLowering::getAddressSpace() const {
return 256;
}
Value *X86TargetLowering::getStackCookieLocation(IRBuilder<> &IRB) const {
Value *X86TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
if (!Subtarget.isTargetLinux())
return TargetLowering::getStackCookieLocation(IRB);
return TargetLowering::getIRStackGuard(IRB);
// %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
// %gs:0x14 on i386
@@ -2206,6 +2206,19 @@ Value *X86TargetLowering::getStackCookieLocation(IRBuilder<> &IRB) const {
Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
}
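On x86-64 Linux with the default code model, the value returned above is effectively the constant shown in this sketch: address space 257 models the %fs segment, so loading through it reads the guard at %fs:0x28 (0x28 == 40):

    %StackGuard = load i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)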
void X86TargetLowering::insertSSPDeclarations(Module &M) const {
if (!Subtarget.isTargetLinux())
TargetLowering::insertSSPDeclarations(M);
else
llvm_unreachable("X86 Linux supports customized IR stack guard load");
}
Value *X86TargetLowering::getSDStackGuard(const Module &M) const {
if (!Subtarget.isTargetLinux())
return TargetLowering::getSDStackGuard(M);
llvm_unreachable("X86 Linux supports customized IR stack guard load");
}
Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
if (!Subtarget.isTargetAndroid())
return TargetLowering::getSafeStackPointerLocation(IRB);


@@ -962,7 +962,11 @@ namespace llvm {
/// If the target has a standard location for the stack protector cookie,
/// returns the address of that location. Otherwise, returns nullptr.
Value *getStackCookieLocation(IRBuilder<> &IRB) const override;
Value *getIRStackGuard(IRBuilder<> &IRB) const override;
void insertSSPDeclarations(Module &M) const override;
Value *getSDStackGuard(const Module &M) const override;
/// Return true if the target stores SafeStack pointer at a fixed offset in
/// some non-standard address space, and populates the address space and


@@ -54,7 +54,20 @@ entry:
define i32 @test.objectsize() {
; CHECK-LABEL: @test.objectsize(
; CHECK: @llvm.objectsize.i32.p0i8
; CHECK-DAG: declare i32 @llvm.objectsize.i32.p0i8
%s = call i32 @llvm.objectsize.i32(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i1 false)
ret i32 %s
}
@__stack_chk_guard = external global i8*
declare void @llvm.stackprotectorcheck(i8**)
define void @test.stackprotectorcheck() {
; CHECK-LABEL: @test.stackprotectorcheck(
; CHECK-NEXT: ret void
call void @llvm.stackprotectorcheck(i8** @__stack_chk_guard)
ret void
}
; This is part of @test.objectsize(), since llvm.objectsize declaration gets
; emitted at the end.
; CHECK: declare i32 @llvm.objectsize.i32.p0i8