GlobalISel: translate stack protector intrinsics

llvm-svn: 285614
Tim Northover 2016-10-31 18:30:59 +00:00
parent 26dd5b03a1
commit f9cdcd6b12
5 changed files with 105 additions and 16 deletions
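
For context, the two intrinsics this commit teaches the IRTranslator to handle look roughly like the sketch below in LLVM IR. The function and value names are illustrative only; the committed test at the bottom of this diff is the real coverage.

; Illustrative sketch only, not part of the commit.
declare i8* @llvm.stackguard()
declare void @llvm.stackprotector(i8*, i8**)

define void @sketch() {
  ; Stack slot that will hold the guard value.
  %slot = alloca i8*
  ; Translated via the new Intrinsic::stackprotector case: a LOAD_STACK_GUARD
  ; followed by a volatile G_STORE into the slot's frame index.
  call void @llvm.stackprotector(i8* undef, i8** %slot)
  ; Translated via the new Intrinsic::stackguard case and getStackGuard().
  %g = call i8* @llvm.stackguard()
  ret void
}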

@@ -74,6 +74,10 @@ private:
// in once all MachineBasicBlocks have been created.
SmallVector<std::pair<const PHINode *, MachineInstr *>, 4> PendingPHIs;
/// Record of what frame index has been allocated to specified allocas for
/// this function.
DenseMap<const AllocaInst *, int> FrameIndices;
/// Methods for translating from LLVM IR to MachineInstr.
/// \see ::translate for general information on the translate methods.
/// @{
@@ -120,6 +124,8 @@ private:
bool translateMemcpy(const CallInst &CI);
void getStackGuard(unsigned DstReg);
bool translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID);
/// Translate call instruction.
@@ -330,6 +336,10 @@ private:
/// If such VReg does not exist, it is created.
unsigned getOrCreateVReg(const Value &Val);
/// Get the frame index that represents \p AI.
/// If no frame index has been assigned yet, it is created.
int getOrCreateFrameIndex(const AllocaInst &AI);
/// Get the alignment of the given memory operation instruction. This will
/// either be the explicitly specified value or the ABI-required alignment for
/// the type being accessed (according to the Module's DataLayout).

@@ -74,6 +74,27 @@ unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
return ValReg;
}
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
if (FrameIndices.find(&AI) != FrameIndices.end())
return FrameIndices[&AI];
MachineFunction &MF = MIRBuilder.getMF();
unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
unsigned Size =
ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
// Always allocate at least one byte.
Size = std::max(Size, 1u);
unsigned Alignment = AI.getAlignment();
if (!Alignment)
Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
int &FI = FrameIndices[&AI];
FI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
return FI;
}
unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
unsigned Alignment = 0;
Type *ValTy = nullptr;
@@ -382,6 +403,26 @@ bool IRTranslator::translateMemcpy(const CallInst &CI) {
CallLowering::ArgInfo(0, CI.getType()), Args);
}
void IRTranslator::getStackGuard(unsigned DstReg) {
auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
MIB.addDef(DstReg);
auto &MF = MIRBuilder.getMF();
auto &TLI = *MF.getSubtarget().getTargetLowering();
Value *Global = TLI.getSDagStackGuard(*MF.getFunction()->getParent());
if (!Global)
return;
MachinePointerInfo MPInfo(Global);
MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(1);
auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
MachineMemOperand::MODereferenceable;
*MemRefs =
MF.getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
DL->getPointerABIAlignment());
MIB.setMemRefs(MemRefs, MemRefs + 1);
}
bool IRTranslator::translateKnownIntrinsic(const CallInst &CI,
Intrinsic::ID ID) {
unsigned Op = 0;
@@ -402,6 +443,24 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI,
MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
return true;
}
case Intrinsic::stackguard:
getStackGuard(getOrCreateVReg(CI));
return true;
case Intrinsic::stackprotector: {
MachineFunction &MF = MIRBuilder.getMF();
LLT PtrTy{*CI.getArgOperand(0)->getType(), *DL};
unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
getStackGuard(GuardVal);
AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
MIRBuilder.buildStore(
GuardVal, getOrCreateVReg(*Slot),
*MF.getMachineMemOperand(
MachinePointerInfo::getFixedStack(MF, getOrCreateFrameIndex(*Slot)),
MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
PtrTy.getSizeInBits() / 8, 8));
return true;
}
}
LLT Ty{*CI.getOperand(0)->getType(), *DL};
@@ -468,20 +527,8 @@ bool IRTranslator::translateStaticAlloca(const AllocaInst &AI) {
return false;
assert(AI.isStaticAlloca() && "only handle static allocas now");
MachineFunction &MF = MIRBuilder.getMF();
unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
unsigned Size =
ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
// Always allocate at least one byte.
Size = std::max(Size, 1u);
unsigned Alignment = AI.getAlignment();
if (!Alignment)
Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
unsigned Res = getOrCreateVReg(AI);
int FI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
int FI = getOrCreateFrameIndex(AI);
MIRBuilder.buildFrameIndex(Res, FI);
return true;
}
@@ -566,6 +613,7 @@ void IRTranslator::finalizeFunction() {
// Release the memory used by the different maps we
// needed during the translation.
ValToVReg.clear();
FrameIndices.clear();
Constants.clear();
}

@@ -37,6 +37,8 @@ LegalizerInfo::LegalizerInfo() : TablesInitialized(false) {
DefaultActions[TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS] = Legal;
DefaultActions[TargetOpcode::G_ADD] = NarrowScalar;
DefaultActions[TargetOpcode::G_LOAD] = NarrowScalar;
DefaultActions[TargetOpcode::G_STORE] = NarrowScalar;
DefaultActions[TargetOpcode::G_BRCOND] = WidenScalar;
}

@@ -478,15 +478,24 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
MachineFunction &MF = *MBB.getParent();
MachineRegisterInfo &MRI = MF.getRegInfo();
if (!isPreISelGenericOpcode(I.getOpcode()))
return !I.isCopy() || selectCopy(I, TII, MRI, TRI, RBI);
unsigned Opcode = I.getOpcode();
if (!isPreISelGenericOpcode(I.getOpcode())) {
// Certain non-generic instructions also need some special handling.
if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
else if (I.isCopy())
return selectCopy(I, TII, MRI, TRI, RBI);
else
return true;
}
if (I.getNumOperands() != I.getNumExplicitOperands()) {
DEBUG(dbgs() << "Generic instruction has unexpected implicit operands\n");
return false;
}
unsigned Opcode = I.getOpcode();
LLT Ty =
I.getOperand(0).isReg() ? MRI.getType(I.getOperand(0).getReg()) : LLT{};

@@ -0,0 +1,20 @@
; RUN: llc -mtriple=aarch64-apple-ios %s -stop-after=irtranslator -o - -global-isel | FileCheck %s
; CHECK: name: test_stack_guard
; CHECK: stack:
; CHECK: - { id: 0, name: StackGuardSlot, offset: 0, size: 8, alignment: 8 }
; CHECK-NOT: id: 1
; CHECK: [[GUARD_SLOT:%[0-9]+]](p0) = G_FRAME_INDEX %stack.0.StackGuardSlot
; CHECK: [[GUARD:%[0-9]+]](p0) = LOAD_STACK_GUARD :: (dereferenceable invariant load 8 from @__stack_chk_guard)
; CHECK: G_STORE [[GUARD]](p0), [[GUARD_SLOT]](p0) :: (volatile store 8 into %stack.0.StackGuardSlot)
declare void @llvm.stackprotector(i8*, i8**)
define void @test_stack_guard_remat2() {
%StackGuardSlot = alloca i8*
call void @llvm.stackprotector(i8* undef, i8** %StackGuardSlot)
ret void
}
@__stack_chk_guard = external global i64*
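
The new test only exercises llvm.stackprotector. An input that would also drive the llvm.stackguard case added to translateKnownIntrinsic could look like the sketch below; it is illustrative only, not part of the commit, and implies no CHECK lines.

declare i8* @llvm.stackguard()

define i8* @test_stack_guard_read() {
  ; Should translate to a LOAD_STACK_GUARD whose result is returned.
  %guard = call i8* @llvm.stackguard()
  ret i8* %guard
}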