
[mips] Implement NaCl sandboxing of function calls:

* Add masking instructions before indirect calls (in MC layer).
* Align call + branch delay to the bundle end (in MC layer).

Differential Revision: http://llvm-reviews.chandlerc.com/D3032

llvm-svn: 203606
Sasa Stankovic 2014-03-11 21:23:40 +00:00
parent d9e77d0e39
commit 018056201a
2 changed files with 122 additions and 2 deletions
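
Why aligning the call + branch delay to the bundle end yields an aligned return address: on MIPS the call writes $ra = (address of the call) + 8, i.e. the address just past the delay slot, so if the call and its delay slot occupy the last two slots of a bundle, $ra lands exactly on the start of the next bundle. A small standalone C++ sketch of that arithmetic (illustration only, not part of the patch; it assumes the usual 16-byte NaCl bundle, matching the .align 4 in the test, and 4-byte MIPS instructions):

// Illustration only: shows why a call + delay slot placed at the end of a
// bundle produces a bundle-aligned return address.
#include <cassert>
#include <cstdio>

int main() {
  const unsigned BundleSize = 16; // 2^4 bytes, matching ".align 4" in the test
  const unsigned InstSize = 4;    // every MIPS instruction is 4 bytes

  // Put the call in the second-to-last slot of a bundle and the branch delay
  // instruction in the last slot.
  unsigned BundleStart = 0x400; // arbitrary example address
  unsigned CallAddr = BundleStart + BundleSize - 2 * InstSize;
  unsigned ReturnAddr = CallAddr + 2 * InstSize; // MIPS: $ra = call PC + 8

  // The return address is the start of the next bundle, so an indirect jump
  // back to it passes the NaCl bundle-alignment check.
  assert(ReturnAddr % BundleSize == 0);
  std::printf("call at 0x%x, return address 0x%x (bundle-aligned)\n",
              CallAddr, ReturnAddr);
  return 0;
}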

View File

@@ -12,7 +12,8 @@
// before dangerous control-flow and memory access instructions. It inserts
// address-masking instructions after instructions that change the stack
// pointer. It ensures that the mask and the dangerous instruction are always
// emitted in the same bundle.
// emitted in the same bundle. It aligns call + branch delay to the bundle end,
// so that return address is always aligned to the start of next bundle.
//
//===----------------------------------------------------------------------===//
@@ -36,11 +37,15 @@ class MipsNaClELFStreamer : public MCELFStreamer {
public:
MipsNaClELFStreamer(MCContext &Context, MCAsmBackend &TAB, raw_ostream &OS,
MCCodeEmitter *Emitter)
: MCELFStreamer(Context, TAB, OS, Emitter) {}
: MCELFStreamer(Context, TAB, OS, Emitter), PendingCall(false) {}
~MipsNaClELFStreamer() {}
private:
// Whether we started the sandboxing sequence for calls. Calls are bundled
// with branch delays and aligned to the bundle end.
bool PendingCall;
bool isIndirectJump(const MCInst &MI) {
return MI.getOpcode() == Mips::JR || MI.getOpcode() == Mips::RET;
}
@@ -50,6 +55,25 @@
&& MI.getOperand(0).getReg() == Mips::SP);
}
bool isCall(unsigned Opcode, bool *IsIndirectCall) {
*IsIndirectCall = false;
switch (Opcode) {
default:
return false;
case Mips::JAL:
case Mips::BAL_BR:
case Mips::BLTZAL:
case Mips::BGEZAL:
return true;
case Mips::JALR:
*IsIndirectCall = true;
return true;
}
}
void emitMask(unsigned AddrReg, unsigned MaskReg,
const MCSubtargetInfo &STI) {
MCInst MaskInst;
@@ -98,6 +122,8 @@ public:
virtual void EmitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI) {
// Sandbox indirect jumps.
if (isIndirectJump(Inst)) {
if (PendingCall)
report_fatal_error("Dangerous instruction in branch delay slot!");
sandboxIndirectJump(Inst, STI);
return;
}
@@ -109,6 +135,9 @@ public:
&IsStore);
bool IsSPFirstOperand = isStackPointerFirstOperand(Inst);
if (IsMemAccess || IsSPFirstOperand) {
if (PendingCall)
report_fatal_error("Dangerous instruction in branch delay slot!");
bool MaskBefore = (IsMemAccess
&& baseRegNeedsLoadStoreMask(Inst.getOperand(AddrIdx)
.getReg()));
@@ -120,6 +149,31 @@
return;
}
// Sandbox calls by aligning call and branch delay to the bundle end.
// For indirect calls, emit the mask before the call.
bool IsIndirectCall;
if (isCall(Inst.getOpcode(), &IsIndirectCall)) {
if (PendingCall)
report_fatal_error("Dangerous instruction in branch delay slot!");
// Start the sandboxing sequence by emitting call.
EmitBundleLock(true);
if (IsIndirectCall) {
unsigned TargetReg = Inst.getOperand(1).getReg();
emitMask(TargetReg, IndirectBranchMaskReg, STI);
}
MCELFStreamer::EmitInstruction(Inst, STI);
PendingCall = true;
return;
}
if (PendingCall) {
// Finish the sandboxing sequence by emitting branch delay.
MCELFStreamer::EmitInstruction(Inst, STI);
EmitBundleUnlock();
PendingCall = false;
return;
}
// None of the sandboxing applies, just emit the instruction.
MCELFStreamer::EmitInstruction(Inst, STI);
}

View File

@@ -9,6 +9,7 @@
# Test that address-masking sandboxing is added before indirect branches and
# returns.
.align 4
test1:
.set noreorder
@@ -35,6 +36,7 @@ test1:
# Test that address-masking sandboxing is added before load instructions.
.align 4
test2:
.set noreorder
@@ -104,6 +106,7 @@ test2:
# Test that address-masking sandboxing is added before store instructions.
.align 4
test3:
.set noreorder
@@ -166,6 +169,7 @@ test3:
# Test that address-masking sandboxing is added after instructions that change
# stack pointer.
.align 4
test4:
.set noreorder
@@ -217,3 +221,65 @@ test4:
# CHECK-NOT: and
# CHECK: sw $sp, 123($sp)
# CHECK-NOT: and
# Test that call + branch delay is aligned at bundle end. Test that mask is
# added before indirect calls.
.align 4
test5:
.set noreorder
jal func1
addiu $4, $zero, 1
nop
bal func2
addiu $4, $zero, 2
nop
nop
bltzal $t1, func3
addiu $4, $zero, 3
nop
nop
nop
bgezal $t2, func4
addiu $4, $zero, 4
jalr $t9
addiu $4, $zero, 5
# CHECK-LABEL: test5:
# CHECK-NEXT: nop
# CHECK-NEXT: nop
# CHECK-NEXT: jal
# CHECK-NEXT: addiu $4, $zero, 1
# CHECK-NEXT: nop
# CHECK-NEXT: nop
# CHECK-NEXT: bal
# CHECK-NEXT: addiu $4, $zero, 2
# CHECK-NEXT: nop
# CHECK-NEXT: nop
# CHECK-NEXT: bltzal
# CHECK-NEXT: addiu $4, $zero, 3
# CHECK-NEXT: nop
# CHECK-NEXT: nop
# CHECK-NEXT: nop
# CHECK-NEXT: nop
# CHECK-NEXT: nop
# CHECK-NEXT: nop
# CHECK-NEXT: bgezal
# CHECK-NEXT: addiu $4, $zero, 4
# CHECK-NEXT: nop
# CHECK-NEXT: and $25, $25, $14
# CHECK-NEXT: jalr $25
# CHECK-NEXT: addiu $4, $zero, 5
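
The nop counts in the CHECK lines follow from the bundle-end alignment: before each call, the assembler pads with nops until the locked group (mask + call + delay slot for jalr, call + delay slot for the others) ends exactly on a bundle boundary. A short C++ sketch that redoes this padding arithmetic for test5 (illustration only; it assumes 16-byte bundles and 4-byte instructions):

#include <cstdio>

int main() {
  const unsigned Bundle = 16;

  unsigned Offset = 0; // byte offset within the current bundle
  auto emitGroup = [&](const char *Name, unsigned GroupBytes) {
    // nops needed so that the locked group ends on a bundle boundary
    unsigned Pad = (Bundle - (Offset + GroupBytes) % Bundle) % Bundle;
    std::printf("%-6s : %u padding nop(s)\n", Name, Pad / 4);
    Offset = (Offset + Pad + GroupBytes) % Bundle;
  };
  auto emitNops = [&](unsigned N) { Offset = (Offset + 4 * N) % Bundle; };

  emitGroup("jal", 8);    // -> 2 nops, as in the CHECK lines
  emitNops(1);
  emitGroup("bal", 8);    // -> 1 nop (plus the 1 nop from the source)
  emitNops(2);
  emitGroup("bltzal", 8); // -> 0 nops (the 2 source nops already align it)
  emitNops(3);
  emitGroup("bgezal", 8); // -> 3 nops (plus the 3 nops from the source)
  emitGroup("jalr", 12);  // mask + jalr + delay slot -> 1 nop
  return 0;
}

The computed counts (2, 1, 0, 3, 1) match the padding nops in the CHECK lines above once the nops already present in the source are accounted for.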