
hwasan: Support for outlined checks in the Linux kernel.

Add support for match-all tags and GOT-free runtime calls, which
are both required for the kernel to be able to support outlined
checks. This requires extending the access info to let the backend
know when to enable these features. To make the code easier to maintain,
introduce an enum with the bit field positions for the access info.
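
For illustration only (this snippet is not part of the patch; it mirrors
the HWASanAccessInfo enum added below), this is how the access info for a
recoverable 1-byte kernel read with match-all tag 0xff packs into the
constant that appears in the kernel test at the end of this commit:

    #include <cstdint>
    #include <cstdio>

    // Bit field positions, copied from the HWASanAccessInfo enum in this patch.
    enum {
      AccessSizeShift = 0,   // 4 bits
      IsWriteShift = 4,
      RecoverShift = 5,
      MatchAllShift = 16,    // 8 bits
      HasMatchAllShift = 24,
      CompileKernelShift = 25,
    };

    int main() {
      int64_t AccessInfo = (1LL << CompileKernelShift) |  // kernel mode
                           (1LL << HasMatchAllShift) |    // match-all tag present
                           (0xffLL << MatchAllShift) |    // kernel match-all tag
                           (1LL << RecoverShift) |        // recoverable check
                           (0LL << IsWriteShift) |        // a read, not a write
                           (0LL << AccessSizeShift);      // log2(size) == 0: 1 byte
      // Prints "67043360 (0x3ff0020)".
      std::printf("%lld (0x%llx)\n", (long long)AccessInfo,
                  (unsigned long long)AccessInfo);
    }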

Allow outlined checks to be enabled with -mllvm
-hwasan-inline-all-checks=0. Kernels that contain runtime support for
outlined checks may pass this flag. Kernels lacking runtime support
will continue to link because they do not pass the flag. Old versions
of LLVM will ignore the flag and continue to use inline checks.
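
As a sketch, a kernel build with runtime support might opt in via its
compiler invocation roughly like this (only -mllvm
-hwasan-inline-all-checks=0 comes from this patch; the rest of the
command line, including -fsanitize=kernel-hwaddress, is illustrative
context rather than part of this change):

    clang -fsanitize=kernel-hwaddress \
          -mllvm -hwasan-inline-all-checks=0 \
          -c foo.c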

With a separate kernel patch [1] I measured the code size of defconfig
+ tag-based KASAN, as well as boot time (i.e. time to init launch)
on a DragonBoard 845c with an Android arm64 GKI kernel. The results
are below:

         code size    boot time
before    92824064      6.18s
after     38822400      6.65s

[1] https://linux-review.googlesource.com/id/I1a30036c70ab3c3ee78d75ed9b87ef7cdc3fdb76

Depends on D90425

Differential Revision: https://reviews.llvm.org/D90426
Author: Peter Collingbourne
Date:   2020-10-27 16:26:29 -07:00
Parent: cb14fa015d
Commit: 51d3ffbbc5

5 changed files with 141 additions and 36 deletions


@@ -37,6 +37,24 @@ private:
 FunctionPass *createHWAddressSanitizerLegacyPassPass(bool CompileKernel = false,
                                                      bool Recover = false);
+namespace HWASanAccessInfo {
+
+// Bit field positions for the accessinfo parameter to
+// llvm.hwasan.check.memaccess. Shared between the pass and the backend. Bits
+// 0-15 are also used by the runtime.
+enum {
+  AccessSizeShift = 0, // 4 bits
+  IsWriteShift = 4,
+  RecoverShift = 5,
+  MatchAllShift = 16, // 8 bits
+  HasMatchAllShift = 24,
+  CompileKernelShift = 25,
+};
+
+enum { RuntimeMask = 0xffff };
+
+} // namespace HWASanAccessInfo
+
 } // namespace llvm
 
 #endif


@@ -55,6 +55,7 @@
 #include "llvm/Support/TargetRegistry.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Target/TargetMachine.h"
+#include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
 #include <algorithm>
 #include <cassert>
 #include <cstdint>
@@ -338,6 +339,15 @@ void AArch64AsmPrinter::EmitHwasanMemaccessSymbols(Module &M) {
         IsShort ? HwasanTagMismatchV2Ref : HwasanTagMismatchV1Ref;
     MCSymbol *Sym = P.second;
+
+    bool HasMatchAllTag =
+        (AccessInfo >> HWASanAccessInfo::HasMatchAllShift) & 1;
+    uint8_t MatchAllTag =
+        (AccessInfo >> HWASanAccessInfo::MatchAllShift) & 0xff;
+    unsigned Size =
+        1 << ((AccessInfo >> HWASanAccessInfo::AccessSizeShift) & 0xf);
+    bool CompileKernel =
+        (AccessInfo >> HWASanAccessInfo::CompileKernelShift) & 1;
 
     OutStreamer->SwitchSection(OutContext.getELFSection(
         ".text.hot", ELF::SHT_PROGBITS,
         ELF::SHF_EXECINSTR | ELF::SHF_ALLOC | ELF::SHF_GROUP, 0,
@@ -382,6 +392,26 @@ void AArch64AsmPrinter::EmitHwasanMemaccessSymbols(Module &M) {
           MCInstBuilder(AArch64::RET).addReg(AArch64::LR), *STI);
 
     OutStreamer->emitLabel(HandleMismatchOrPartialSym);
+    if (HasMatchAllTag) {
+      OutStreamer->emitInstruction(MCInstBuilder(AArch64::UBFMXri)
+                                       .addReg(AArch64::X16)
+                                       .addReg(Reg)
+                                       .addImm(56)
+                                       .addImm(63),
+                                   *STI);
+      OutStreamer->emitInstruction(MCInstBuilder(AArch64::SUBSXri)
+                                       .addReg(AArch64::XZR)
+                                       .addReg(AArch64::X16)
+                                       .addImm(MatchAllTag)
+                                       .addImm(0),
+                                   *STI);
+      OutStreamer->emitInstruction(
+          MCInstBuilder(AArch64::Bcc)
+              .addImm(AArch64CC::EQ)
+              .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)),
+          *STI);
+    }
+
     if (IsShort) {
       OutStreamer->emitInstruction(MCInstBuilder(AArch64::SUBSWri)
                                        .addReg(AArch64::WZR)
@@ -402,7 +432,6 @@ void AArch64AsmPrinter::EmitHwasanMemaccessSymbols(Module &M) {
                                        .addReg(Reg)
                                        .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)),
                                    *STI);
-    unsigned Size = 1 << (AccessInfo & 0xf);
     if (Size != 1)
       OutStreamer->emitInstruction(MCInstBuilder(AArch64::ADDXri)
                                        .addReg(AArch64::X17)
@@ -470,32 +499,41 @@ void AArch64AsmPrinter::EmitHwasanMemaccessSymbols(Module &M) {
                                      .addReg(Reg)
                                      .addImm(0),
                                  *STI);
-    OutStreamer->emitInstruction(MCInstBuilder(AArch64::MOVZXi)
-                                     .addReg(AArch64::X1)
-                                     .addImm(AccessInfo)
-                                     .addImm(0),
-                                 *STI);
+    OutStreamer->emitInstruction(
+        MCInstBuilder(AArch64::MOVZXi)
+            .addReg(AArch64::X1)
+            .addImm(AccessInfo & HWASanAccessInfo::RuntimeMask)
+            .addImm(0),
+        *STI);
 
-    // Intentionally load the GOT entry and branch to it, rather than possibly
-    // late binding the function, which may clobber the registers before we have
-    // a chance to save them.
-    OutStreamer->emitInstruction(
-        MCInstBuilder(AArch64::ADRP)
-            .addReg(AArch64::X16)
-            .addExpr(AArch64MCExpr::create(
-                HwasanTagMismatchRef, AArch64MCExpr::VariantKind::VK_GOT_PAGE,
-                OutContext)),
-        *STI);
-    OutStreamer->emitInstruction(
-        MCInstBuilder(AArch64::LDRXui)
-            .addReg(AArch64::X16)
-            .addReg(AArch64::X16)
-            .addExpr(AArch64MCExpr::create(
-                HwasanTagMismatchRef, AArch64MCExpr::VariantKind::VK_GOT_LO12,
-                OutContext)),
-        *STI);
-    OutStreamer->emitInstruction(
-        MCInstBuilder(AArch64::BR).addReg(AArch64::X16), *STI);
+    if (CompileKernel) {
+      // The Linux kernel's dynamic loader doesn't support GOT relative
+      // relocations, but it doesn't support late binding either, so just call
+      // the function directly.
+      OutStreamer->emitInstruction(
+          MCInstBuilder(AArch64::B).addExpr(HwasanTagMismatchRef), *STI);
+    } else {
+      // Intentionally load the GOT entry and branch to it, rather than possibly
+      // late binding the function, which may clobber the registers before we
+      // have a chance to save them.
+      OutStreamer->emitInstruction(
+          MCInstBuilder(AArch64::ADRP)
+              .addReg(AArch64::X16)
+              .addExpr(AArch64MCExpr::create(
+                  HwasanTagMismatchRef, AArch64MCExpr::VariantKind::VK_GOT_PAGE,
+                  OutContext)),
+          *STI);
+      OutStreamer->emitInstruction(
+          MCInstBuilder(AArch64::LDRXui)
+              .addReg(AArch64::X16)
+              .addReg(AArch64::X16)
+              .addExpr(AArch64MCExpr::create(
+                  HwasanTagMismatchRef, AArch64MCExpr::VariantKind::VK_GOT_LO12,
+                  OutContext)),
+          *STI);
+      OutStreamer->emitInstruction(
+          MCInstBuilder(AArch64::BR).addReg(AArch64::X16), *STI);
+    }
   }
 }
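
For reference (not part of the patch), the HasMatchAllTag sequence above
assembles to three instructions; this sketch uses the x1 check from the
test below, with .Lreturn standing in for the numbered return label:

    lsr  x16, x1, #56   // UBFM x16, x1, #56, #63: extract the pointer tag
    cmp  x16, #255      // SUBS xzr, x16, #0xff: compare with the match-all tag
    b.eq .Lreturn       // tag is match-all: skip the report and return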


@@ -283,9 +283,13 @@ private:
   bool CompileKernel;
   bool Recover;
+  bool OutlinedChecks;
   bool UseShortGranules;
   bool InstrumentLandingPads;
+
+  bool HasMatchAllTag = false;
+  uint8_t MatchAllTag = 0;
 
   Function *HwasanCtorFunction;
 
   FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
@@ -496,6 +500,19 @@ void HWAddressSanitizer::initializeModule() {
   UseShortGranules =
       ClUseShortGranules.getNumOccurrences() ? ClUseShortGranules : NewRuntime;
+  OutlinedChecks =
+      TargetTriple.isAArch64() && TargetTriple.isOSBinFormatELF() &&
+      (ClInlineAllChecks.getNumOccurrences() ? !ClInlineAllChecks : !Recover);
+
+  if (ClMatchAllTag.getNumOccurrences()) {
+    if (ClMatchAllTag != -1) {
+      HasMatchAllTag = true;
+      MatchAllTag = ClMatchAllTag & 0xFF;
+    }
+  } else if (CompileKernel) {
+    HasMatchAllTag = true;
+    MatchAllTag = 0xFF;
+  }
 
   // If we don't have personality function support, fall back to landing pads.
   InstrumentLandingPads = ClInstrumentLandingPads.getNumOccurrences()
@@ -706,11 +723,16 @@ Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
 void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                                    unsigned AccessSizeIndex,
                                                    Instruction *InsertBefore) {
-  const int64_t AccessInfo = Recover * 0x20 + IsWrite * 0x10 + AccessSizeIndex;
+  const int64_t AccessInfo =
+      (CompileKernel << HWASanAccessInfo::CompileKernelShift) +
+      (HasMatchAllTag << HWASanAccessInfo::HasMatchAllShift) +
+      (MatchAllTag << HWASanAccessInfo::MatchAllShift) +
+      (Recover << HWASanAccessInfo::RecoverShift) +
+      (IsWrite << HWASanAccessInfo::IsWriteShift) +
+      (AccessSizeIndex << HWASanAccessInfo::AccessSizeShift);
   IRBuilder<> IRB(InsertBefore);
 
-  if (!ClInlineAllChecks && TargetTriple.isAArch64() &&
-      TargetTriple.isOSBinFormatELF() && !Recover) {
+  if (OutlinedChecks) {
     Module *M = IRB.GetInsertBlock()->getParent()->getParent();
     Ptr = IRB.CreateBitCast(Ptr, Int8PtrTy);
     IRB.CreateCall(Intrinsic::getDeclaration(
@@ -729,11 +751,9 @@ void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
   Value *MemTag = IRB.CreateLoad(Int8Ty, Shadow);
   Value *TagMismatch = IRB.CreateICmpNE(PtrTag, MemTag);
 
-  int matchAllTag = ClMatchAllTag.getNumOccurrences() > 0 ?
-      ClMatchAllTag : (CompileKernel ? 0xFF : -1);
-  if (matchAllTag != -1) {
-    Value *TagNotIgnored = IRB.CreateICmpNE(PtrTag,
-        ConstantInt::get(PtrTag->getType(), matchAllTag));
+  if (HasMatchAllTag) {
+    Value *TagNotIgnored = IRB.CreateICmpNE(
+        PtrTag, ConstantInt::get(PtrTag->getType(), MatchAllTag));
     TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
   }
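
For reference (not part of the patch), the IR this path produces keeps
the same shape as before; with the kernel default MatchAllTag of 0xff it
looks roughly like this (value names illustrative):

    %tag_mismatch = icmp ne i8 %ptr_tag, %mem_tag
    %not_ignored  = icmp ne i8 %ptr_tag, -1          ; -1 == 0xff as i8
    %mismatch     = and i1 %tag_mismatch, %not_ignored
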
@@ -773,7 +793,9 @@ void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
     // The signal handler will find the data address in rdi.
     Asm = InlineAsm::get(
         FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
-        "int3\nnopl " + itostr(0x40 + AccessInfo) + "(%rax)",
+        "int3\nnopl " +
+            itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)) +
+            "(%rax)",
         "{rdi}",
         /*hasSideEffects=*/true);
     break;
@@ -782,7 +804,8 @@ void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
     // The signal handler will find the data address in x0.
     Asm = InlineAsm::get(
         FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
-        "brk #" + itostr(0x900 + AccessInfo),
+        "brk #" +
+            itostr(0x900 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
         "{x0}",
         /*hasSideEffects=*/true);
     break;


@@ -30,6 +30,12 @@ define i8* @f2(i8* %x0, i8* %x1) {
   ret i8* %x0
 }
 
+define void @f3(i8* %x0, i8* %x1) {
+  ; 0x3ff0000 (kernel, match-all = 0xff)
+  call void @llvm.hwasan.check.memaccess(i8* %x0, i8* %x1, i32 67043328)
+  ret void
+}
+
 declare void @llvm.hwasan.check.memaccess(i8*, i8*, i32)
 declare void @llvm.hwasan.check.memaccess.shortgranules(i8*, i8*, i32)
@@ -83,3 +89,20 @@ declare void @llvm.hwasan.check.memaccess.shortgranules(i8*, i8*, i32)
 ; CHECK-NEXT: adrp x16, :got:__hwasan_tag_mismatch
 ; CHECK-NEXT: ldr x16, [x16, :got_lo12:__hwasan_tag_mismatch]
 ; CHECK-NEXT: br x16
+
+; CHECK: __hwasan_check_x1_67043328:
+; CHECK-NEXT: sbfx x16, x1, #4, #52
+; CHECK-NEXT: ldrb w16, [x9, x16]
+; CHECK-NEXT: cmp x16, x1, lsr #56
+; CHECK-NEXT: b.ne .Ltmp5
+; CHECK-NEXT: .Ltmp6:
+; CHECK-NEXT: ret
+; CHECK-NEXT: .Ltmp5:
+; CHECK-NEXT: lsr x16, x1, #56
+; CHECK-NEXT: cmp x16, #255
+; CHECK-NEXT: b.eq .Ltmp6
+; CHECK-NEXT: stp x0, x1, [sp, #-256]!
+; CHECK-NEXT: stp x29, x30, [sp, #232]
+; CHECK-NEXT: mov x0, x1
+; CHECK-NEXT: mov x1, #0
+; CHECK-NEXT: b __hwasan_tag_mismatch
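
Note the tail of the outlined check above: x0/x1 and the frame registers
are spilled, the faulting address moves into x0, and x1 receives only the
runtime-visible bits of the access info. Here 67043328 is 0x3ff0000, and
0x3ff0000 & 0xffff == 0, hence the mov x1, #0 before the direct
b __hwasan_tag_mismatch with no GOT indirection in kernel mode.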


@@ -4,6 +4,7 @@
 ; RUN: opt < %s -hwasan -hwasan-kernel=1 -hwasan-recover=1 -S | FileCheck %s --check-prefixes=CHECK,NOOFFSET,MATCH-ALL
 ; RUN: opt < %s -hwasan -hwasan-kernel=1 -hwasan-recover=1 -hwasan-mapping-offset=12345678 -S | FileCheck %s --check-prefixes=CHECK,OFFSET,MATCH-ALL
 ; RUN: opt < %s -hwasan -hwasan-kernel=1 -hwasan-recover=1 -hwasan-match-all-tag=-1 -S | FileCheck %s --check-prefixes=CHECK,NOOFFSET,NO-MATCH-ALL
+; RUN: opt < %s -hwasan -hwasan-kernel=1 -hwasan-recover=1 -hwasan-inline-all-checks=0 -hwasan-mapping-offset=12345678 -S | FileCheck %s --check-prefixes=OUTLINE
 
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64--linux-android"
@@ -36,6 +37,8 @@ define i8 @test_load(i8* %a) sanitize_hwaddress {
 ; CHECK: %[[G:[^ ]*]] = load i8, i8* %a, align 4
 ; CHECK: ret i8 %[[G]]
 
+; OUTLINE: %[[SHADOW:[^ ]*]] = call i8* asm "", "=r,0"(i8* inttoptr (i64 12345678 to i8*))
+; OUTLINE: call void @llvm.hwasan.check.memaccess(i8* %[[SHADOW]], i8* %a, i32 67043360)
+
 entry:
   %b = load i8, i8* %a, align 4
   ret i8 %b