update libatomic instrumentation
This commit is contained in:
parent 6f82a1b548
commit f68c59afc1
lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -572,6 +572,9 @@ private:
   /// uninitialized value and returns an updated origin id encoding this info.
   FunctionCallee MsanChainOriginFn;
 
+  /// Run-time helper that paints an origin over a region.
+  FunctionCallee MsanSetOriginFn;
+
   /// MSan runtime replacements for memmove, memcpy and memset.
   FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
 
@@ -850,6 +853,9 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
   // instrumentation.
   MsanChainOriginFn = M.getOrInsertFunction(
      "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty());
+  MsanSetOriginFn =
+      M.getOrInsertFunction("__msan_set_origin", IRB.getVoidTy(),
+                            IRB.getInt8PtrTy(), IntptrTy, IRB.getInt32Ty());
   MemmoveFn = M.getOrInsertFunction(
      "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IntptrTy);
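
Note: the getOrInsertFunction calls above only declare these callbacks inside the instrumented module; the definitions live in the MSan runtime (compiler-rt). As a rough sketch of the C-level prototypes the declarations assume (the entry-point names are the real runtime symbols, but treat the exact parameter typedefs here as illustrative):

#include <cstdint>

// Sketch of the runtime-side prototypes matching the declarations above
// (exact typedefs in compiler-rt may differ; illustrative only).
extern "C" {
// Returns an updated origin id encoding that `origin` was propagated.
uint32_t __msan_chain_origin(uint32_t origin);
// Paints `origin` over `size` bytes of origin shadow starting at `addr`.
void __msan_set_origin(const void *addr, uintptr_t size, uint32_t origin);
// MSan replacement for memmove that also moves shadow and origins.
void *__msan_memmove(void *dst, const void *src, uintptr_t n);
}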
@@ -1769,6 +1775,24 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     llvm_unreachable("Unknown ordering");
   }
 
+  Value *makeAddReleaseOrderingTable(IRBuilder<> &IRB) {
+    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
+    uint32_t OrderingTable[NumOrderings] = {};
+
+    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
+        OrderingTable[(int)AtomicOrderingCABI::release] =
+            (int)AtomicOrderingCABI::release;
+    OrderingTable[(int)AtomicOrderingCABI::consume] =
+        OrderingTable[(int)AtomicOrderingCABI::acquire] =
+            OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
+                (int)AtomicOrderingCABI::acq_rel;
+    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
+        (int)AtomicOrderingCABI::seq_cst;
+
+    return ConstantDataVector::get(IRB.getContext(),
+                                   makeArrayRef(OrderingTable, NumOrderings));
+  }
+
   AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
     switch (a) {
       case AtomicOrdering::NotAtomic:
@@ -1786,6 +1810,24 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     llvm_unreachable("Unknown ordering");
   }
 
+  Value *makeAddAcquireOrderingTable(IRBuilder<> &IRB) {
+    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
+    uint32_t OrderingTable[NumOrderings] = {};
+
+    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
+        OrderingTable[(int)AtomicOrderingCABI::acquire] =
+            OrderingTable[(int)AtomicOrderingCABI::consume] =
+                (int)AtomicOrderingCABI::acquire;
+    OrderingTable[(int)AtomicOrderingCABI::release] =
+        OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
+            (int)AtomicOrderingCABI::acq_rel;
+    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
+        (int)AtomicOrderingCABI::seq_cst;
+
+    return ConstantDataVector::get(IRB.getContext(),
+                                   makeArrayRef(OrderingTable, NumOrderings));
+  }
+
   // ------------------- Visitors.
   using InstVisitor<MemorySanitizerVisitor>::visit;
   void visit(Instruction &I) {
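
Note: the two helpers above build a compile-time constant vector, indexed at run time by the C-ABI memory-order argument of the libatomic call, that maps every ordering to one at least as strong as acquire (for loads) or release (for stores). Selecting the entry with extractelement lets the pass strengthen an ordering that is only known at run time without emitting branches. A minimal plain-C++ model of the acquire table, assuming the standard AtomicOrderingCABI encoding (relaxed=0, consume=1, acquire=2, release=3, acq_rel=4, seq_cst=5); this is a sketch, not the pass's code:

#include <cstdint>

// Model of makeAddAcquireOrderingTable: index by the incoming C-ABI ordering,
// read out an ordering that is at least acquire.
uint32_t addAcquireOrderingModel(uint32_t ordering) {
  static constexpr uint32_t Table[6] = {
      /*relaxed=0*/ 2,   // -> acquire
      /*consume=1*/ 2,   // -> acquire
      /*acquire=2*/ 2,   // -> acquire
      /*release=3*/ 4,   // -> acq_rel
      /*acq_rel=4*/ 4,   // -> acq_rel
      /*seq_cst=5*/ 5};  // -> seq_cst
  return ordering < 6 ? Table[ordering] : ordering;  // defensive bound check
}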
@@ -3404,6 +3446,60 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     }
   }
 
+  void visitLibAtomicLoad(CallBase &CB) {
+    IRBuilder<> IRB(&CB);
+    Value *Size = CB.getArgOperand(0);
+    Value *SrcPtr = CB.getArgOperand(1);
+    Value *DstPtr = CB.getArgOperand(2);
+    Value *Ordering = CB.getArgOperand(3);
+    // Convert the call to have at least Acquire ordering to make sure
+    // the shadow operations aren't reordered before it.
+    Value *NewOrdering =
+        IRB.CreateExtractElement(makeAddAcquireOrderingTable(IRB), Ordering);
+    CB.setArgOperand(3, NewOrdering);
+
+    IRBuilder<> NextIRB(CB.getNextNode());
+    NextIRB.SetCurrentDebugLocation(CB.getDebugLoc());
+
+    Value *SrcShadowPtr, *SrcOriginPtr;
+    std::tie(SrcShadowPtr, SrcOriginPtr) =
+        getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
+                           /*isStore*/ false);
+    Value *DstShadowPtr =
+        getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
+                           /*isStore*/ true)
+            .first;
+
+    NextIRB.CreateMemCpy(DstShadowPtr, Align(1), SrcShadowPtr, Align(1), Size);
+    if (MS.TrackOrigins) {
+      Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
+                                                   kMinOriginAlignment);
+      Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
+      NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
+    }
+  }
+
+  void visitLibAtomicStore(CallBase &CB) {
+    IRBuilder<> IRB(&CB);
+    Value *Size = CB.getArgOperand(0);
+    Value *DstPtr = CB.getArgOperand(2);
+    Value *Ordering = CB.getArgOperand(3);
+    // Convert the call to have at least Release ordering to make sure
+    // the shadow operations aren't reordered after it.
+    Value *NewOrdering =
+        IRB.CreateExtractElement(makeAddReleaseOrderingTable(IRB), Ordering);
+    CB.setArgOperand(3, NewOrdering);
+
+    Value *DstShadowPtr =
+        getShadowOriginPtr(DstPtr, IRB, IRB.getInt8Ty(), Align(1),
+                           /*isStore*/ true)
+            .first;
+
+    // Atomic store always paints clean shadow/origin. See file header.
+    IRB.CreateMemSet(DstShadowPtr, getCleanShadow(IRB.getInt8Ty()), Size,
+                     Align(1));
+  }
+
   void visitCallBase(CallBase &CB) {
     assert(!CB.getMetadata("nosanitize"));
     if (CB.isInlineAsm()) {
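
Note: the two visitors implement the shadow bookkeeping the comments describe. After an __atomic_load, the destination buffer's shadow becomes a byte-for-byte copy of the source's shadow (and, with origin tracking, a chained origin is painted over the destination); before an __atomic_store, the destination's shadow is cleared, since atomic stores always write clean shadow. A byte-level sketch of that propagation, with plain arrays standing in for MSan's real shadow memory (illustrative model, not the pass itself):

#include <cstddef>
#include <cstring>

// Toy model of the shadow handling around the generic libatomic calls;
// srcShadow/dstShadow stand in for the shadow of the application buffers.

// visitLibAtomicLoad: copy the source shadow over the destination shadow,
// so an uninitialized source stays poisoned in the loaded value.
void modelAtomicLoadShadow(unsigned char *dstShadow,
                           const unsigned char *srcShadow, size_t size) {
  std::memcpy(dstShadow, srcShadow, size);
}

// visitLibAtomicStore: paint clean (zero) shadow over the destination; the
// stored value is always treated as fully initialized.
void modelAtomicStoreShadow(unsigned char *dstShadow, size_t size) {
  std::memset(dstShadow, 0, size);
}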
@@ -3417,6 +3513,23 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       visitInstruction(CB);
       return;
     }
+    LibFunc LF;
+    if (TLI->getLibFunc(CB, LF)) {
+      // libatomic.a functions need to have special handling because there isn't
+      // a good way to intercept them or compile the library with
+      // instrumentation.
+      switch (LF) {
+      case LibFunc_atomic_load:
+        visitLibAtomicLoad(CB);
+        return;
+      case LibFunc_atomic_store:
+        visitLibAtomicStore(CB);
+        return;
+      default:
+        break;
+      }
+    }
+
     if (auto *Call = dyn_cast<CallInst>(&CB)) {
       assert(!isa<IntrinsicInst>(Call) && "intrinsics are handled elsewhere");
 
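
Note: these out-of-line __atomic_load/__atomic_store library calls are what the compiler emits when no native atomic instruction fits, e.g. for oddly sized types like the i24 exercised by the new test. A hedged illustration of source code that typically produces such libcalls (the Odd type and functions are hypothetical, and the exact lowering is target- and ABI-dependent):

// A 3-byte object accessed through the GCC/Clang __atomic_* builtins. With no
// native 3-byte atomic access available, the builtins fall back to libatomic
// calls (__atomic_load(3, ...), __atomic_store(3, ...)), which are the
// LibFunc_atomic_load / LibFunc_atomic_store cases handled above.
struct Odd { unsigned char bytes[3]; };

Odd loadOdd(Odd *p) {
  Odd result;
  __atomic_load(p, &result, __ATOMIC_ACQUIRE);  // generic, non-integral form
  return result;
}

void storeOdd(Odd *p, Odd *val) {
  __atomic_store(p, val, __ATOMIC_RELEASE);
}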
test/Instrumentation/MemorySanitizer/libatomic.ll (new file, 70 lines)
@@ -0,0 +1,70 @@
+; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck %s
+; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=2 -S -passes=msan 2>&1 | FileCheck %s -check-prefixes=CHECK,CHECK-ORIGIN
+; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare void @__atomic_load(i64, i8*, i8*, i32)
+declare void @__atomic_store(i64, i8*, i8*, i32)
+
+define i24 @odd_sized_load(i24* %ptr) sanitize_memory {
+; CHECK: @odd_sized_load(i24* {{.*}}[[PTR:%.+]])
+; CHECK: [[VAL_PTR:%.*]] = alloca i24, align 1
+; CHECK-ORIGIN: @__msan_set_alloca_origin
+; CHECK: [[VAL_PTR_I8:%.*]] = bitcast i24* [[VAL_PTR]] to i8*
+; CHECK: [[PTR_I8:%.*]] = bitcast i24* [[PTR]] to i8*
+; CHECK: call void @__atomic_load(i64 3, i8* [[PTR_I8]], i8* [[VAL_PTR_I8]], i32 2)
+
+; CHECK: ptrtoint i8* [[PTR_I8]]
+; CHECK: xor
+; CHECK: [[SPTR_I8:%.*]] = inttoptr
+; CHECK-ORIGIN: add
+; CHECK-ORIGIN: and
+; CHECK-ORIGIN: [[OPTR:%.*]] = inttoptr
+
+; CHECK: ptrtoint i8* [[VAL_PTR_I8]]
+; CHECK: xor
+; CHECK: [[VAL_SPTR_I8:%.*]] = inttoptr
+; CHECK-ORIGIN: add
+; CHECK-ORIGIN: and
+; CHECK-ORIGIN: [[VAL_OPTR:%.*]] = inttoptr
+
+; CHECK: call void @llvm.memcpy{{.*}}(i8* align 1 [[VAL_SPTR_I8]], i8* align 1 [[SPTR_I8]], i64 3
+
+; CHECK-ORIGIN: [[ARG_ORIGIN:%.*]] = load i32, i32* [[OPTR]]
+; CHECK-ORIGIN: [[VAL_ORIGIN:%.*]] = call i32 @__msan_chain_origin(i32 [[ARG_ORIGIN]])
+; CHECK-ORIGIN: call void @__msan_set_origin(i8* [[VAL_PTR_I8]], i64 3, i32 [[VAL_ORIGIN]])
+
+; CHECK: [[VAL:%.*]] = load i24, i24* [[VAL_PTR]]
+; CHECK: ret i24 [[VAL]]
+  %val_ptr = alloca i24, align 1
+  %val_ptr_i8 = bitcast i24* %val_ptr to i8*
+  %ptr_i8 = bitcast i24* %ptr to i8*
+  call void @__atomic_load(i64 3, i8* %ptr_i8, i8* %val_ptr_i8, i32 0)
+  %val = load i24, i24* %val_ptr
+  ret i24 %val
+}
+
+define void @odd_sized_store(i24* %ptr, i24 %val) sanitize_memory {
+; CHECK: @odd_sized_store(i24* {{.*}}[[PTR:%.+]], i24 {{.*}}[[VAL:%.+]])
+; CHECK: [[VAL_PTR:%.*]] = alloca i24, align 1
+; CHECK: store i24 [[VAL]], i24* [[VAL_PTR]]
+; CHECK: [[VAL_PTR_I8:%.*]] = bitcast i24* [[VAL_PTR]] to i8*
+; CHECK: [[PTR_I8:%.*]] = bitcast i24* [[PTR]] to i8*
+
+; CHECK: ptrtoint i8* [[PTR_I8]]
+; CHECK: xor
+; CHECK: [[SPTR_I8:%.*]] = inttoptr
+; CHECK: call void @llvm.memset{{.*}}(i8* align 1 [[SPTR_I8]], i8 0, i64 3
+; CHECK-ORIGIN: call void @__msan_set_origin(i8* [[PTR_I8]], i64 3, i32 0)
+
+; CHECK: call void @__atomic_store(i64 3, i8* [[VAL_PTR_I8]], i8* [[PTR_I8]], i32 3)
+; CHECK: ret void
+  %val_ptr = alloca i24, align 1
+  store i24 %val, i24* %val_ptr
+  %val_ptr_i8 = bitcast i24* %val_ptr to i8*
+  %ptr_i8 = bitcast i24* %ptr to i8*
+  call void @__atomic_store(i64 3, i8* %val_ptr_i8, i8* %ptr_i8, i32 0)
+  ret void
+}