From 1fa6761405d67581131d26f33c00291e64c2a2d8 Mon Sep 17 00:00:00 2001
From: Gui Andrade
Date: Sat, 18 Jul 2020 03:53:00 +0000
Subject: [PATCH] Revert "update libatomic instrumentation"

This was committed mistakenly.

This reverts commit 1f29171ae77f81cacea32808b67d7ae62da23e0c.
---
 .../Instrumentation/MemorySanitizer.cpp      | 113 ------------------
 .../MemorySanitizer/libatomic.ll             |  70 -----------
 2 files changed, 183 deletions(-)
 delete mode 100644 test/Instrumentation/MemorySanitizer/libatomic.ll

diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 0001559c405..fcf7f470b3e 100644
--- a/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -572,9 +572,6 @@ private:
   /// uninitialized value and returns an updated origin id encoding this info.
   FunctionCallee MsanChainOriginFn;
 
-  /// Run-time helper that paints an origin over a region.
-  FunctionCallee MsanSetOriginFn;
-
   /// MSan runtime replacements for memmove, memcpy and memset.
   FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
 
@@ -853,9 +850,6 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
   // instrumentation.
   MsanChainOriginFn = M.getOrInsertFunction(
       "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty());
-  MsanSetOriginFn =
-      M.getOrInsertFunction("__msan_set_origin", IRB.getVoidTy(),
-                            IRB.getInt8PtrTy(), IntptrTy, IRB.getInt32Ty());
   MemmoveFn = M.getOrInsertFunction(
       "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
       IRB.getInt8PtrTy(), IntptrTy);
@@ -1775,24 +1769,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     llvm_unreachable("Unknown ordering");
   }
 
-  Value *makeAddReleaseOrderingTable(IRBuilder<> &IRB) {
-    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
-    uint32_t OrderingTable[NumOrderings] = {};
-
-    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
-        OrderingTable[(int)AtomicOrderingCABI::release] =
-            (int)AtomicOrderingCABI::release;
-    OrderingTable[(int)AtomicOrderingCABI::consume] =
-        OrderingTable[(int)AtomicOrderingCABI::acquire] =
-            OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
-                (int)AtomicOrderingCABI::acq_rel;
-    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
-        (int)AtomicOrderingCABI::seq_cst;
-
-    return ConstantDataVector::get(IRB.getContext(),
-                                   makeArrayRef(OrderingTable, NumOrderings));
-  }
-
   AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
     switch (a) {
     case AtomicOrdering::NotAtomic:
@@ -1810,24 +1786,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     llvm_unreachable("Unknown ordering");
   }
 
-  Value *makeAddAcquireOrderingTable(IRBuilder<> &IRB) {
-    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
-    uint32_t OrderingTable[NumOrderings] = {};
-
-    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
-        OrderingTable[(int)AtomicOrderingCABI::acquire] =
-            OrderingTable[(int)AtomicOrderingCABI::consume] =
-                (int)AtomicOrderingCABI::acquire;
-    OrderingTable[(int)AtomicOrderingCABI::release] =
-        OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
-            (int)AtomicOrderingCABI::acq_rel;
-    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
-        (int)AtomicOrderingCABI::seq_cst;
-
-    return ConstantDataVector::get(IRB.getContext(),
-                                   makeArrayRef(OrderingTable, NumOrderings));
-  }
-
   // ------------------- Visitors.
   using InstVisitor<MemorySanitizerVisitor>::visit;
   void visit(Instruction &I) {
@@ -3446,60 +3404,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     }
   }
 
-  void visitLibAtomicLoad(CallBase &CB) {
-    IRBuilder<> IRB(&CB);
-    Value *Size = CB.getArgOperand(0);
-    Value *SrcPtr = CB.getArgOperand(1);
-    Value *DstPtr = CB.getArgOperand(2);
-    Value *Ordering = CB.getArgOperand(3);
-    // Convert the call to have at least Acquire ordering to make sure
-    // the shadow operations aren't reordered before it.
-    Value *NewOrdering =
-        IRB.CreateExtractElement(makeAddAcquireOrderingTable(IRB), Ordering);
-    CB.setArgOperand(3, NewOrdering);
-
-    IRBuilder<> NextIRB(CB.getNextNode());
-    NextIRB.SetCurrentDebugLocation(CB.getDebugLoc());
-
-    Value *SrcShadowPtr, *SrcOriginPtr;
-    std::tie(SrcShadowPtr, SrcOriginPtr) =
-        getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
-                           /*isStore*/ false);
-    Value *DstShadowPtr =
-        getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
-                           /*isStore*/ true)
-            .first;
-
-    NextIRB.CreateMemCpy(DstShadowPtr, Align(1), SrcShadowPtr, Align(1), Size);
-    if (MS.TrackOrigins) {
-      Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
-                                                   kMinOriginAlignment);
-      Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
-      NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
-    }
-  }
-
-  void visitLibAtomicStore(CallBase &CB) {
-    IRBuilder<> IRB(&CB);
-    Value *Size = CB.getArgOperand(0);
-    Value *DstPtr = CB.getArgOperand(2);
-    Value *Ordering = CB.getArgOperand(3);
-    // Convert the call to have at least Release ordering to make sure
-    // the shadow operations aren't reordered after it.
-    Value *NewOrdering =
-        IRB.CreateExtractElement(makeAddReleaseOrderingTable(IRB), Ordering);
-    CB.setArgOperand(3, NewOrdering);
-
-    Value *DstShadowPtr =
-        getShadowOriginPtr(DstPtr, IRB, IRB.getInt8Ty(), Align(1),
-                           /*isStore*/ true)
-            .first;
-
-    // Atomic store always paints clean shadow/origin. See file header.
-    IRB.CreateMemSet(DstShadowPtr, getCleanShadow(IRB.getInt8Ty()), Size,
-                     Align(1));
-  }
-
   void visitCallBase(CallBase &CB) {
     assert(!CB.getMetadata("nosanitize"));
     if (CB.isInlineAsm()) {
@@ -3513,23 +3417,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       visitInstruction(CB);
       return;
     }
-    LibFunc LF;
-    if (TLI->getLibFunc(CB, LF)) {
-      // libatomic.a functions need to have special handling because there isn't
-      // a good way to intercept them or compile the library with
-      // instrumentation.
-      switch (LF) {
-      case LibFunc_atomic_load:
-        visitLibAtomicLoad(CB);
-        return;
-      case LibFunc_atomic_store:
-        visitLibAtomicStore(CB);
-        return;
-      default:
-        break;
-      }
-    }
-
     if (auto *Call = dyn_cast<CallInst>(&CB)) {
       assert(!isa<IntrinsicInst>(Call) && "intrinsics are handled elsewhere");
 
diff --git a/test/Instrumentation/MemorySanitizer/libatomic.ll b/test/Instrumentation/MemorySanitizer/libatomic.ll
deleted file mode 100644
index a2515740b45..00000000000
--- a/test/Instrumentation/MemorySanitizer/libatomic.ll
+++ /dev/null
@@ -1,70 +0,0 @@
-; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck %s
-; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=2 -S -passes=msan 2>&1 | FileCheck %s -check-prefixes=CHECK,CHECK-ORIGIN
-; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
-declare void @__atomic_load(i64, i8*, i8*, i32)
-declare void @__atomic_store(i64, i8*, i8*, i32)
-
-define i24 @odd_sized_load(i24* %ptr) sanitize_memory {
-; CHECK: @odd_sized_load(i24* {{.*}}[[PTR:%.+]])
-; CHECK: [[VAL_PTR:%.*]] = alloca i24, align 1
-; CHECK-ORIGIN: @__msan_set_alloca_origin
-; CHECK: [[VAL_PTR_I8:%.*]] = bitcast i24* [[VAL_PTR]] to i8*
-; CHECK: [[PTR_I8:%.*]] = bitcast i24* [[PTR]] to i8*
-; CHECK: call void @__atomic_load(i64 3, i8* [[PTR_I8]], i8* [[VAL_PTR_I8]], i32 2)
-
-; CHECK: ptrtoint i8* [[PTR_I8]]
-; CHECK: xor
-; CHECK: [[SPTR_I8:%.*]] = inttoptr
-; CHECK-ORIGIN: add
-; CHECK-ORIGIN: and
-; CHECK-ORIGIN: [[OPTR:%.*]] = inttoptr
-
-; CHECK: ptrtoint i8* [[VAL_PTR_I8]]
-; CHECK: xor
-; CHECK: [[VAL_SPTR_I8:%.*]] = inttoptr
-; CHECK-ORIGIN: add
-; CHECK-ORIGIN: and
-; CHECK-ORIGIN: [[VAL_OPTR:%.*]] = inttoptr
-
-; CHECK: call void @llvm.memcpy{{.*}}(i8* align 1 [[VAL_SPTR_I8]], i8* align 1 [[SPTR_I8]], i64 3
-
-; CHECK-ORIGIN: [[ARG_ORIGIN:%.*]] = load i32, i32* [[OPTR]]
-; CHECK-ORIGIN: [[VAL_ORIGIN:%.*]] = call i32 @__msan_chain_origin(i32 [[ARG_ORIGIN]])
-; CHECK-ORIGIN: call void @__msan_set_origin(i8* [[VAL_PTR_I8]], i64 3, i32 [[VAL_ORIGIN]])
-
-; CHECK: [[VAL:%.*]] = load i24, i24* [[VAL_PTR]]
-; CHECK: ret i24 [[VAL]]
-  %val_ptr = alloca i24, align 1
-  %val_ptr_i8 = bitcast i24* %val_ptr to i8*
-  %ptr_i8 = bitcast i24* %ptr to i8*
-  call void @__atomic_load(i64 3, i8* %ptr_i8, i8* %val_ptr_i8, i32 0)
-  %val = load i24, i24* %val_ptr
-  ret i24 %val
-}
-
-define void @odd_sized_store(i24* %ptr, i24 %val) sanitize_memory {
-; CHECK: @odd_sized_store(i24* {{.*}}[[PTR:%.+]], i24 {{.*}}[[VAL:%.+]])
-; CHECK: [[VAL_PTR:%.*]] = alloca i24, align 1
-; CHECK: store i24 [[VAL]], i24* [[VAL_PTR]]
-; CHECK: [[VAL_PTR_I8:%.*]] = bitcast i24* [[VAL_PTR]] to i8*
-; CHECK: [[PTR_I8:%.*]] = bitcast i24* [[PTR]] to i8*
-
-; CHECK: ptrtoint i8* [[PTR_I8]]
-; CHECK: xor
-; CHECK: [[SPTR_I8:%.*]] = inttoptr
-; CHECK: call void @llvm.memset{{.*}}(i8* align 1 [[SPTR_I8]], i8 0, i64 3
-; CHECK-ORIGIN: call void @__msan_set_origin(i8* [[PTR_I8]], i64 3, i32 0)
-
-; CHECK: call void @__atomic_store(i64 3, i8* [[VAL_PTR_I8]], i8* [[PTR_I8]], i32 3)
-; CHECK: ret void
-  %val_ptr = alloca i24, align 1
-  store i24 %val, i24* %val_ptr
-  %val_ptr_i8 = bitcast i24* %val_ptr to i8*
-  %ptr_i8 = bitcast i24* %ptr to i8*
-  call void @__atomic_store(i64 3, i8* %val_ptr_i8, i8* %ptr_i8, i32 0)
-  ret void
-}
-