//===-- ThreadSanitizer.cpp - race detector -------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer, a race detector.
//
// The tool is under development; for details about previous versions see
// http://code.google.com/p/data-race-test
//
// The instrumentation phase is quite simple:
//   - Insert calls to the run-time library before every memory access.
//     - Optimizations may apply to avoid instrumenting some of the accesses.
//   - Insert calls at function entry/exit.
// The rest is handled by the run-time library.
//===----------------------------------------------------------------------===//
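// For example (a rough sketch of what the pass emits), a plain 4-byte load
//   %val = load i32, i32* %ptr
// is instrumented as
//   call void @__tsan_read4(i8* %ptr.i8)   ; %ptr.i8 is %ptr cast to i8*
//   %val = load i32, i32* %ptr
// and every instrumented function is bracketed with __tsan_func_entry /
// __tsan_func_exit calls.
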
#include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
|
|
#include "llvm/ADT/DenseMap.h"
|
|
#include "llvm/ADT/Optional.h"
|
|
#include "llvm/ADT/SmallString.h"
|
|
#include "llvm/ADT/SmallVector.h"
|
|
#include "llvm/ADT/Statistic.h"
|
|
#include "llvm/ADT/StringExtras.h"
|
|
#include "llvm/Analysis/CaptureTracking.h"
|
|
#include "llvm/Analysis/TargetLibraryInfo.h"
|
|
#include "llvm/Analysis/ValueTracking.h"
|
|
#include "llvm/IR/DataLayout.h"
|
|
#include "llvm/IR/Function.h"
|
|
#include "llvm/IR/IRBuilder.h"
|
|
#include "llvm/IR/IntrinsicInst.h"
|
|
#include "llvm/IR/Intrinsics.h"
|
|
#include "llvm/IR/LLVMContext.h"
|
|
#include "llvm/IR/Metadata.h"
|
|
#include "llvm/IR/Module.h"
|
|
#include "llvm/IR/Type.h"
|
|
#include "llvm/InitializePasses.h"
|
|
#include "llvm/ProfileData/InstrProf.h"
|
|
#include "llvm/Support/CommandLine.h"
|
|
#include "llvm/Support/Debug.h"
|
|
#include "llvm/Support/MathExtras.h"
|
|
#include "llvm/Support/raw_ostream.h"
|
|
#include "llvm/Transforms/Instrumentation.h"
|
|
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
|
|
#include "llvm/Transforms/Utils/EscapeEnumerator.h"
|
|
#include "llvm/Transforms/Utils/Local.h"
|
|
#include "llvm/Transforms/Utils/ModuleUtils.h"
|
|
|
|
using namespace llvm;
|
|
|
|
#define DEBUG_TYPE "tsan"
|
|
|
|
static cl::opt<bool> ClInstrumentMemoryAccesses(
    "tsan-instrument-memory-accesses", cl::init(true),
    cl::desc("Instrument memory accesses"), cl::Hidden);
static cl::opt<bool>
    ClInstrumentFuncEntryExit("tsan-instrument-func-entry-exit", cl::init(true),
                              cl::desc("Instrument function entry and exit"),
                              cl::Hidden);
static cl::opt<bool> ClHandleCxxExceptions(
    "tsan-handle-cxx-exceptions", cl::init(true),
    cl::desc("Handle C++ exceptions (insert cleanup blocks for unwinding)"),
    cl::Hidden);
static cl::opt<bool> ClInstrumentAtomics("tsan-instrument-atomics",
                                         cl::init(true),
                                         cl::desc("Instrument atomics"),
                                         cl::Hidden);
static cl::opt<bool> ClInstrumentMemIntrinsics(
    "tsan-instrument-memintrinsics", cl::init(true),
    cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);
static cl::opt<bool> ClDistinguishVolatile(
    "tsan-distinguish-volatile", cl::init(false),
    cl::desc("Emit special instrumentation for accesses to volatiles"),
    cl::Hidden);
static cl::opt<bool> ClInstrumentReadBeforeWrite(
    "tsan-instrument-read-before-write", cl::init(false),
    cl::desc("Do not eliminate read instrumentation for read-before-writes"),
    cl::Hidden);
static cl::opt<bool> ClCompoundReadBeforeWrite(
    "tsan-compound-read-before-write", cl::init(false),
    cl::desc("Emit special compound instrumentation for reads-before-writes"),
    cl::Hidden);

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOmittedReadsBeforeWrite,
          "Number of reads ignored due to following writes");
STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
STATISTIC(NumOmittedReadsFromConstantGlobals,
          "Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");

const char kTsanModuleCtorName[] = "tsan.module_ctor";
const char kTsanInitName[] = "__tsan_init";

namespace {

/// ThreadSanitizer: instrument the code in module to find races.
///
/// Instantiating ThreadSanitizer inserts the tsan runtime library API function
/// declarations into the module if they don't exist already. Instantiating
/// ensures the __tsan_init function is in the list of global constructors for
/// the module.
struct ThreadSanitizer {
  ThreadSanitizer() {
    // Sanity check options and warn user.
    if (ClInstrumentReadBeforeWrite && ClCompoundReadBeforeWrite) {
      errs()
          << "warning: Option -tsan-compound-read-before-write has no effect "
             "when -tsan-instrument-read-before-write is set.\n";
    }
  }

  bool sanitizeFunction(Function &F, const TargetLibraryInfo &TLI);

private:
  // Internal Instruction wrapper that contains more information about the
  // Instruction from prior analysis.
  struct InstructionInfo {
    // Instrumentation emitted for this instruction is for a compounded set of
    // read and write operations in the same basic block.
    static constexpr unsigned kCompoundRW = (1U << 0);

    explicit InstructionInfo(Instruction *Inst) : Inst(Inst) {}

    Instruction *Inst;
    unsigned Flags = 0;
  };

  void initialize(Module &M);
  bool instrumentLoadOrStore(const InstructionInfo &II, const DataLayout &DL);
  bool instrumentAtomic(Instruction *I, const DataLayout &DL);
  bool instrumentMemIntrinsic(Instruction *I);
  void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
                                      SmallVectorImpl<InstructionInfo> &All,
                                      const DataLayout &DL);
  bool addrPointsToConstantData(Value *Addr);
  int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL);
  void InsertRuntimeIgnores(Function &F);

  Type *IntptrTy;
  FunctionCallee TsanFuncEntry;
  FunctionCallee TsanFuncExit;
  FunctionCallee TsanIgnoreBegin;
  FunctionCallee TsanIgnoreEnd;
  // Access sizes are powers of two: 1, 2, 4, 8, 16.
  static const size_t kNumberOfAccessSizes = 5;
  FunctionCallee TsanRead[kNumberOfAccessSizes];
  FunctionCallee TsanWrite[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedRead[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedWrite[kNumberOfAccessSizes];
  FunctionCallee TsanVolatileRead[kNumberOfAccessSizes];
  FunctionCallee TsanVolatileWrite[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedVolatileRead[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedVolatileWrite[kNumberOfAccessSizes];
  FunctionCallee TsanCompoundRW[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedCompoundRW[kNumberOfAccessSizes];
  FunctionCallee TsanAtomicLoad[kNumberOfAccessSizes];
  FunctionCallee TsanAtomicStore[kNumberOfAccessSizes];
  FunctionCallee TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1]
                              [kNumberOfAccessSizes];
  FunctionCallee TsanAtomicCAS[kNumberOfAccessSizes];
  FunctionCallee TsanAtomicThreadFence;
  FunctionCallee TsanAtomicSignalFence;
  FunctionCallee TsanVptrUpdate;
  FunctionCallee TsanVptrLoad;
  FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
};

struct ThreadSanitizerLegacyPass : FunctionPass {
  ThreadSanitizerLegacyPass() : FunctionPass(ID) {
    initializeThreadSanitizerLegacyPassPass(*PassRegistry::getPassRegistry());
  }
  StringRef getPassName() const override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  static char ID; // Pass identification, replacement for typeid.
private:
  Optional<ThreadSanitizer> TSan;
};

void insertModuleCtor(Module &M) {
  getOrCreateSanitizerCtorAndInitFunctions(
      M, kTsanModuleCtorName, kTsanInitName, /*InitArgTypes=*/{},
      /*InitArgs=*/{},
      // This callback is invoked when the functions are created the first
      // time. Hook them into the global ctors list in that case:
      [&](Function *Ctor, FunctionCallee) { appendToGlobalCtors(M, Ctor, 0); });
}

} // namespace

PreservedAnalyses ThreadSanitizerPass::run(Function &F,
                                           FunctionAnalysisManager &FAM) {
  ThreadSanitizer TSan;
  if (TSan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F)))
    return PreservedAnalyses::none();
  return PreservedAnalyses::all();
}

PreservedAnalyses ThreadSanitizerPass::run(Module &M,
                                           ModuleAnalysisManager &MAM) {
  insertModuleCtor(M);
  return PreservedAnalyses::none();
}

char ThreadSanitizerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(ThreadSanitizerLegacyPass, "tsan",
                      "ThreadSanitizer: detects data races.", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ThreadSanitizerLegacyPass, "tsan",
                    "ThreadSanitizer: detects data races.", false, false)

StringRef ThreadSanitizerLegacyPass::getPassName() const {
  return "ThreadSanitizerLegacyPass";
}

void ThreadSanitizerLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetLibraryInfoWrapperPass>();
}

bool ThreadSanitizerLegacyPass::doInitialization(Module &M) {
  insertModuleCtor(M);
  TSan.emplace();
  return true;
}

bool ThreadSanitizerLegacyPass::runOnFunction(Function &F) {
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  TSan->sanitizeFunction(F, TLI);
  return true;
}

FunctionPass *llvm::createThreadSanitizerLegacyPassPass() {
  return new ThreadSanitizerLegacyPass();
}

void ThreadSanitizer::initialize(Module &M) {
  const DataLayout &DL = M.getDataLayout();
  IntptrTy = DL.getIntPtrType(M.getContext());

  IRBuilder<> IRB(M.getContext());
  AttributeList Attr;
  Attr = Attr.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                           Attribute::NoUnwind);
  // Initialize the callbacks.
  TsanFuncEntry = M.getOrInsertFunction("__tsan_func_entry", Attr,
                                        IRB.getVoidTy(), IRB.getInt8PtrTy());
  TsanFuncExit =
      M.getOrInsertFunction("__tsan_func_exit", Attr, IRB.getVoidTy());
  TsanIgnoreBegin = M.getOrInsertFunction("__tsan_ignore_thread_begin", Attr,
                                          IRB.getVoidTy());
  TsanIgnoreEnd =
      M.getOrInsertFunction("__tsan_ignore_thread_end", Attr, IRB.getVoidTy());
  IntegerType *OrdTy = IRB.getInt32Ty();
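  // For example, i == 2 below gives ByteSize == 4 and BitSize == 32,
  // producing callbacks such as __tsan_read4, __tsan_unaligned_write4 and
  // __tsan_atomic32_load.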
  for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
    const unsigned ByteSize = 1U << i;
    const unsigned BitSize = ByteSize * 8;
    std::string ByteSizeStr = utostr(ByteSize);
    std::string BitSizeStr = utostr(BitSize);
    SmallString<32> ReadName("__tsan_read" + ByteSizeStr);
    TsanRead[i] = M.getOrInsertFunction(ReadName, Attr, IRB.getVoidTy(),
                                        IRB.getInt8PtrTy());

    SmallString<32> WriteName("__tsan_write" + ByteSizeStr);
    TsanWrite[i] = M.getOrInsertFunction(WriteName, Attr, IRB.getVoidTy(),
                                         IRB.getInt8PtrTy());

    SmallString<64> UnalignedReadName("__tsan_unaligned_read" + ByteSizeStr);
    TsanUnalignedRead[i] = M.getOrInsertFunction(
        UnalignedReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> UnalignedWriteName("__tsan_unaligned_write" + ByteSizeStr);
    TsanUnalignedWrite[i] = M.getOrInsertFunction(
        UnalignedWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> VolatileReadName("__tsan_volatile_read" + ByteSizeStr);
    TsanVolatileRead[i] = M.getOrInsertFunction(
        VolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> VolatileWriteName("__tsan_volatile_write" + ByteSizeStr);
    TsanVolatileWrite[i] = M.getOrInsertFunction(
        VolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> UnalignedVolatileReadName("__tsan_unaligned_volatile_read" +
                                              ByteSizeStr);
    TsanUnalignedVolatileRead[i] = M.getOrInsertFunction(
        UnalignedVolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> UnalignedVolatileWriteName(
        "__tsan_unaligned_volatile_write" + ByteSizeStr);
    TsanUnalignedVolatileWrite[i] = M.getOrInsertFunction(
        UnalignedVolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> CompoundRWName("__tsan_read_write" + ByteSizeStr);
    TsanCompoundRW[i] = M.getOrInsertFunction(
        CompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> UnalignedCompoundRWName("__tsan_unaligned_read_write" +
                                            ByteSizeStr);
    TsanUnalignedCompoundRW[i] = M.getOrInsertFunction(
        UnalignedCompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    SmallString<32> AtomicLoadName("__tsan_atomic" + BitSizeStr + "_load");
    TsanAtomicLoad[i] =
        M.getOrInsertFunction(AtomicLoadName, Attr, Ty, PtrTy, OrdTy);

    SmallString<32> AtomicStoreName("__tsan_atomic" + BitSizeStr + "_store");
    TsanAtomicStore[i] = M.getOrInsertFunction(
        AtomicStoreName, Attr, IRB.getVoidTy(), PtrTy, Ty, OrdTy);

    for (unsigned Op = AtomicRMWInst::FIRST_BINOP;
         Op <= AtomicRMWInst::LAST_BINOP; ++Op) {
      TsanAtomicRMW[Op][i] = nullptr;
      const char *NamePart = nullptr;
      if (Op == AtomicRMWInst::Xchg)
        NamePart = "_exchange";
      else if (Op == AtomicRMWInst::Add)
        NamePart = "_fetch_add";
      else if (Op == AtomicRMWInst::Sub)
        NamePart = "_fetch_sub";
      else if (Op == AtomicRMWInst::And)
        NamePart = "_fetch_and";
      else if (Op == AtomicRMWInst::Or)
        NamePart = "_fetch_or";
      else if (Op == AtomicRMWInst::Xor)
        NamePart = "_fetch_xor";
      else if (Op == AtomicRMWInst::Nand)
        NamePart = "_fetch_nand";
      else
        continue;
      SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
      TsanAtomicRMW[Op][i] =
          M.getOrInsertFunction(RMWName, Attr, Ty, PtrTy, Ty, OrdTy);
    }

    SmallString<32> AtomicCASName("__tsan_atomic" + BitSizeStr +
                                  "_compare_exchange_val");
    TsanAtomicCAS[i] = M.getOrInsertFunction(AtomicCASName, Attr, Ty, PtrTy, Ty,
                                             Ty, OrdTy, OrdTy);
  }
  TsanVptrUpdate =
      M.getOrInsertFunction("__tsan_vptr_update", Attr, IRB.getVoidTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy());
  TsanVptrLoad = M.getOrInsertFunction("__tsan_vptr_read", Attr,
                                       IRB.getVoidTy(), IRB.getInt8PtrTy());
  TsanAtomicThreadFence = M.getOrInsertFunction("__tsan_atomic_thread_fence",
                                                Attr, IRB.getVoidTy(), OrdTy);
  TsanAtomicSignalFence = M.getOrInsertFunction("__tsan_atomic_signal_fence",
                                                Attr, IRB.getVoidTy(), OrdTy);

  MemmoveFn =
      M.getOrInsertFunction("memmove", Attr, IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
  MemcpyFn =
      M.getOrInsertFunction("memcpy", Attr, IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
  MemsetFn =
      M.getOrInsertFunction("memset", Attr, IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy);
}

static bool isVtableAccess(Instruction *I) {
  if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
    return Tag->isTBAAVtableAccess();
  return false;
}

// Do not instrument known races/"benign races" that come from compiler
// instrumentation. The user has no way of suppressing them.
static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
  // Peel off GEPs and BitCasts.
  Addr = Addr->stripInBoundsOffsets();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->hasSection()) {
      StringRef SectionName = GV->getSection();
      // Check if the global is in the PGO counters section.
      auto OF = Triple(M->getTargetTriple()).getObjectFormat();
      if (SectionName.endswith(
              getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
        return false;
    }

    // Check if the global is private gcov data.
    if (GV->getName().startswith("__llvm_gcov") ||
        GV->getName().startswith("__llvm_gcda"))
      return false;
  }

  // Do not instrument accesses from different address spaces; we cannot deal
  // with them.
  if (Addr) {
    Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
    if (PtrTy->getPointerAddressSpace() != 0)
      return false;
  }

  return true;
}

bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
  // If this is a GEP, just analyze its pointer operand.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
    Addr = GEP->getPointerOperand();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->isConstant()) {
      // Reads from constant globals cannot race with any writes.
      NumOmittedReadsFromConstantGlobals++;
      return true;
    }
  } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
    if (isVtableAccess(L)) {
      // Reads from a vtable pointer cannot race with any writes.
      NumOmittedReadsFromVtable++;
      return true;
    }
  }
  return false;
}

// Instrumenting some of the accesses may be proven redundant.
// Currently handled:
//  - read-before-write (within the same BB, no calls between; see the
//    example below)
//  - variables that are not captured
//
// We do not handle some of the patterns that should not survive
// after the classic compiler optimizations.
// E.g. two reads from the same temp should be eliminated by CSE,
// two writes should be eliminated by DSE, etc.
//
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
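// Example (sketch) of the read-before-write case:
//   %1 = load i32, i32* %p
//   %2 = add i32 %1, 1
//   store i32 %2, i32* %p
// The read instrumentation for %1 is omitted; with
// -tsan-compound-read-before-write the store is instrumented with
// __tsan_read_write4 instead of __tsan_write4.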
void ThreadSanitizer::chooseInstructionsToInstrument(
    SmallVectorImpl<Instruction *> &Local,
    SmallVectorImpl<InstructionInfo> &All, const DataLayout &DL) {
  DenseMap<Value *, size_t> WriteTargets; // Map of addresses to index in All
  // Iterate from the end.
  for (Instruction *I : reverse(Local)) {
    const bool IsWrite = isa<StoreInst>(*I);
    Value *Addr = IsWrite ? cast<StoreInst>(I)->getPointerOperand()
                          : cast<LoadInst>(I)->getPointerOperand();

    if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
      continue;

    if (!IsWrite) {
      const auto WriteEntry = WriteTargets.find(Addr);
      if (!ClInstrumentReadBeforeWrite && WriteEntry != WriteTargets.end()) {
        auto &WI = All[WriteEntry->second];
        // If we distinguish volatile accesses and if either the read or write
        // is volatile, do not omit any instrumentation.
        const bool AnyVolatile =
            ClDistinguishVolatile && (cast<LoadInst>(I)->isVolatile() ||
                                      cast<StoreInst>(WI.Inst)->isVolatile());
        if (!AnyVolatile) {
          // We will write to this temp, so no reason to analyze the read.
          // Mark the write instruction as compound.
          WI.Flags |= InstructionInfo::kCompoundRW;
          NumOmittedReadsBeforeWrite++;
          continue;
        }
      }

      if (addrPointsToConstantData(Addr)) {
        // Addr points to some constant data -- it cannot race with any writes.
        continue;
      }
    }

    if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
        !PointerMayBeCaptured(Addr, true, true)) {
      // The variable is addressable but not captured, so it cannot be
      // referenced from a different thread and participate in a data race
      // (see llvm/Analysis/CaptureTracking.h for details).
      NumOmittedNonCaptured++;
      continue;
    }

    // Instrument this instruction.
    All.emplace_back(I);
    if (IsWrite) {
      // For read-before-write and compound instrumentation we only need one
      // write target, and we can override any previous entry if it exists.
      WriteTargets[Addr] = All.size() - 1;
    }
  }
  Local.clear();
}

static bool isAtomic(Instruction *I) {
  // TODO: Ask TTI whether synchronization scope is between threads.
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isAtomic() && LI->getSyncScopeID() != SyncScope::SingleThread;
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isAtomic() && SI->getSyncScopeID() != SyncScope::SingleThread;
  if (isa<AtomicRMWInst>(I))
    return true;
  if (isa<AtomicCmpXchgInst>(I))
    return true;
  if (isa<FenceInst>(I))
    return true;
  return false;
}

void ThreadSanitizer::InsertRuntimeIgnores(Function &F) {
  IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
  IRB.CreateCall(TsanIgnoreBegin);
  EscapeEnumerator EE(F, "tsan_ignore_cleanup", ClHandleCxxExceptions);
  while (IRBuilder<> *AtExit = EE.Next()) {
    AtExit->CreateCall(TsanIgnoreEnd);
  }
}

bool ThreadSanitizer::sanitizeFunction(Function &F,
                                       const TargetLibraryInfo &TLI) {
  // This is required to prevent instrumenting the call to __tsan_init from
  // within the module constructor.
  if (F.getName() == kTsanModuleCtorName)
    return false;
  // Naked functions cannot have prologue/epilogue
  // (__tsan_func_entry/__tsan_func_exit) generated, so don't instrument them
  // at all.
  if (F.hasFnAttribute(Attribute::Naked))
    return false;
  initialize(*F.getParent());
  SmallVector<InstructionInfo, 8> AllLoadsAndStores;
  SmallVector<Instruction *, 8> LocalLoadsAndStores;
  SmallVector<Instruction *, 8> AtomicAccesses;
  SmallVector<Instruction *, 8> MemIntrinCalls;
  bool Res = false;
  bool HasCalls = false;
  bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);
  const DataLayout &DL = F.getParent()->getDataLayout();

  // Traverse all instructions, collect loads/stores/returns, check for calls.
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (isAtomic(&Inst))
        AtomicAccesses.push_back(&Inst);
      else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
        LocalLoadsAndStores.push_back(&Inst);
      else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
        if (CallInst *CI = dyn_cast<CallInst>(&Inst))
          maybeMarkSanitizerLibraryCallNoBuiltin(CI, &TLI);
        if (isa<MemIntrinsic>(Inst))
          MemIntrinCalls.push_back(&Inst);
        HasCalls = true;
        chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
                                       DL);
      }
    }
    chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
  }

  // We have collected all loads and stores.
  // FIXME: many of these accesses do not need to be checked for races
  // (e.g. variables that do not escape, etc).

  // Instrument memory accesses only if we want to report bugs in the function.
  if (ClInstrumentMemoryAccesses && SanitizeFunction)
    for (const auto &II : AllLoadsAndStores) {
      Res |= instrumentLoadOrStore(II, DL);
    }

  // Instrument atomic memory accesses in any case (they can be used to
  // implement synchronization).
  if (ClInstrumentAtomics)
    for (auto Inst : AtomicAccesses) {
      Res |= instrumentAtomic(Inst, DL);
    }

  if (ClInstrumentMemIntrinsics && SanitizeFunction)
    for (auto Inst : MemIntrinCalls) {
      Res |= instrumentMemIntrinsic(Inst);
    }

  if (F.hasFnAttribute("sanitize_thread_no_checking_at_run_time")) {
    assert(!F.hasFnAttribute(Attribute::SanitizeThread));
    if (HasCalls)
      InsertRuntimeIgnores(F);
  }

  // Instrument function entry/exit points if there were instrumented accesses.
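  // __tsan_func_entry receives the function's return address (a PC in the
  // caller), which the run-time uses when reconstructing call stacks in
  // reports.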
  if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
    IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
    Value *ReturnAddress = IRB.CreateCall(
        Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
        IRB.getInt32(0));
    IRB.CreateCall(TsanFuncEntry, ReturnAddress);

    EscapeEnumerator EE(F, "tsan_cleanup", ClHandleCxxExceptions);
    while (IRBuilder<> *AtExit = EE.Next()) {
      AtExit->CreateCall(TsanFuncExit, {});
    }
    Res = true;
  }
  return Res;
}

bool ThreadSanitizer::instrumentLoadOrStore(const InstructionInfo &II,
                                            const DataLayout &DL) {
  IRBuilder<> IRB(II.Inst);
  const bool IsWrite = isa<StoreInst>(*II.Inst);
  Value *Addr = IsWrite ? cast<StoreInst>(II.Inst)->getPointerOperand()
                        : cast<LoadInst>(II.Inst)->getPointerOperand();

  // swifterror memory addresses are mem2reg promoted by instruction selection.
  // As such they cannot have regular uses like an instrumentation function and
  // it makes no sense to track them as memory.
  if (Addr->isSwiftError())
    return false;

  int Idx = getMemoryAccessFuncIndex(Addr, DL);
  if (Idx < 0)
    return false;
  if (IsWrite && isVtableAccess(II.Inst)) {
    LLVM_DEBUG(dbgs() << "  VPTR : " << *II.Inst << "\n");
    Value *StoredValue = cast<StoreInst>(II.Inst)->getValueOperand();
    // StoredValue may be a vector type if we are storing several vptrs at once.
    // In this case, just take the first element of the vector since this is
    // enough to find vptr races.
    if (isa<VectorType>(StoredValue->getType()))
      StoredValue = IRB.CreateExtractElement(
          StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
    if (StoredValue->getType()->isIntegerTy())
      StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
    // Call TsanVptrUpdate.
    IRB.CreateCall(TsanVptrUpdate,
                   {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                    IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
    NumInstrumentedVtableWrites++;
    return true;
  }
  if (!IsWrite && isVtableAccess(II.Inst)) {
    IRB.CreateCall(TsanVptrLoad,
                   IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
    NumInstrumentedVtableReads++;
    return true;
  }

  const unsigned Alignment = IsWrite ? cast<StoreInst>(II.Inst)->getAlignment()
                                     : cast<LoadInst>(II.Inst)->getAlignment();
  const bool IsCompoundRW =
      ClCompoundReadBeforeWrite && (II.Flags & InstructionInfo::kCompoundRW);
  const bool IsVolatile = ClDistinguishVolatile &&
                          (IsWrite ? cast<StoreInst>(II.Inst)->isVolatile()
                                   : cast<LoadInst>(II.Inst)->isVolatile());
  assert((!IsVolatile || !IsCompoundRW) && "Compound volatile invalid!");

  Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
  const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  FunctionCallee OnAccessFunc = nullptr;
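  // Pick an aligned callback when the access is known to be suitably aligned
  // (an alignment of 0 means the ABI alignment of the type); otherwise fall
  // back to the __tsan_unaligned_* variants.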
  if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0) {
    if (IsCompoundRW)
      OnAccessFunc = TsanCompoundRW[Idx];
    else if (IsVolatile)
      OnAccessFunc = IsWrite ? TsanVolatileWrite[Idx] : TsanVolatileRead[Idx];
    else
      OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
  } else {
    if (IsCompoundRW)
      OnAccessFunc = TsanUnalignedCompoundRW[Idx];
    else if (IsVolatile)
      OnAccessFunc = IsWrite ? TsanUnalignedVolatileWrite[Idx]
                             : TsanUnalignedVolatileRead[Idx];
    else
      OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
  }
  IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
  if (IsCompoundRW || IsWrite)
    NumInstrumentedWrites++;
  if (IsCompoundRW || !IsWrite)
    NumInstrumentedReads++;
  return true;
}

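// The numeric encoding below matches the __tsan_memory_order enum in the
// run-time (which mirrors the C/C++11 memory_order values):
// relaxed/monotonic = 0, consume = 1, acquire = 2, release = 3,
// acq_rel = 4, seq_cst = 5.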
static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
  uint32_t v = 0;
  switch (ord) {
  case AtomicOrdering::NotAtomic:
    llvm_unreachable("unexpected atomic ordering!");
  case AtomicOrdering::Unordered:              LLVM_FALLTHROUGH;
  case AtomicOrdering::Monotonic:              v = 0; break;
  // Not specified yet:
  // case AtomicOrdering::Consume:             v = 1; break;
  case AtomicOrdering::Acquire:                v = 2; break;
  case AtomicOrdering::Release:                v = 3; break;
  case AtomicOrdering::AcquireRelease:         v = 4; break;
  case AtomicOrdering::SequentiallyConsistent: v = 5; break;
  }
  return IRB->getInt32(v);
}

// If a memset intrinsic gets inlined by codegen, we will miss races on it.
// So, we either need to ensure the intrinsic is not inlined, or instrument it.
// We do not instrument memset/memmove/memcpy intrinsics (too complicated),
// instead we simply replace them with regular function calls, which are then
// intercepted by the run-time.
// Since tsan is running after everyone else, the calls should not be
// replaced back with intrinsics. If that becomes wrong at some point,
// we will need to call e.g. __tsan_memset to avoid the intrinsics.
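// For example (sketch), the intrinsic call
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 %n, i1 false)
// is replaced with a plain library call that the run-time intercepts:
//   call i8* @memset(i8* %p, i32 0, i64 %n)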
bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
  IRBuilder<> IRB(I);
  if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
    IRB.CreateCall(
        MemsetFn,
        {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
    I->eraseFromParent();
  } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
    IRB.CreateCall(
        isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
        {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
    I->eraseFromParent();
  }
  return false;
}

// Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
// standards. For background see C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

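// For example (sketch), an acquire load
//   %v = load atomic i32, i32* %p acquire, align 4
// becomes a call into the run-time, with the ordering encoded as in
// createOrdering():
//   %v = call i32 @__tsan_atomic32_load(i32* %p, i32 2)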
bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
  IRBuilder<> IRB(I);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    Value *Addr = LI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     createOrdering(&IRB, LI->getOrdering())};
    Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
    Value *C = IRB.CreateCall(TsanAtomicLoad[Idx], Args);
    Value *Cast = IRB.CreateBitOrPointerCast(C, OrigTy);
    I->replaceAllUsesWith(Cast);
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    Value *Addr = SI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateBitOrPointerCast(SI->getValueOperand(), Ty),
                     createOrdering(&IRB, SI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicStore[Idx], Args);
    ReplaceInstWithInst(I, C);
  } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    Value *Addr = RMWI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    FunctionCallee F = TsanAtomicRMW[RMWI->getOperation()][Idx];
    if (!F)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
                     createOrdering(&IRB, RMWI->getOrdering())};
    CallInst *C = CallInst::Create(F, Args);
    ReplaceInstWithInst(I, C);
  } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Addr = CASI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *CmpOperand =
        IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
    Value *NewOperand =
        IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     CmpOperand,
                     NewOperand,
                     createOrdering(&IRB, CASI->getSuccessOrdering()),
                     createOrdering(&IRB, CASI->getFailureOrdering())};
    CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args);
    Value *Success = IRB.CreateICmpEQ(C, CmpOperand);
    Value *OldVal = C;
    Type *OrigOldValTy = CASI->getNewValOperand()->getType();
    if (Ty != OrigOldValTy) {
      // The value is a pointer, so we need to cast the return value.
      OldVal = IRB.CreateIntToPtr(C, OrigOldValTy);
    }

    Value *Res =
        IRB.CreateInsertValue(UndefValue::get(CASI->getType()), OldVal, 0);
    Res = IRB.CreateInsertValue(Res, Success, 1);

    I->replaceAllUsesWith(Res);
    I->eraseFromParent();
  } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
    Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
    FunctionCallee F = FI->getSyncScopeID() == SyncScope::SingleThread
                           ? TsanAtomicSignalFence
                           : TsanAtomicThreadFence;
    CallInst *C = CallInst::Create(F, Args);
    ReplaceInstWithInst(I, C);
  }
  return true;
}

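// Maps the byte size of an access to an index into the callback arrays.
// For example, a 4-byte access has TypeSize == 32 and yields Idx == 2,
// selecting callbacks such as TsanRead[2], i.e. __tsan_read4.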
int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr,
                                              const DataLayout &DL) {
  Type *OrigPtrTy = Addr->getType();
  Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
  assert(OrigTy->isSized());
  uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  if (TypeSize != 8 && TypeSize != 16 &&
      TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
    NumAccessesWithBadSize++;
    // Ignore all unusual sizes.
    return -1;
  }
  size_t Idx = countTrailingZeros(TypeSize / 8);
  assert(Idx < kNumberOfAccessSizes);
  return Idx;
}