
[dfsan] Fix clang-tidy warnings

This addresses ~50 clang-tidy warnings in the dfsan instrumentation pass.
It also contains some refactoring (all non-functional changes) to eliminate a few variables and simplify the code.

Reviewed By: stephan.yichao.zhao

Differential Revision: https://reviews.llvm.org/D97714
George Balatsouras 2021-03-02 14:56:16 -08:00
parent fc4e42741b
commit 5ee5202199
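
Most of the changes below are mechanical renames toward LLVM's identifier-naming rules: variables and constants in CamelCase with no k prefix, functions in camelBack. They presumably come from clang-tidy's readability-identifier-naming check under LLVM's configuration; the commit message only says "clang-tidy warnings", so the exact check is an assumption. A minimal before/after sketch of the pattern, reusing names that appear in this diff (Worklist and visit are placeholders, not names from the pass):

    // Before: the kinds of names clang-tidy flags under LLVM's configuration.
    static const unsigned kArgTLSSize = 800;                      // constant with a k prefix
    static StringRef GetGlobalTypeString(const GlobalValue &G);   // UpperCamelCase function
    for (unsigned i = 0, ie = Worklist.size(); i < ie; ++i)       // lowercase loop counters
      visit(Worklist[i]);

    // After: LLVM style. Same code, only the identifiers change.
    static const unsigned ArgTLSSize = 800;
    static StringRef getGlobalTypeString(const GlobalValue &G);
    for (unsigned I = 0, IE = Worklist.size(); I < IE; ++I)
      visit(Worklist[I]);

The remaining hunks are the non-functional simplifications the message mentions, e.g. replacing an if/else chain over the target architecture with a switch, and folding manual iterator loops into range-based for loops.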


@@ -58,6 +58,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
@@ -109,19 +110,19 @@
using namespace llvm;
// This must be consistent with ShadowWidthBits.
static const Align kShadowTLSAlignment = Align(2);
static const Align ShadowTLSAlignment = Align(2);
static const Align kMinOriginAlignment = Align(4);
static const Align MinOriginAlignment = Align(4);
// The size of TLS variables. These constants must be kept in sync with the ones
// in dfsan.cpp.
static const unsigned kArgTLSSize = 800;
static const unsigned kRetvalTLSSize = 800;
static const unsigned ArgTLSSize = 800;
static const unsigned RetvalTLSSize = 800;
// External symbol to be used when generating the shadow address for
// architectures with multiple VMAs. Instead of using a constant integer
// the runtime will set the external mask based on the VMA range.
const char kDFSanExternShadowPtrMask[] = "__dfsan_shadow_ptr_mask";
const char DFSanExternShadowPtrMask[] = "__dfsan_shadow_ptr_mask";
// The -dfsan-preserve-alignment flag controls whether this pass assumes that
// alignment requirements provided by the input IR are correct. For example,
@@ -148,10 +149,10 @@ static cl::list<std::string> ClABIListFiles(
// Controls whether the pass uses IA_Args or IA_TLS as the ABI for instrumented
// functions (see DataFlowSanitizer::InstrumentedABI below).
static cl::opt<bool> ClArgsABI(
"dfsan-args-abi",
cl::desc("Use the argument ABI rather than the TLS ABI"),
cl::Hidden);
static cl::opt<bool>
ClArgsABI("dfsan-args-abi",
cl::desc("Use the argument ABI rather than the TLS ABI"),
cl::Hidden);
// Controls whether the pass includes or ignores the labels of pointers in load
// instructions.
@@ -213,7 +214,7 @@ static cl::opt<int> ClTrackOrigins("dfsan-track-origins",
cl::desc("Track origins of labels"),
cl::Hidden, cl::init(0));
static StringRef GetGlobalTypeString(const GlobalValue &G) {
static StringRef getGlobalTypeString(const GlobalValue &G) {
// Types of GlobalVariables are always pointer types.
Type *GType = G.getValueType();
// For now we support excluding struct types only.
@@ -229,7 +230,7 @@ namespace {
class DFSanABIList {
std::unique_ptr<SpecialCaseList> SCL;
public:
public:
DFSanABIList() = default;
void set(std::unique_ptr<SpecialCaseList> List) { SCL = std::move(List); }
@@ -253,7 +254,7 @@ class DFSanABIList {
return SCL->inSection("dataflow", "fun", GA.getName(), Category);
return SCL->inSection("dataflow", "global", GA.getName(), Category) ||
SCL->inSection("dataflow", "type", GetGlobalTypeString(GA),
SCL->inSection("dataflow", "type", getGlobalTypeString(GA),
Category);
}
@@ -267,20 +268,18 @@ class DFSanABIList {
/// function type into another. This struct is immutable. It holds metadata
/// useful for updating calls of the old function to the new type.
struct TransformedFunction {
TransformedFunction(FunctionType* OriginalType,
FunctionType* TransformedType,
TransformedFunction(FunctionType *OriginalType, FunctionType *TransformedType,
std::vector<unsigned> ArgumentIndexMapping)
: OriginalType(OriginalType),
TransformedType(TransformedType),
: OriginalType(OriginalType), TransformedType(TransformedType),
ArgumentIndexMapping(ArgumentIndexMapping) {}
// Disallow copies.
TransformedFunction(const TransformedFunction&) = delete;
TransformedFunction& operator=(const TransformedFunction&) = delete;
TransformedFunction(const TransformedFunction &) = delete;
TransformedFunction &operator=(const TransformedFunction &) = delete;
// Allow moves.
TransformedFunction(TransformedFunction&&) = default;
TransformedFunction& operator=(TransformedFunction&&) = default;
TransformedFunction(TransformedFunction &&) = default;
TransformedFunction &operator=(TransformedFunction &&) = default;
/// Type of the function before the transformation.
FunctionType *OriginalType;
@@ -299,9 +298,9 @@ struct TransformedFunction {
/// Given function attributes from a call site for the original function,
/// return function attributes appropriate for a call to the transformed
/// function.
AttributeList TransformFunctionAttributes(
const TransformedFunction& TransformedFunction,
LLVMContext& Ctx, AttributeList CallSiteAttrs) {
AttributeList
transformFunctionAttributes(const TransformedFunction &TransformedFunction,
LLVMContext &Ctx, AttributeList CallSiteAttrs) {
// Construct a vector of AttributeSet for each function argument.
std::vector<llvm::AttributeSet> ArgumentAttributes(
@@ -310,23 +309,22 @@ AttributeList TransformFunctionAttributes(
// Copy attributes from the parameter of the original function to the
// transformed version. 'ArgumentIndexMapping' holds the mapping from
// old argument position to new.
for (unsigned i=0, ie = TransformedFunction.ArgumentIndexMapping.size();
i < ie; ++i) {
unsigned TransformedIndex = TransformedFunction.ArgumentIndexMapping[i];
ArgumentAttributes[TransformedIndex] = CallSiteAttrs.getParamAttributes(i);
for (unsigned I = 0, IE = TransformedFunction.ArgumentIndexMapping.size();
I < IE; ++I) {
unsigned TransformedIndex = TransformedFunction.ArgumentIndexMapping[I];
ArgumentAttributes[TransformedIndex] = CallSiteAttrs.getParamAttributes(I);
}
// Copy annotations on varargs arguments.
for (unsigned i = TransformedFunction.OriginalType->getNumParams(),
ie = CallSiteAttrs.getNumAttrSets(); i<ie; ++i) {
ArgumentAttributes.push_back(CallSiteAttrs.getParamAttributes(i));
for (unsigned I = TransformedFunction.OriginalType->getNumParams(),
IE = CallSiteAttrs.getNumAttrSets();
I < IE; ++I) {
ArgumentAttributes.push_back(CallSiteAttrs.getParamAttributes(I));
}
return AttributeList::get(
Ctx,
CallSiteAttrs.getFnAttributes(),
CallSiteAttrs.getRetAttributes(),
llvm::makeArrayRef(ArgumentAttributes));
return AttributeList::get(Ctx, CallSiteAttrs.getFnAttributes(),
CallSiteAttrs.getRetAttributes(),
llvm::makeArrayRef(ArgumentAttributes));
}
class DataFlowSanitizer {
@@ -489,7 +487,7 @@ class DataFlowSanitizer {
/// Returns the shadow type of V's type.
Type *getShadowTy(Value *V);
const uint64_t kNumOfElementsInArgOrgTLS = kArgTLSSize / OriginWidthBytes;
const uint64_t NumOfElementsInArgOrgTLS = ArgTLSSize / OriginWidthBytes;
public:
DataFlowSanitizer(const std::vector<std::string> &ABIListFiles);
@@ -722,17 +720,16 @@ TransformedFunction DataFlowSanitizer::getCustomFunctionType(FunctionType *T) {
// at call sites can be updated.
std::vector<unsigned> ArgumentIndexMapping;
for (unsigned I = 0, E = T->getNumParams(); I != E; ++I) {
Type *Param_type = T->getParamType(I);
Type *ParamType = T->getParamType(I);
FunctionType *FT;
if (isa<PointerType>(Param_type) &&
(FT = dyn_cast<FunctionType>(
cast<PointerType>(Param_type)->getElementType()))) {
if (isa<PointerType>(ParamType) &&
(FT = dyn_cast<FunctionType>(ParamType->getPointerElementType()))) {
ArgumentIndexMapping.push_back(ArgTypes.size());
ArgTypes.push_back(getTrampolineFunctionType(FT)->getPointerTo());
ArgTypes.push_back(Type::getInt8PtrTy(*Ctx));
} else {
ArgumentIndexMapping.push_back(ArgTypes.size());
ArgTypes.push_back(Param_type);
ArgTypes.push_back(ParamType);
}
}
for (unsigned I = 0, E = T->getNumParams(); I != E; ++I)
@@ -772,10 +769,10 @@ bool DataFlowSanitizer::isZeroShadow(Value *V) {
}
bool DataFlowSanitizer::shouldTrackOrigins() {
static const bool kShouldTrackOrigins =
static const bool ShouldTrackOrigins =
ClTrackOrigins && getInstrumentedABI() == DataFlowSanitizer::IA_TLS &&
ClFast16Labels;
return kShouldTrackOrigins;
return ShouldTrackOrigins;
}
bool DataFlowSanitizer::shouldTrackFieldsAndIndices() {
@@ -922,11 +919,6 @@ Type *DataFlowSanitizer::getShadowTy(Value *V) {
bool DataFlowSanitizer::init(Module &M) {
Triple TargetTriple(M.getTargetTriple());
bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
bool IsMIPS64 = TargetTriple.isMIPS64();
bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64 ||
TargetTriple.getArch() == Triple::aarch64_be;
const DataLayout &DL = M.getDataLayout();
Mod = &M;
@@ -941,15 +933,23 @@ bool DataFlowSanitizer::init(Module &M) {
ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidthBytes);
OriginBase = ConstantInt::get(IntptrTy, 0x200000000000LL);
ZeroOrigin = ConstantInt::getSigned(OriginTy, 0);
if (IsX86_64)
switch (TargetTriple.getArch()) {
case Triple::x86_64:
ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0x700000000000LL);
else if (IsMIPS64)
break;
case Triple::mips64:
case Triple::mips64el:
ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0xF000000000LL);
// AArch64 supports multiple VMAs and the shadow mask is set at runtime.
else if (IsAArch64)
break;
case Triple::aarch64:
case Triple::aarch64_be:
// AArch64 supports multiple VMAs and the shadow mask is set at runtime.
DFSanRuntimeShadowMask = true;
else
break;
default:
report_fatal_error("unsupported triple");
}
Type *DFSanUnionArgs[2] = {PrimitiveShadowTy, PrimitiveShadowTy};
DFSanUnionFnTy =
@@ -1059,10 +1059,9 @@ DataFlowSanitizer::buildWrapperFunction(Function *F, StringRef NewFName,
BB);
new UnreachableInst(*Ctx, BB);
} else {
std::vector<Value *> Args;
unsigned n = FT->getNumParams();
for (Function::arg_iterator ai = NewF->arg_begin(); n != 0; ++ai, --n)
Args.push_back(&*ai);
auto ArgIt = pointer_iterator<Argument *>(NewF->arg_begin());
std::vector<Value *> Args(ArgIt, ArgIt + FT->getNumParams());
CallInst *CI = CallInst::Create(F, Args, "", BB);
if (FT->getReturnType()->isVoidTy())
ReturnInst::Create(*Ctx, BB);
@@ -1082,16 +1081,13 @@ Constant *DataFlowSanitizer::getOrBuildTrampolineFunction(FunctionType *FT,
F->setLinkage(GlobalValue::LinkOnceODRLinkage);
BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", F);
std::vector<Value *> Args;
Function::arg_iterator AI = F->arg_begin(); ++AI;
Function::arg_iterator AI = F->arg_begin() + 1;
for (unsigned N = FT->getNumParams(); N != 0; ++AI, --N)
Args.push_back(&*AI);
CallInst *CI = CallInst::Create(FT, &*F->arg_begin(), Args, "", BB);
ReturnInst *RI;
Type *RetType = FT->getReturnType();
if (RetType->isVoidTy())
RI = ReturnInst::Create(*Ctx, BB);
else
RI = ReturnInst::Create(*Ctx, CI, BB);
ReturnInst *RI = RetType->isVoidTy() ? ReturnInst::Create(*Ctx, BB)
: ReturnInst::Create(*Ctx, CI, BB);
// F is called by a wrapped custom function with primitive shadows. So
// its arguments and return value need conversion.
@@ -1299,7 +1295,7 @@ bool DataFlowSanitizer::runImpl(Module &M) {
bool Changed = false;
auto getOrInsertGlobal = [this, &Changed](StringRef Name,
auto GetOrInsertGlobal = [this, &Changed](StringRef Name,
Type *Ty) -> Constant * {
Constant *C = Mod->getOrInsertGlobal(Name, Ty);
if (GlobalVariable *G = dyn_cast<GlobalVariable>(C)) {
@@ -1310,15 +1306,15 @@ bool DataFlowSanitizer::runImpl(Module &M) {
};
// These globals must be kept in sync with the ones in dfsan.cpp.
ArgTLS = getOrInsertGlobal(
"__dfsan_arg_tls",
ArrayType::get(Type::getInt64Ty(*Ctx), kArgTLSSize / 8));
RetvalTLS = getOrInsertGlobal(
ArgTLS =
GetOrInsertGlobal("__dfsan_arg_tls",
ArrayType::get(Type::getInt64Ty(*Ctx), ArgTLSSize / 8));
RetvalTLS = GetOrInsertGlobal(
"__dfsan_retval_tls",
ArrayType::get(Type::getInt64Ty(*Ctx), kRetvalTLSSize / 8));
ArgOriginTLSTy = ArrayType::get(OriginTy, kNumOfElementsInArgOrgTLS);
ArgOriginTLS = getOrInsertGlobal("__dfsan_arg_origin_tls", ArgOriginTLSTy);
RetvalOriginTLS = getOrInsertGlobal("__dfsan_retval_origin_tls", OriginTy);
ArrayType::get(Type::getInt64Ty(*Ctx), RetvalTLSSize / 8));
ArgOriginTLSTy = ArrayType::get(OriginTy, NumOfElementsInArgOrgTLS);
ArgOriginTLS = GetOrInsertGlobal("__dfsan_arg_origin_tls", ArgOriginTLSTy);
RetvalOriginTLS = GetOrInsertGlobal("__dfsan_retval_origin_tls", OriginTy);
(void)Mod->getOrInsertGlobal("__dfsan_track_origins", OriginTy, [&] {
Changed = true;
@@ -1331,39 +1327,42 @@ bool DataFlowSanitizer::runImpl(Module &M) {
injectMetadataGlobals(M);
ExternalShadowMask =
Mod->getOrInsertGlobal(kDFSanExternShadowPtrMask, IntptrTy);
Mod->getOrInsertGlobal(DFSanExternShadowPtrMask, IntptrTy);
initializeCallbackFunctions(M);
initializeRuntimeFunctions(M);
std::vector<Function *> FnsToInstrument;
SmallPtrSet<Function *, 2> FnsWithNativeABI;
for (Function &i : M)
if (!i.isIntrinsic() && !DFSanRuntimeFunctions.contains(&i))
FnsToInstrument.push_back(&i);
for (Function &F : M)
if (!F.isIntrinsic() && !DFSanRuntimeFunctions.contains(&F))
FnsToInstrument.push_back(&F);
// Give function aliases prefixes when necessary, and build wrappers where the
// instrumentedness is inconsistent.
for (Module::alias_iterator i = M.alias_begin(), e = M.alias_end(); i != e;) {
GlobalAlias *GA = &*i;
++i;
for (Module::alias_iterator AI = M.alias_begin(), AE = M.alias_end();
AI != AE;) {
GlobalAlias *GA = &*AI;
++AI;
// Don't stop on weak. We assume people aren't playing games with the
// instrumentedness of overridden weak aliases.
if (auto F = dyn_cast<Function>(GA->getBaseObject())) {
bool GAInst = isInstrumented(GA), FInst = isInstrumented(F);
if (GAInst && FInst) {
addGlobalNamePrefix(GA);
} else if (GAInst != FInst) {
// Non-instrumented alias of an instrumented function, or vice versa.
// Replace the alias with a native-ABI wrapper of the aliasee. The pass
// below will take care of instrumenting it.
Function *NewF =
buildWrapperFunction(F, "", GA->getLinkage(), F->getFunctionType());
GA->replaceAllUsesWith(ConstantExpr::getBitCast(NewF, GA->getType()));
NewF->takeName(GA);
GA->eraseFromParent();
FnsToInstrument.push_back(NewF);
}
auto *F = dyn_cast<Function>(GA->getBaseObject());
if (!F)
continue;
bool GAInst = isInstrumented(GA), FInst = isInstrumented(F);
if (GAInst && FInst) {
addGlobalNamePrefix(GA);
} else if (GAInst != FInst) {
// Non-instrumented alias of an instrumented function, or vice versa.
// Replace the alias with a native-ABI wrapper of the aliasee. The pass
// below will take care of instrumenting it.
Function *NewF =
buildWrapperFunction(F, "", GA->getLinkage(), F->getFunctionType());
GA->replaceAllUsesWith(ConstantExpr::getBitCast(NewF, GA->getType()));
NewF->takeName(GA);
GA->eraseFromParent();
FnsToInstrument.push_back(NewF);
}
}
@@ -1372,10 +1371,10 @@ bool DataFlowSanitizer::runImpl(Module &M) {
// First, change the ABI of every function in the module. ABI-listed
// functions keep their original ABI and get a wrapper function.
for (std::vector<Function *>::iterator i = FnsToInstrument.begin(),
e = FnsToInstrument.end();
i != e; ++i) {
Function &F = **i;
for (std::vector<Function *>::iterator FI = FnsToInstrument.begin(),
FE = FnsToInstrument.end();
FI != FE; ++FI) {
Function &F = **FI;
FunctionType *FT = F.getFunctionType();
bool IsZeroArgsVoidRet = (FT->getNumParams() == 0 && !FT->isVarArg() &&
@@ -1414,7 +1413,7 @@ bool DataFlowSanitizer::runImpl(Module &M) {
ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT)));
NewF->takeName(&F);
F.eraseFromParent();
*i = NewF;
*FI = NewF;
addGlobalNamePrefix(NewF);
} else {
addGlobalNamePrefix(&F);
@@ -1423,22 +1422,20 @@ bool DataFlowSanitizer::runImpl(Module &M) {
// Build a wrapper function for F. The wrapper simply calls F, and is
// added to FnsToInstrument so that any instrumentation according to its
// WrapperKind is done in the second pass below.
FunctionType *NewFT = getInstrumentedABI() == IA_Args
? getArgsFunctionType(FT)
: FT;
FunctionType *NewFT =
getInstrumentedABI() == IA_Args ? getArgsFunctionType(FT) : FT;
// If the function being wrapped has local linkage, then preserve the
// function's linkage in the wrapper function.
GlobalValue::LinkageTypes wrapperLinkage =
F.hasLocalLinkage()
? F.getLinkage()
: GlobalValue::LinkOnceODRLinkage;
GlobalValue::LinkageTypes WrapperLinkage =
F.hasLocalLinkage() ? F.getLinkage()
: GlobalValue::LinkOnceODRLinkage;
Function *NewF = buildWrapperFunction(
&F,
(shouldTrackOrigins() ? std::string("dfso$") : std::string("dfsw$")) +
std::string(F.getName()),
wrapperLinkage, NewFT);
WrapperLinkage, NewFT);
if (getInstrumentedABI() == IA_TLS)
NewF->removeAttributes(AttributeList::FunctionIndex, ReadOnlyNoneAttrs);
@@ -1447,7 +1444,7 @@ bool DataFlowSanitizer::runImpl(Module &M) {
F.replaceAllUsesWith(WrappedFnCst);
UnwrappedFnMap[WrappedFnCst] = &F;
*i = NewF;
*FI = NewF;
if (!F.isDeclaration()) {
// This function is probably defining an interposition of an
@@ -1459,34 +1456,34 @@ bool DataFlowSanitizer::runImpl(Module &M) {
// This code needs to rebuild the iterators, as they may be invalidated
// by the push_back, taking care that the new range does not include
// any functions added by this code.
size_t N = i - FnsToInstrument.begin(),
Count = e - FnsToInstrument.begin();
size_t N = FI - FnsToInstrument.begin(),
Count = FE - FnsToInstrument.begin();
FnsToInstrument.push_back(&F);
i = FnsToInstrument.begin() + N;
e = FnsToInstrument.begin() + Count;
FI = FnsToInstrument.begin() + N;
FE = FnsToInstrument.begin() + Count;
}
// Hopefully, nobody will try to indirectly call a vararg
// function... yet.
// Hopefully, nobody will try to indirectly call a vararg
// function... yet.
} else if (FT->isVarArg()) {
UnwrappedFnMap[&F] = &F;
*i = nullptr;
*FI = nullptr;
}
}
for (Function *i : FnsToInstrument) {
if (!i || i->isDeclaration())
for (Function *F : FnsToInstrument) {
if (!F || F->isDeclaration())
continue;
removeUnreachableBlocks(*i);
removeUnreachableBlocks(*F);
DFSanFunction DFSF(*this, i, FnsWithNativeABI.count(i));
DFSanFunction DFSF(*this, F, FnsWithNativeABI.count(F));
// DFSanVisitor may create new basic blocks, which confuses df_iterator.
// Build a copy of the list before iterating over it.
SmallVector<BasicBlock *, 4> BBList(depth_first(&i->getEntryBlock()));
SmallVector<BasicBlock *, 4> BBList(depth_first(&F->getEntryBlock()));
for (BasicBlock *i : BBList) {
Instruction *Inst = &i->front();
for (BasicBlock *BB : BBList) {
Instruction *Inst = &BB->front();
while (true) {
// DFSanVisitor may split the current basic block, changing the current
// instruction's next pointer and moving the next instruction to the
@@ -1507,14 +1504,12 @@ bool DataFlowSanitizer::runImpl(Module &M) {
// until we have visited every block. Therefore, the code that handles phi
// nodes adds them to the PHIFixups list so that they can be properly
// handled here.
for (std::vector<std::pair<PHINode *, PHINode *>>::iterator
i = DFSF.PHIFixups.begin(),
e = DFSF.PHIFixups.end();
i != e; ++i) {
for (unsigned val = 0, n = i->first->getNumIncomingValues(); val != n;
++val) {
i->second->setIncomingValue(
val, DFSF.getShadow(i->first->getIncomingValue(val)));
for (auto PHIFixup : DFSF.PHIFixups) {
PHINode *PN, *ShadowPN;
std::tie(PN, ShadowPN) = PHIFixup;
for (unsigned Val = 0, N = PN->getNumIncomingValues(); Val < N; ++Val) {
ShadowPN->setIncomingValue(Val,
DFSF.getShadow(PN->getIncomingValue(Val)));
}
}
@@ -1578,7 +1573,7 @@ Value *DFSanFunction::getOrigin(Value *V) {
return DFS.ZeroOrigin;
switch (IA) {
case DataFlowSanitizer::IA_TLS: {
if (A->getArgNo() < DFS.kNumOfElementsInArgOrgTLS) {
if (A->getArgNo() < DFS.NumOfElementsInArgOrgTLS) {
Instruction *ArgOriginTLSPos = &*F->getEntryBlock().begin();
IRBuilder<> IRB(ArgOriginTLSPos);
Value *ArgOriginPtr = getArgOriginTLS(A->getArgNo(), IRB);
@@ -1621,20 +1616,20 @@ Value *DFSanFunction::getShadowForTLSArgument(Argument *A) {
unsigned Size = DL.getTypeAllocSize(DFS.getShadowTy(&FArg));
if (A != &FArg) {
ArgOffset += alignTo(Size, kShadowTLSAlignment);
if (ArgOffset > kArgTLSSize)
ArgOffset += alignTo(Size, ShadowTLSAlignment);
if (ArgOffset > ArgTLSSize)
break; // ArgTLS overflows, uses a zero shadow.
continue;
}
if (ArgOffset + Size > kArgTLSSize)
if (ArgOffset + Size > ArgTLSSize)
break; // ArgTLS overflows, uses a zero shadow.
Instruction *ArgTLSPos = &*F->getEntryBlock().begin();
IRBuilder<> IRB(ArgTLSPos);
Value *ArgShadowPtr = getArgTLS(FArg.getType(), ArgOffset, IRB);
return IRB.CreateAlignedLoad(DFS.getShadowTy(&FArg), ArgShadowPtr,
kShadowTLSAlignment);
ShadowTLSAlignment);
}
return DFS.getZeroShadow(A);
@@ -1655,10 +1650,9 @@ Value *DFSanFunction::getShadow(Value *V) {
}
case DataFlowSanitizer::IA_Args: {
unsigned ArgIdx = A->getArgNo() + F->arg_size() / 2;
Function::arg_iterator i = F->arg_begin();
while (ArgIdx--)
++i;
Shadow = &*i;
Function::arg_iterator Arg = F->arg_begin();
std::advance(Arg, ArgIdx);
Shadow = &*Arg;
assert(Shadow->getType() == DFS.PrimitiveShadowTy);
break;
}
@@ -1704,8 +1698,8 @@ DataFlowSanitizer::getShadowOriginAddress(Value *Addr, Align InstAlignment,
const Align Alignment = llvm::assumeAligned(InstAlignment.value());
// When alignment is >= 4, Addr must be aligned to 4, otherwise it is UB.
// So Mask is unnecessary.
if (Alignment < kMinOriginAlignment) {
uint64_t Mask = kMinOriginAlignment.value() - 1;
if (Alignment < MinOriginAlignment) {
uint64_t Mask = MinOriginAlignment.value() - 1;
OriginLong = IRB.CreateAnd(OriginLong, ConstantInt::get(IntptrTy, ~Mask));
}
OriginPtr = IRB.CreateIntToPtr(OriginLong, OriginPtrTy);
@@ -1743,8 +1737,9 @@ Value *DFSanFunction::combineShadows(Value *V1, Value *V2, Instruction *Pos) {
if (std::includes(V1Elems->second.begin(), V1Elems->second.end(),
V2Elems->second.begin(), V2Elems->second.end())) {
return collapseToPrimitiveShadow(V1, Pos);
} else if (std::includes(V2Elems->second.begin(), V2Elems->second.end(),
V1Elems->second.begin(), V1Elems->second.end())) {
}
if (std::includes(V2Elems->second.begin(), V2Elems->second.end(),
V1Elems->second.begin(), V1Elems->second.end())) {
return collapseToPrimitiveShadow(V2, Pos);
}
} else if (V1Elems != ShadowElements.end()) {
@@ -1823,9 +1818,9 @@ Value *DFSanFunction::combineOperandShadows(Instruction *Inst) {
return DFS.getZeroShadow(Inst);
Value *Shadow = getShadow(Inst->getOperand(0));
for (unsigned i = 1, n = Inst->getNumOperands(); i != n; ++i) {
Shadow = combineShadows(Shadow, getShadow(Inst->getOperand(i)), Inst);
}
for (unsigned I = 1, N = Inst->getNumOperands(); I < N; ++I)
Shadow = combineShadows(Shadow, getShadow(Inst->getOperand(I)), Inst);
return expandFromPrimitiveShadow(Inst->getType(), Shadow, Inst);
}
@@ -1985,10 +1980,10 @@ Value *DFSanFunction::loadLegacyShadowFast(Value *ShadowAddr, uint64_t Size,
Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
Instruction *Pos) {
if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
const auto i = AllocaShadowMap.find(AI);
if (i != AllocaShadowMap.end()) {
const auto I = AllocaShadowMap.find(AI);
if (I != AllocaShadowMap.end()) {
IRBuilder<> IRB(Pos);
return IRB.CreateLoad(DFS.PrimitiveShadowTy, i->second);
return IRB.CreateLoad(DFS.PrimitiveShadowTy, I->second);
}
}
@@ -2116,10 +2111,10 @@ void DFSanFunction::storePrimitiveShadow(Value *Addr, uint64_t Size,
Value *PrimitiveShadow,
Instruction *Pos) {
if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
const auto i = AllocaShadowMap.find(AI);
if (i != AllocaShadowMap.end()) {
const auto I = AllocaShadowMap.find(AI);
if (I != AllocaShadowMap.end()) {
IRBuilder<> IRB(Pos);
IRB.CreateStore(PrimitiveShadow, i->second);
IRB.CreateStore(PrimitiveShadow, I->second);
return;
}
}
@@ -2138,10 +2133,10 @@ void DFSanFunction::storePrimitiveShadow(Value *Addr, uint64_t Size,
auto *ShadowVecTy =
FixedVectorType::get(DFS.PrimitiveShadowTy, ShadowVecSize);
Value *ShadowVec = UndefValue::get(ShadowVecTy);
for (unsigned i = 0; i != ShadowVecSize; ++i) {
for (unsigned I = 0; I != ShadowVecSize; ++I) {
ShadowVec = IRB.CreateInsertElement(
ShadowVec, PrimitiveShadow,
ConstantInt::get(Type::getInt32Ty(*DFS.Ctx), i));
ConstantInt::get(Type::getInt32Ty(*DFS.Ctx), I));
}
Value *ShadowVecAddr =
IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowVecTy));
@@ -2431,11 +2426,11 @@ void DFSanVisitor::visitReturnInst(ReturnInst &RI) {
Type *RT = DFSF.F->getFunctionType()->getReturnType();
unsigned Size =
getDataLayout().getTypeAllocSize(DFSF.DFS.getShadowTy(RT));
if (Size <= kRetvalTLSSize) {
if (Size <= RetvalTLSSize) {
// If the size overflows, stores nothing. At callsite, oversized return
// shadows are set to zero.
IRB.CreateAlignedStore(S, DFSF.getRetvalTLS(RT, IRB),
kShadowTLSAlignment);
ShadowTLSAlignment);
}
if (DFSF.DFS.shouldTrackOrigins()) {
Value *O = DFSF.getOrigin(RI.getReturnValue());
@@ -2582,14 +2577,13 @@ bool DFSanVisitor::visitWrappedCallBase(Function &F, CallBase &CB) {
// Adds non-variable arguments.
auto *I = CB.arg_begin();
for (unsigned n = FT->getNumParams(); n != 0; ++I, --n) {
for (unsigned N = FT->getNumParams(); N != 0; ++I, --N) {
Type *T = (*I)->getType();
FunctionType *ParamFT;
if (isa<PointerType>(T) &&
(ParamFT = dyn_cast<FunctionType>(
cast<PointerType>(T)->getElementType()))) {
(ParamFT = dyn_cast<FunctionType>(T->getPointerElementType()))) {
std::string TName = "dfst";
TName += utostr(FT->getNumParams() - n);
TName += utostr(FT->getNumParams() - N);
TName += "$";
TName += F.getName();
Constant *T = DFSF.DFS.getOrBuildTrampolineFunction(ParamFT, TName);
@@ -2615,7 +2609,7 @@ bool DFSanVisitor::visitWrappedCallBase(Function &F, CallBase &CB) {
CallInst *CustomCI = IRB.CreateCall(CustomF, Args);
CustomCI->setCallingConv(CI->getCallingConv());
CustomCI->setAttributes(TransformFunctionAttributes(
CustomCI->setAttributes(transformFunctionAttributes(
CustomFn, CI->getContext(), CI->getAttributes()));
// Update the parameter attributes of the custom call instruction to
@@ -2666,10 +2660,10 @@ void DFSanVisitor::visitCallBase(CallBase &CB) {
if (F == DFSF.DFS.DFSanVarargWrapperFn.getCallee()->stripPointerCasts())
return;
DenseMap<Value *, Function *>::iterator i =
DenseMap<Value *, Function *>::iterator UnwrappedFnIt =
DFSF.DFS.UnwrappedFnMap.find(CB.getCalledOperand());
if (i != DFSF.DFS.UnwrappedFnMap.end())
if (visitWrappedCallBase(*i->second, CB))
if (UnwrappedFnIt != DFSF.DFS.UnwrappedFnMap.end())
if (visitWrappedCallBase(*UnwrappedFnIt->second, CB))
return;
IRBuilder<> IRB(&CB);
@@ -2684,7 +2678,7 @@ void DFSanVisitor::visitCallBase(CallBase &CB) {
if (ShouldTrackOrigins) {
// Ignore overflowed origins
Value *ArgShadow = DFSF.getShadow(CB.getArgOperand(I));
if (I < DFSF.DFS.kNumOfElementsInArgOrgTLS &&
if (I < DFSF.DFS.NumOfElementsInArgOrgTLS &&
!DFSF.DFS.isZeroShadow(ArgShadow))
IRB.CreateStore(DFSF.getOrigin(CB.getArgOperand(I)),
DFSF.getArgOriginTLS(I, IRB));
@@ -2694,13 +2688,13 @@ void DFSanVisitor::visitCallBase(CallBase &CB) {
DL.getTypeAllocSize(DFSF.DFS.getShadowTy(FT->getParamType(I)));
// Stop storing if arguments' size overflows. Inside a function, arguments
// after overflow have zero shadow values.
if (ArgOffset + Size > kArgTLSSize)
if (ArgOffset + Size > ArgTLSSize)
break;
IRB.CreateAlignedStore(
DFSF.getShadow(CB.getArgOperand(I)),
DFSF.getArgTLS(FT->getParamType(I), ArgOffset, IRB),
kShadowTLSAlignment);
ArgOffset += alignTo(Size, kShadowTLSAlignment);
ShadowTLSAlignment);
ArgOffset += alignTo(Size, ShadowTLSAlignment);
}
}
@@ -2724,13 +2718,13 @@ void DFSanVisitor::visitCallBase(CallBase &CB) {
IRBuilder<> NextIRB(Next);
const DataLayout &DL = getDataLayout();
unsigned Size = DL.getTypeAllocSize(DFSF.DFS.getShadowTy(&CB));
if (Size > kRetvalTLSSize) {
if (Size > RetvalTLSSize) {
// Set overflowed return shadow to be zero.
DFSF.setShadow(&CB, DFSF.DFS.getZeroShadow(&CB));
} else {
LoadInst *LI = NextIRB.CreateAlignedLoad(
DFSF.DFS.getShadowTy(&CB), DFSF.getRetvalTLS(CB.getType(), NextIRB),
kShadowTLSAlignment, "_dfsret");
ShadowTLSAlignment, "_dfsret");
DFSF.SkipInsts.insert(LI);
DFSF.setShadow(&CB, LI);
DFSF.NonZeroChecks.push_back(LI);
@@ -2751,30 +2745,35 @@ void DFSanVisitor::visitCallBase(CallBase &CB) {
FunctionType *NewFT = DFSF.DFS.getArgsFunctionType(FT);
Value *Func =
IRB.CreateBitCast(CB.getCalledOperand(), PointerType::getUnqual(NewFT));
std::vector<Value *> Args;
auto i = CB.arg_begin(), E = CB.arg_end();
for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
Args.push_back(*i);
const unsigned NumParams = FT->getNumParams();
i = CB.arg_begin();
for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
Args.push_back(DFSF.getShadow(*i));
// Copy original arguments.
auto *ArgIt = CB.arg_begin(), *ArgEnd = CB.arg_end();
std::vector<Value *> Args(NumParams);
std::copy_n(ArgIt, NumParams, Args.begin());
// Add shadow arguments by transforming original arguments.
std::generate_n(std::back_inserter(Args), NumParams,
[&]() { return DFSF.getShadow(*ArgIt++); });
if (FT->isVarArg()) {
unsigned VarArgSize = CB.arg_size() - FT->getNumParams();
unsigned VarArgSize = CB.arg_size() - NumParams;
ArrayType *VarArgArrayTy =
ArrayType::get(DFSF.DFS.PrimitiveShadowTy, VarArgSize);
AllocaInst *VarArgShadow =
new AllocaInst(VarArgArrayTy, getDataLayout().getAllocaAddrSpace(),
"", &DFSF.F->getEntryBlock().front());
new AllocaInst(VarArgArrayTy, getDataLayout().getAllocaAddrSpace(),
"", &DFSF.F->getEntryBlock().front());
Args.push_back(IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, 0));
for (unsigned n = 0; i != E; ++i, ++n) {
// Copy remaining var args.
unsigned GepIndex = 0;
std::for_each(ArgIt, ArgEnd, [&](Value *Arg) {
IRB.CreateStore(
DFSF.getShadow(*i),
IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, n));
Args.push_back(*i);
}
DFSF.getShadow(Arg),
IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, GepIndex++));
Args.push_back(Arg);
});
}
CallBase *NewCB;
@@ -2811,10 +2810,8 @@ void DFSanVisitor::visitPHINode(PHINode &PN) {
// Give the shadow phi node valid predecessors to fool SplitEdge into working.
Value *UndefShadow = UndefValue::get(ShadowTy);
for (PHINode::block_iterator i = PN.block_begin(), e = PN.block_end(); i != e;
++i) {
ShadowPN->addIncoming(UndefShadow, *i);
}
for (BasicBlock *BB : PN.blocks())
ShadowPN->addIncoming(UndefShadow, BB);
DFSF.PHIFixups.push_back(std::make_pair(&PN, ShadowPN));
DFSF.setShadow(&PN, ShadowPN);