[MSAN] Avoid dangling ActualFnStart when replacing instruction

This would be a problem if the entire instrumented function was a call to, e.g., memcpy: MSan replaces such a call, erasing the very instruction that ActualFnStart pointed at. Use a pass-created FnPrologueEnd Instruction* as the anchor instead of ActualFnStart.

Differential Revision: https://reviews.llvm.org/D86001
Commit c42ceea934 (parent d19dc4d38b)
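The heart of the patch: rather than caching the function's first original instruction (which later instrumentation may erase), MSan now plants an llvm.donothing call at the top of the entry block and anchors everything at that marker. A minimal sketch of the pattern follows; it is illustrative standalone code, not the MSan source, and the helper name is invented:

#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"

using namespace llvm;

// Plant a stable prologue marker at the top of F. llvm.donothing has no
// side effects, so it is harmless to leave in place, and since the pass
// never replaces it, a pointer to it cannot dangle the way a pointer to
// the first "real" instruction could.
static Instruction *plantPrologueMarker(Function &F) {
  IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
  return IRB.CreateIntrinsic(Intrinsic::donothing, /*Types=*/{}, /*Args=*/{});
}

Everything previously anchored at ActualFnStart (argument shadow loads, the KMSAN prologue, the va_arg TLS backups in the per-target helpers) now builds at this marker, as the hunks below show.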
@@ -1056,7 +1056,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   ValueMap<Value*, Value*> ShadowMap, OriginMap;
   std::unique_ptr<VarArgHelper> VAHelper;
   const TargetLibraryInfo *TLI;
-  Instruction *ActualFnStart;
+  Instruction *FnPrologueEnd;

   // The following flags disable parts of MSan instrumentation based on
   // exclusion list contents and command-line options.
@@ -1095,10 +1095,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     removeUnreachableBlocks(F);

     MS.initializeCallbacks(*F.getParent());
-    ActualFnStart = F.getEntryBlock().getFirstNonPHI();
+    FnPrologueEnd = IRBuilder<>(F.getEntryBlock().getFirstNonPHI())
+                        .CreateIntrinsic(Intrinsic::donothing, {}, {});

     if (MS.CompileKernel) {
-      IRBuilder<> IRB(ActualFnStart);
+      IRBuilder<> IRB(FnPrologueEnd);
       insertKmsanPrologue(IRB);
     }

@@ -1107,6 +1108,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
                       << F.getName() << "'\n");
   }

+  bool isInPrologue(Instruction &I) {
+    return I.getParent() == FnPrologueEnd->getParent() &&
+           (&I == FnPrologueEnd || I.comesBefore(FnPrologueEnd));
+  }
+
   Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
     if (MS.TrackOrigins <= 1) return V;
     return IRB.CreateCall(MS.MsanChainOriginFn, V);
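Note the explicit `&I == FnPrologueEnd` case above: Instruction::comesBefore() is a strict ordering, and it may only compare two instructions in the same basic block, which is what the getParent() guard ensures. A small hypothetical demonstration of those semantics, not taken from the patch:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include <cassert>

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  Function *F = Function::Create(
      FunctionType::get(Type::getVoidTy(Ctx), /*isVarArg=*/false),
      Function::ExternalLinkage, "f", &M);
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
  IRBuilder<> IRB(BB);
  Instruction *Marker = IRB.CreateIntrinsic(Intrinsic::donothing, {}, {});
  Instruction *Ret = IRB.CreateRetVoid();
  assert(Marker->comesBefore(Ret));     // strictly earlier in the block
  assert(!Ret->comesBefore(Marker));
  assert(!Marker->comesBefore(Marker)); // strict: never "before" itself
  return 0;
}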
@@ -1269,6 +1275,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     LLVM_DEBUG(dbgs() << "DONE:\n" << F);
   }

+  // Returns the last instruction in the new prologue
   void insertKmsanPrologue(IRBuilder<> &IRB) {
     Value *ContextState = IRB.CreateCall(MS.MsanGetContextStateFn, {});
     Constant *Zero = IRB.getInt32(0);
@@ -1295,7 +1302,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     // Iterate all BBs in depth-first order and create shadow instructions
     // for all instructions (where applicable).
     // For PHI nodes we create dummy shadow PHIs which will be finalized later.
-    for (BasicBlock *BB : depth_first(ActualFnStart->getParent()))
+    for (BasicBlock *BB : depth_first(FnPrologueEnd->getParent()))
       visit(*BB);

     // Finalize PHI nodes.
@@ -1662,7 +1669,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       if (*ShadowPtr)
         return *ShadowPtr;
       Function *F = A->getParent();
-      IRBuilder<> EntryIRB(ActualFnStart);
+      IRBuilder<> EntryIRB(FnPrologueEnd);
       unsigned ArgOffset = 0;
       const DataLayout &DL = F->getParent()->getDataLayout();
       for (auto &FArg : F->args()) {
@@ -1880,9 +1887,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   void visit(Instruction &I) {
     if (I.getMetadata("nosanitize"))
       return;
-    // Don't want to visit if we're in the zone before ActualFnStart
-    if (I.getParent() == ActualFnStart->getParent() &&
-        I.comesBefore(ActualFnStart))
+    // Don't want to visit if we're in the prologue
+    if (isInPrologue(I))
       return;
     InstVisitor<MemorySanitizerVisitor>::visit(I);
   }
@@ -4309,7 +4315,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
     if (!VAStartInstrumentationList.empty()) {
       // If there is a va_start in this function, make a backup copy of
       // va_arg_tls somewhere in the function entry block.
-      IRBuilder<> IRB(MSV.ActualFnStart);
+      IRBuilder<> IRB(MSV.FnPrologueEnd);
       VAArgOverflowSize =
           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
       Value *CopySize =
@@ -4455,7 +4461,7 @@ struct VarArgMIPS64Helper : public VarArgHelper {
   void finalizeInstrumentation() override {
     assert(!VAArgSize && !VAArgTLSCopy &&
            "finalizeInstrumentation called twice");
-    IRBuilder<> IRB(MSV.ActualFnStart);
+    IRBuilder<> IRB(MSV.FnPrologueEnd);
     VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
     Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
                                     VAArgSize);
@@ -4648,7 +4654,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
     if (!VAStartInstrumentationList.empty()) {
       // If there is a va_start in this function, make a backup copy of
       // va_arg_tls somewhere in the function entry block.
-      IRBuilder<> IRB(MSV.ActualFnStart);
+      IRBuilder<> IRB(MSV.FnPrologueEnd);
       VAArgOverflowSize =
           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
       Value *CopySize =
@@ -4893,7 +4899,7 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
   void finalizeInstrumentation() override {
     assert(!VAArgSize && !VAArgTLSCopy &&
            "finalizeInstrumentation called twice");
-    IRBuilder<> IRB(MSV.ActualFnStart);
+    IRBuilder<> IRB(MSV.FnPrologueEnd);
     VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
     Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
                                     VAArgSize);
@@ -5212,7 +5218,7 @@ struct VarArgSystemZHelper : public VarArgHelper {
     if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
       // va_arg_tls somewhere in the function entry block.
-      IRBuilder<> IRB(MSV.ActualFnStart);
+      IRBuilder<> IRB(MSV.FnPrologueEnd);
       VAArgOverflowSize =
           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
       Value *CopySize =
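To see how the old anchor could dangle, consider the case the commit message names: a function whose entire body is one memcpy call. MSan rewrites that call to __msan_memcpy and erases the original instruction, which is exactly what ActualFnStart pointed at. A hypothetical reproducer sketch, assuming the new-pass-manager API of this LLVM era; the IR and names are illustrative, not taken from the commit:

#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Transforms/Instrumentation/MemorySanitizer.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  SMDiagnostic Err;
  // The whole function is a single memcpy. Pre-patch, ActualFnStart
  // pointed at this call; instrumentation replaces it with __msan_memcpy
  // and erases it, leaving the cached pointer dangling.
  std::unique_ptr<Module> M = parseAssemblyString(R"IR(
    declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1)
    define void @f(i8* %d, i8* %s) sanitize_memory {
      call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 16, i1 false)
      ret void
    }
  )IR", Err, Ctx);

  // Standard analysis registration for the new pass manager.
  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;
  PassBuilder PB;
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  // With the patch, the llvm.donothing marker keeps the anchor alive
  // across the replacement.
  MemorySanitizerPass MSan{MemorySanitizerOptions()};
  PreservedAnalyses PA = MSan.run(*M->getFunction("f"), FAM);
  (void)PA;
  return 0;
}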
The remaining hunks update the MSan test expectations: every instrumented function now begins with a call to llvm.donothing, and argument shadow loads are grouped in the prologue ahead of the per-instruction instrumentation, so some CHECK lines move and many tests gain a donothing check.

@@ -18,8 +18,8 @@ entry:

 ; CHECK-LABEL: @InsertValue(
 ; CHECK-DAG: [[Sx:%.*]] = load i32, i32* {{.*}}@__msan_param_tls to i32*)
-; CHECK: [[A:%.*]] = insertvalue [2 x i32] [i32 -1, i32 -1], i32 [[Sx]], 0
 ; CHECK-DAG: [[Sy:%.*]] = load i32, i32* {{.*}}@__msan_param_tls to i64), i64 8) to i32*)
+; CHECK: [[A:%.*]] = insertvalue [2 x i32] [i32 -1, i32 -1], i32 [[Sx]], 0
 ; CHECK: [[B:%.*]] = insertvalue [2 x i32] [[A]], i32 [[Sy]], 1
 ; CHECK: store [2 x i32] [[B]], [2 x i32]* {{.*}}@__msan_retval_tls
 ; CHECK: ret [2 x i32]
@@ -34,8 +34,8 @@ entry:

 ; CHECK-LABEL: @InsertValueDouble(
 ; CHECK-DAG: [[Sx:%.*]] = load i64, i64* getelementptr {{.*}}@__msan_param_tls, i32 0, i32 0
-; CHECK: [[A:%.*]] = insertvalue [2 x i64] [i64 -1, i64 -1], i64 [[Sx]], 0
 ; CHECK-DAG: [[Sy:%.*]] = load i64, i64* {{.*}}@__msan_param_tls to i64), i64 8) to i64*)
+; CHECK: [[A:%.*]] = insertvalue [2 x i64] [i64 -1, i64 -1], i64 [[Sx]], 0
 ; CHECK: [[B:%.*]] = insertvalue [2 x i64] [[A]], i64 [[Sy]], 1
 ; CHECK: store [2 x i64] [[B]], [2 x i64]* {{.*}}@__msan_retval_tls
 ; CHECK: ret [2 x double]
@@ -49,3 +49,6 @@ entry:
 ; CHECK-NOT: writeonly
 ; CHECK-NOT: argmemonly
 ; CHECK-NOT: speculatable
+
+; CHECK: Function Attrs: nounwind readnone willreturn
+; CHECK-NEXT: declare void @llvm.donothing
@@ -19,8 +19,8 @@ entry:

 ; CHECK-LABEL: @clmul00
 ; CHECK: %[[S0:.*]] = load <2 x i64>, <2 x i64>* {{.*}}@__msan_param_tls
-; CHECK: %[[SHUF0:.*]] = shufflevector <2 x i64> %[[S0]], <2 x i64> undef, <2 x i32> zeroinitializer
 ; CHECK: %[[S1:.*]] = load <2 x i64>, <2 x i64>* {{.*}}@__msan_param_tls
+; CHECK: %[[SHUF0:.*]] = shufflevector <2 x i64> %[[S0]], <2 x i64> undef, <2 x i32> zeroinitializer
 ; CHECK: %[[SHUF1:.*]] = shufflevector <2 x i64> %[[S1]], <2 x i64> undef, <2 x i32> zeroinitializer
 ; CHECK: %[[SRET:.*]] = or <2 x i64> %[[SHUF0]], %[[SHUF1]]
 ; CHECK: store <2 x i64> %[[SRET]], <2 x i64>* {{.*}}@__msan_retval_tls
@@ -33,8 +33,8 @@ entry:

 ; CHECK-LABEL: @clmul10
 ; CHECK: %[[S0:.*]] = load <2 x i64>, <2 x i64>* {{.*}}@__msan_param_tls
-; CHECK: %[[SHUF0:.*]] = shufflevector <2 x i64> %[[S0]], <2 x i64> undef, <2 x i32> zeroinitializer
 ; CHECK: %[[S1:.*]] = load <2 x i64>, <2 x i64>* {{.*}}@__msan_param_tls
+; CHECK: %[[SHUF0:.*]] = shufflevector <2 x i64> %[[S0]], <2 x i64> undef, <2 x i32> zeroinitializer
 ; CHECK: %[[SHUF1:.*]] = shufflevector <2 x i64> %[[S1]], <2 x i64> undef, <2 x i32> <i32 1, i32 1>
 ; CHECK: %[[SRET:.*]] = or <2 x i64> %[[SHUF0]], %[[SHUF1]]
 ; CHECK: store <2 x i64> %[[SRET]], <2 x i64>* {{.*}}@__msan_retval_tls
@@ -47,8 +47,8 @@ entry:

 ; CHECK-LABEL: @clmul11_256
 ; CHECK: %[[S0:.*]] = load <4 x i64>, <4 x i64>* {{.*}}@__msan_param_tls
-; CHECK: %[[SHUF0:.*]] = shufflevector <4 x i64> %[[S0]], <4 x i64> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
 ; CHECK: %[[S1:.*]] = load <4 x i64>, <4 x i64>* {{.*}}@__msan_param_tls
+; CHECK: %[[SHUF0:.*]] = shufflevector <4 x i64> %[[S0]], <4 x i64> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
 ; CHECK: %[[SHUF1:.*]] = shufflevector <4 x i64> %[[S1]], <4 x i64> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
 ; CHECK: %[[SRET:.*]] = or <4 x i64> %[[SHUF0]], %[[SHUF1]]
 ; CHECK: store <4 x i64> %[[SRET]], <4 x i64>* {{.*}}@__msan_retval_tls
@@ -61,8 +61,8 @@ entry:

 ; CHECK-LABEL: @clmul01_512
 ; CHECK: %[[S0:.*]] = load <8 x i64>, <8 x i64>* {{.*}}@__msan_param_tls
-; CHECK: %[[SHUF0:.*]] = shufflevector <8 x i64> %[[S0]], <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
 ; CHECK: %[[S1:.*]] = load <8 x i64>, <8 x i64>* {{.*}}@__msan_param_tls
+; CHECK: %[[SHUF0:.*]] = shufflevector <8 x i64> %[[S0]], <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
 ; CHECK: %[[SHUF1:.*]] = shufflevector <8 x i64> %[[S1]], <8 x i64> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
 ; CHECK: %[[SRET:.*]] = or <8 x i64> %[[SHUF0]], %[[SHUF1]]
 ; ORIGIN: %[[FLAT:.*]] = bitcast <8 x i64> %[[SHUF1]] to i512
@@ -72,14 +72,14 @@ entry:
 }

 ; CHECK-LABEL: @Load(
+; CHECK: %[[A:.*]] = load <4 x i64>, {{.*}}@__msan_param_tls to i64), i64 8)
+; CHECK-ORIGIN: %[[O:.*]] = load i32, {{.*}}@__msan_param_origin_tls to i64), i64 8)
 ; CHECK: %[[B:.*]] = ptrtoint <4 x double>* %p to i64
 ; CHECK: %[[C:.*]] = xor i64 %[[B]], 87960930222080
 ; CHECK: %[[D:.*]] = inttoptr i64 %[[C]] to <4 x i64>*
 ; CHECK-ORIGIN: %[[E:.*]] = add i64 %[[C]], 17592186044416
 ; CHECK-ORIGIN: %[[F:.*]] = and i64 %[[E]], -4
 ; CHECK-ORIGIN: %[[G:.*]] = inttoptr i64 %[[F]] to i32*
-; CHECK: %[[A:.*]] = load <4 x i64>, {{.*}}@__msan_param_tls to i64), i64 8)
-; CHECK-ORIGIN: %[[O:.*]] = load i32, {{.*}}@__msan_param_origin_tls to i64), i64 8)
 ; CHECK: %[[E:.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* %[[D]], i32 1, <4 x i1> %mask, <4 x i64> %[[A]])
 ; CHECK-ORIGIN: %[[H:.*]] = load i32, i32* %[[G]]
 ; CHECK-ORIGIN: %[[O2:.*]] = select i1 %{{.*}}, i32 %[[O]], i32 %[[H]]
@@ -257,6 +257,7 @@ declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture w

 define void @atomic_memcpy(i8* nocapture %x, i8* nocapture %y) nounwind {
 ; CHECK-LABEL: atomic_memcpy
+; CHECK-NEXT: call void @llvm.donothing
 ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
 ; CHECK-NEXT: ret void
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
@@ -265,6 +266,7 @@ define void @atomic_memcpy(i8* nocapture %x, i8* nocapture %y) nounwind {

 define void @atomic_memmove(i8* nocapture %x, i8* nocapture %y) nounwind {
 ; CHECK-LABEL: atomic_memmove
+; CHECK-NEXT: call void @llvm.donothing
 ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
 ; CHECK-NEXT: ret void
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
@@ -273,6 +275,7 @@ define void @atomic_memmove(i8* nocapture %x, i8* nocapture %y) nounwind {

 define void @atomic_memset(i8* nocapture %x) nounwind {
 ; CHECK-LABEL: atomic_memset
+; CHECK-NEXT: call void @llvm.donothing
 ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 88, i64 16, i32 1)
 ; CHECK-NEXT: ret void
   call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 88, i64 16, i32 1)
@@ -389,6 +392,7 @@ entry:
 ; CHECK-LABEL: @IntToPtr
 ; CHECK: load i64, i64*{{.*}}__msan_param_tls
 ; CHECK-ORIGINS-NEXT: load i32, i32*{{.*}}__msan_param_origin_tls
+; CHECK-NEXT: call void @llvm.donothing
 ; CHECK-NEXT: inttoptr
 ; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
 ; CHECK: ret i8*
@@ -7,6 +7,7 @@ target triple = "x86_64-unknown-linux-gnu"

 define noundef i32 @NormalRet() nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @NormalRet(
+; CHECK-NEXT: call void @llvm.donothing()
 ; CHECK-NEXT: ret i32 123
 ;
   ret i32 123
@@ -14,6 +15,7 @@ define noundef i32 @NormalRet() nounwind uwtable sanitize_memory {

 define i32 @PartialRet() nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @PartialRet(
+; CHECK-NEXT: call void @llvm.donothing()
 ; CHECK-NEXT: store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
 ; CHECK-NEXT: store i32 0, i32* @__msan_retval_origin_tls, align 4
 ; CHECK-NEXT: ret i32 123
@@ -23,6 +25,7 @@ define i32 @PartialRet() nounwind uwtable sanitize_memory {

 define noundef i32 @LoadedRet() nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @LoadedRet(
+; CHECK-NEXT: call void @llvm.donothing()
 ; CHECK-NEXT: [[P:%.*]] = inttoptr i64 0 to i32*
 ; CHECK-NEXT: [[O:%.*]] = load i32, i32* [[P]], align 4
 ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i32* [[P]] to i64
@@ -35,7 +38,7 @@ define noundef i32 @LoadedRet() nounwind uwtable sanitize_memory {
 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[_MSLD]], 0
 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof !0
 ; CHECK: 7:
-; CHECK-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #1
+; CHECK-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) [[ATTR2:#.*]]
 ; CHECK-NEXT: unreachable
 ; CHECK: 8:
 ; CHECK-NEXT: ret i32 [[O]]
@@ -48,6 +51,7 @@ define noundef i32 @LoadedRet() nounwind uwtable sanitize_memory {

 define void @NormalArg(i32 noundef %a) nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @NormalArg(
+; CHECK-NEXT: call void @llvm.donothing()
 ; CHECK-NEXT: [[P:%.*]] = inttoptr i64 0 to i32*
 ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i32* [[P]] to i64
 ; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
@@ -67,6 +71,7 @@ define void @PartialArg(i32 %a) nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @PartialArg(
 ; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* bitcast ([100 x i64]* @__msan_param_tls to i32*), align 8
 ; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
+; CHECK-NEXT: call void @llvm.donothing()
 ; CHECK-NEXT: [[P:%.*]] = inttoptr i64 0 to i32*
 ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint i32* [[P]] to i64
 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
@@ -90,8 +95,9 @@ define void @PartialArg(i32 %a) nounwind uwtable sanitize_memory {

 define void @CallNormal() nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @CallNormal(
-; CHECK-NEXT: [[R:%.*]] = call i32 @NormalRet() #0
-; CHECK-NEXT: call void @NormalArg(i32 [[R]]) #0
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[R:%.*]] = call i32 @NormalRet() [[ATTR0:#.*]]
+; CHECK-NEXT: call void @NormalArg(i32 [[R]]) [[ATTR0]]
 ; CHECK-NEXT: ret void
 ;
   %r = call i32 @NormalRet() nounwind uwtable sanitize_memory
@@ -101,6 +107,7 @@ define void @CallNormal() nounwind uwtable sanitize_memory {

 define void @CallWithLoaded() nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @CallWithLoaded(
+; CHECK-NEXT: call void @llvm.donothing()
 ; CHECK-NEXT: [[P:%.*]] = inttoptr i64 0 to i32*
 ; CHECK-NEXT: [[O:%.*]] = load i32, i32* [[P]], align 4
 ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i32* [[P]] to i64
@@ -113,10 +120,10 @@ define void @CallWithLoaded() nounwind uwtable sanitize_memory {
 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[_MSLD]], 0
 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof !0
 ; CHECK: 7:
-; CHECK-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #1
+; CHECK-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) [[ATTR2]]
 ; CHECK-NEXT: unreachable
 ; CHECK: 8:
-; CHECK-NEXT: call void @NormalArg(i32 [[O]]) #0
+; CHECK-NEXT: call void @NormalArg(i32 [[O]]) [[ATTR0]]
 ; CHECK-NEXT: ret void
 ;
   %p = inttoptr i64 0 to i32 *
@@ -127,13 +134,14 @@ define void @CallWithLoaded() nounwind uwtable sanitize_memory {

 define void @CallPartial() nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @CallPartial(
+; CHECK-NEXT: call void @llvm.donothing()
 ; CHECK-NEXT: store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
-; CHECK-NEXT: [[R:%.*]] = call i32 @PartialRet() #0
+; CHECK-NEXT: [[R:%.*]] = call i32 @PartialRet() [[ATTR0]]
 ; CHECK-NEXT: [[_MSRET:%.*]] = load i32, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
 ; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* @__msan_retval_origin_tls, align 4
 ; CHECK-NEXT: store i32 [[_MSRET]], i32* bitcast ([100 x i64]* @__msan_param_tls to i32*), align 8
 ; CHECK-NEXT: store i32 [[TMP1]], i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
-; CHECK-NEXT: call void @PartialArg(i32 [[R]]) #0
+; CHECK-NEXT: call void @PartialArg(i32 [[R]]) [[ATTR0]]
 ; CHECK-NEXT: ret void
 ;
   %r = call i32 @PartialRet() nounwind uwtable sanitize_memory
@@ -45,11 +45,11 @@ entry:
 ; CHECK: [[BASE:%[0-9]+]] = ptrtoint {{.*}} [[PARAM_SHADOW]]
 ; CHECK: [[SHADOW_PTR:%[a-z0-9_]+]] = inttoptr {{.*}} [[BASE]]
 ; CHECK: [[SHADOW:%[a-z0-9]+]] = load i64, i64* [[SHADOW_PTR]]
+; CHECK: [[BASE2:%[0-9]+]] = ptrtoint {{.*}} [[PARAM_SHADOW]]
 ; Load the shadow of %p and check it
 ; CHECK: icmp ne i64 [[SHADOW]]
 ; CHECK: br i1
 ; CHECK: {{^[0-9]+}}:
-; CHECK: [[BASE2:%[0-9]+]] = ptrtoint {{.*}} [[PARAM_SHADOW]]
 ; CHECK: @__msan_metadata_ptr_for_store_1(i8* %p)
 ; CHECK: store i8
 ; If the new shadow is non-zero, jump to __msan_chain_origin()
@@ -63,6 +63,7 @@ declare <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a, <8 x i16> %b) nounwind
 ; CHECK-ORIGINS: load i32, i32* {{.*}} @__msan_param_origin_tls
 ; CHECK-NEXT: load <8 x i16>, <8 x i16>* {{.*}} @__msan_param_tls
 ; CHECK-ORIGINS: load i32, i32* {{.*}} @__msan_param_origin_tls
+; CHECK-NEXT: call void @llvm.donothing
 ; CHECK-NEXT: = or <8 x i16>
 ; CHECK-ORIGINS: = bitcast <8 x i16> {{.*}} to i128
 ; CHECK-ORIGINS-NEXT: = icmp ne i128 {{.*}}, 0
@@ -18,6 +18,7 @@ entry:

 ; CHECK: define void @foo
 ; CHECK-NEXT: entry:
+; CHECK-NEXT: call void @llvm.donothing
 ; CHECK-NEXT: %id = call token @llvm.coro.id
 ; CHECK-NEXT: call i1 @llvm.coro.alloc(token %id)
 ; CHECK-NEXT: ret void