1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2025-01-31 20:51:52 +01:00

[NFC][DwarfDebug] Add test for variables with a single location which

don't span their entire scope.

The previous commit (6d1c40c171e) is an older version of the test.

Reviewed By: aprantl, vsk

Differential Revision: https://reviews.llvm.org/D79573
This commit is contained in:
OCHyams 2020-05-07 12:41:20 +01:00 committed by Tyker
parent 5a1aa7d07d
commit ceba7b314a
15 changed files with 967 additions and 80 deletions

View File

@ -122,6 +122,9 @@ inline RetainedKnowledge getKnowledgeFromUseInAssume(const Use *U) {
U->getOperandNo());
}
/// Tag in operand bundle indicating that this bundle should be ignored.
constexpr StringRef IgnoreBundleTag = "ignore";
/// Return true iff the operand bundles of the provided llvm.assume don't
/// contain any valuable information. This is true when:
/// - The operand bundle is empty
@ -154,6 +157,11 @@ RetainedKnowledge getKnowledgeValidInContext(
const Instruction *CtxI, const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr);
/// This extracts the Knowledge from an element of an operand bundle.
/// This is mostly for use in the assume builder.
RetainedKnowledge getKnowledgeFromBundle(CallInst &Assume,
const CallBase::BundleOpInfo &BOI);
} // namespace llvm
#endif

View File

@ -31,6 +31,7 @@ class LLVMContextImpl;
class Module;
class OptPassGate;
template <typename T> class SmallVectorImpl;
template <typename T> class StringMapEntry;
class SMDiagnostic;
class StringRef;
class Twine;
@ -107,6 +108,10 @@ public:
/// \see LLVMContext::getOperandBundleTagID
void getOperandBundleTags(SmallVectorImpl<StringRef> &Result) const;
/// getOrInsertBundleTag - Returns the Tag to use for an operand bundle of
/// name TagName.
StringMapEntry<uint32_t> *getOrInsertBundleTag(StringRef TagName) const;
/// getOperandBundleTagID - Maps a bundle tag to an integer ID. Every bundle
/// tag registered with an LLVMContext has an unique ID.
uint32_t getOperandBundleTagID(StringRef Tag) const;

View File

@ -71,6 +71,7 @@ void initializeAggressiveInstCombinerLegacyPassPass(PassRegistry&);
void initializeAliasSetPrinterPass(PassRegistry&);
void initializeAlignmentFromAssumptionsPass(PassRegistry&);
void initializeAlwaysInlinerLegacyPassPass(PassRegistry&);
void initializeAssumeSimplifyPassLegacyPassPass(PassRegistry &);
void initializeOpenMPOptLegacyPassPass(PassRegistry &);
void initializeArgPromotionPass(PassRegistry&);
void initializeAssumptionCacheTrackerPass(PassRegistry&);

View File

@ -147,6 +147,13 @@ FunctionPass *createUnifyLoopExitsPass();
// into a natural loop.
//
FunctionPass *createFixIrreduciblePass();
}
//===----------------------------------------------------------------------===//
//
// AssumeSimplify - remove redundant assumes and merge assumes in the same
// BasicBlock when possible.
//
FunctionPass *createAssumeSimplifyPass();
} // namespace llvm
#endif

View File

@ -41,6 +41,14 @@ IntrinsicInst *buildAssumeFromInst(Instruction *I);
void salvageKnowledge(Instruction *I, AssumptionCache *AC = nullptr,
DominatorTree *DT = nullptr);
/// This pass attempts to minimize the number of assumes without losing any
/// information.
struct AssumeSimplifyPass : public PassInfoMixin<AssumeSimplifyPass> {
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
FunctionPass *createAssumeSimplifyPass();
/// This pass will try to build an llvm.assume for every instruction in the
/// function. Its main purpose is testing.
struct AssumeBuilderPass : public PassInfoMixin<AssumeBuilderPass> {

View File

@ -89,11 +89,13 @@ void llvm::fillMapFromAssume(CallInst &AssumeCI, RetainedKnowledgeMap &Result) {
}
}
static RetainedKnowledge
getKnowledgeFromBundle(CallInst &Assume, const CallBase::BundleOpInfo &BOI) {
RetainedKnowledge
llvm::getKnowledgeFromBundle(CallInst &Assume,
const CallBase::BundleOpInfo &BOI) {
RetainedKnowledge Result;
Result.AttrKind = Attribute::getAttrKindFromName(BOI.Tag->getKey());
Result.WasOn = getValueFromBundleOpInfo(Assume, BOI, ABA_WasOn);
if (bundleHasArgument(BOI, ABA_WasOn))
Result.WasOn = getValueFromBundleOpInfo(Assume, BOI, ABA_WasOn);
if (BOI.End - BOI.Begin > ABA_Argument)
Result.ArgValue =
cast<ConstantInt>(getValueFromBundleOpInfo(Assume, BOI, ABA_Argument))
@ -116,7 +118,7 @@ bool llvm::isAssumeWithEmptyBundle(CallInst &CI) {
"this function is intended to be used on llvm.assume");
return none_of(Assume.bundle_op_infos(),
[](const CallBase::BundleOpInfo &BOI) {
return BOI.Tag->getKey() != "ignore";
return BOI.Tag->getKey() != IgnoreBundleTag;
});
}

View File

@ -80,7 +80,7 @@ findAffectedValues(CallInst *CI,
for (unsigned Idx = 0; Idx != CI->getNumOperandBundles(); Idx++) {
if (CI->getOperandBundleAt(Idx).Inputs.size() > ABA_WasOn &&
CI->getOperandBundleAt(Idx).getTagName() != "ignore")
CI->getOperandBundleAt(Idx).getTagName() != IgnoreBundleTag)
AddAffected(CI->getOperandBundleAt(Idx).Inputs[ABA_WasOn], Idx);
}

View File

@ -282,6 +282,11 @@ void LLVMContext::getOperandBundleTags(SmallVectorImpl<StringRef> &Tags) const {
pImpl->getOperandBundleTags(Tags);
}
StringMapEntry<uint32_t> *
LLVMContext::getOrInsertBundleTag(StringRef TagName) const {
return pImpl->getOrInsertBundleTag(TagName);
}
uint32_t LLVMContext::getOperandBundleTagID(StringRef Tag) const {
return pImpl->getOperandBundleTagID(Tag);
}

View File

@ -259,6 +259,7 @@ extern cl::opt<bool> EnableOrderFileInstrumentation;
extern cl::opt<bool> FlattenedProfileUsed;
extern cl::opt<AttributorRunOption> AttributorRun;
extern cl::opt<bool> EnableKnowledgeRetention;
const PassBuilder::OptimizationLevel PassBuilder::OptimizationLevel::O0 = {
/*SpeedLevel*/ 0,
@ -425,6 +426,8 @@ PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level,
// Catch trivial redundancies
FPM.addPass(EarlyCSEPass(true /* Enable mem-ssa. */));
if (EnableKnowledgeRetention)
FPM.addPass(AssumeSimplifyPass());
// Hoisting of scalars and load expressions.
if (Level.getSpeedupLevel() > 1) {

View File

@ -167,6 +167,7 @@ FUNCTION_PASS("adce", ADCEPass())
FUNCTION_PASS("add-discriminators", AddDiscriminatorsPass())
FUNCTION_PASS("aggressive-instcombine", AggressiveInstCombinePass())
FUNCTION_PASS("assume-builder", AssumeBuilderPass())
FUNCTION_PASS("assume-simplify", AssumeSimplifyPass())
FUNCTION_PASS("alignment-from-assumptions", AlignmentFromAssumptionsPass())
FUNCTION_PASS("bdce", BDCEPass())
FUNCTION_PASS("bounds-checking", BoundsCheckingPass())

View File

@ -165,6 +165,8 @@ cl::opt<AttributorRunOption> AttributorRun(
clEnumValN(AttributorRunOption::NONE, "none",
"disable attributor runs")));
extern cl::opt<bool> EnableKnowledgeRetention;
PassManagerBuilder::PassManagerBuilder() {
OptLevel = 2;
SizeLevel = 0;
@ -359,6 +361,8 @@ void PassManagerBuilder::addFunctionSimplificationPasses(
assert(OptLevel >= 1 && "Calling function optimizer with no optimization level!");
MPM.add(createSROAPass());
MPM.add(createEarlyCSEPass(true /* Enable mem-ssa. */)); // Catch trivial redundancies
if (EnableKnowledgeRetention)
MPM.add(createAssumeSimplifyPass());
if (OptLevel > 1) {
if (EnableGVNHoist)

View File

@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
@ -16,7 +17,9 @@
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
@ -90,7 +93,7 @@ struct AssumeBuilderState {
}
void addKnowledge(RetainedKnowledge RK) {
if (tryToPreserveWithoutAddingAssume(RK))
if (RK.AttrKind == Attribute::None || tryToPreserveWithoutAddingAssume(RK))
return;
MapKey Key{RK.WasOn, RK.AttrKind};
auto Lookup = AssumedKnowledgeMap.find(Key);
@ -210,6 +213,275 @@ void llvm::salvageKnowledge(Instruction *I, AssumptionCache *AC, DominatorTree*
}
}
namespace {
/// Holds all the state needed to simplify the llvm.assume intrinsics of a
/// single function: drop knowledge already implied by argument attributes or
/// dominating assumes, merge neighboring assumes, and queue now-empty assumes
/// for deletion.
struct AssumeSimplify {
  /// Function being simplified.
  Function &F;
  /// Cache of the function's llvm.assume calls; updated when a merged assume
  /// is created.
  AssumptionCache &AC;
  /// Optional dominator tree used to strengthen isValidAssumeForContext
  /// queries; may be null.
  DominatorTree *DT;
  LLVMContext &C;
  /// Assumes that may have become useless; RunCleanup decides their fate.
  SmallDenseSet<IntrinsicInst *> CleanupToDo;
  /// Interned "ignore" bundle tag; bundles retagged with it count as removed.
  StringMapEntry<uint32_t> *IgnoreTag;
  /// The function's assumes grouped by basic block, sorted in instruction
  /// order (see buildMapping).
  SmallDenseMap<BasicBlock *, SmallVector<IntrinsicInst *, 4>, 8> BBToAssume;
  /// Set as soon as any IR change is made; reported back to the caller.
  bool MadeChange = false;

  AssumeSimplify(Function &F, AssumptionCache &AC, DominatorTree *DT,
                 LLVMContext &C)
      : F(F), AC(AC), DT(DT), C(C),
        IgnoreTag(C.getOrInsertBundleTag(IgnoreBundleTag)) {}

  /// Rebuild BBToAssume from the assumption cache. When
  /// \p FilterBooleanArgument is set, skip assumes whose boolean condition
  /// operand is not a known-true constant.
  void buildMapping(bool FilterBooleanArgument) {
    BBToAssume.clear();
    for (Value *V : AC.assumptions()) {
      // The cache may contain null entries for assumes that were erased.
      if (!V)
        continue;
      IntrinsicInst *Assume = cast<IntrinsicInst>(V);
      if (FilterBooleanArgument) {
        auto *Arg = dyn_cast<ConstantInt>(Assume->getOperand(0));
        if (!Arg || Arg->isZero())
          continue;
      }
      BBToAssume[Assume->getParent()].push_back(Assume);
    }
    // Keep each per-block list in instruction order so later phases can rely
    // on which assume comes first.
    for (auto &Elem : BBToAssume) {
      llvm::sort(Elem.second,
                 [](const IntrinsicInst *LHS, const IntrinsicInst *RHS) {
                   return LHS->comesBefore(RHS);
                 });
    }
  }

  /// Remove all assumes in CleanupToDo whose boolean argument is true and
  /// that either don't hold valuable knowledge anymore or, when
  /// \p ForceCleanup is set, were consumed by a merge.
  void RunCleanup(bool ForceCleanup) {
    for (IntrinsicInst *Assume : CleanupToDo) {
      auto *Arg = dyn_cast<ConstantInt>(Assume->getOperand(0));
      if (!Arg || Arg->isZero() ||
          (!ForceCleanup && !isAssumeWithEmptyBundle(*Assume)))
        continue;
      MadeChange = true;
      Assume->eraseFromParent();
    }
    CleanupToDo.clear();
  }

  /// Remove knowledge stored in an assume when it is already known by an
  /// attribute or another assume. When valid, this can instead update the
  /// existing knowledge held by that attribute or assume.
  void dropRedundantKnowledge() {
    /// Where a piece of knowledge currently lives: the assume, its argument
    /// value, and the bundle holding it.
    struct MapValue {
      IntrinsicInst *Assume;
      unsigned ArgValue;
      CallInst::BundleOpInfo *BOI;
    };
    buildMapping(false);
    SmallDenseMap<std::pair<Value *, Attribute::AttrKind>,
                  SmallVector<MapValue, 2>, 16>
        Knowledge;
    // Visit blocks in depth-first order so dominating assumes tend to be
    // recorded before the assumes they make redundant.
    for (BasicBlock *BB : depth_first(&F))
      for (Value *V : BBToAssume[BB]) {
        if (!V)
          continue;
        IntrinsicInst *Assume = cast<IntrinsicInst>(V);
        for (CallInst::BundleOpInfo &BOI : Assume->bundle_op_infos()) {
          // Neutralize this bundle: undef its WasOn operand, retag it as
          // "ignore", and queue the assume for cleanup.
          auto RemoveFromAssume = [&]() {
            CleanupToDo.insert(Assume);
            if (BOI.Begin != BOI.End) {
              Use *U = &Assume->op_begin()[BOI.Begin + ABA_WasOn];
              U->set(UndefValue::get(U->get()->getType()));
            }
            BOI.Tag = IgnoreTag;
          };
          if (BOI.Tag == IgnoreTag) {
            CleanupToDo.insert(Assume);
            continue;
          }
          RetainedKnowledge RK = getKnowledgeFromBundle(*Assume, BOI);
          if (auto *Arg = dyn_cast_or_null<Argument>(RK.WasOn)) {
            bool HasSameKindAttr = Arg->hasAttribute(RK.AttrKind);
            // The argument attribute already carries at least as much
            // knowledge as this bundle: drop the bundle.
            if (HasSameKindAttr)
              if (!Attribute::doesAttrKindHaveArgument(RK.AttrKind) ||
                  Arg->getAttribute(RK.AttrKind).getValueAsInt() >=
                      RK.ArgValue) {
                RemoveFromAssume();
                continue;
              }
            // The assume holds at function entry: promote the knowledge into
            // an argument attribute and drop the bundle.
            if (isValidAssumeForContext(
                    Assume, &*F.getEntryBlock().getFirstInsertionPt()) ||
                Assume == &*F.getEntryBlock().getFirstInsertionPt()) {
              if (HasSameKindAttr)
                Arg->removeAttr(RK.AttrKind);
              Arg->addAttr(Attribute::get(C, RK.AttrKind, RK.ArgValue));
              MadeChange = true;
              RemoveFromAssume();
              continue;
            }
          }
          auto &Lookup = Knowledge[{RK.WasOn, RK.AttrKind}];
          for (MapValue &Elem : Lookup) {
            if (!isValidAssumeForContext(Elem.Assume, Assume, DT))
              continue;
            if (Elem.ArgValue >= RK.ArgValue) {
              // A previously seen assume already knows at least as much.
              RemoveFromAssume();
              continue;
            } else if (isValidAssumeForContext(Assume, Elem.Assume, DT)) {
              // Both assumes are valid in each other's context: strengthen
              // the recorded one in place and drop this bundle.
              Elem.Assume->op_begin()[Elem.BOI->Begin + ABA_Argument].set(
                  ConstantInt::get(Type::getInt64Ty(C), RK.ArgValue));
              MadeChange = true;
              RemoveFromAssume();
              continue;
            }
          }
          Lookup.push_back({Assume, RK.ArgValue, &BOI});
        }
      }
  }

  using MergeIterator = SmallVectorImpl<IntrinsicInst *>::iterator;

  /// Merge all assumes from \p Begin to \p End and insert the resulting
  /// assume as high as possible in the basic block \p BB.
  void mergeRange(BasicBlock *BB, MergeIterator Begin, MergeIterator End) {
    // Nothing to merge for zero or one assume.
    if (Begin == End || std::next(Begin) == End)
      return;
    /// Provide no additional information so that AssumeBuilderState doesn't
    /// try to do any punning since it already has been done better.
    AssumeBuilderState Builder(F.getParent());

    /// For now it is initialized to the best value it could have.
    Instruction *InsertPt = BB->getFirstNonPHI();
    if (isa<LandingPadInst>(InsertPt))
      InsertPt = InsertPt->getNextNode();
    for (IntrinsicInst *I : make_range(Begin, End)) {
      CleanupToDo.insert(I);
      for (CallInst::BundleOpInfo &BOI : I->bundle_op_infos()) {
        RetainedKnowledge RK = getKnowledgeFromBundle(*I, BOI);
        if (!RK)
          continue;
        Builder.addKnowledge(RK);
        // The merged assume must come after every instruction it talks
        // about; push the insertion point below such definitions.
        if (auto *I = dyn_cast_or_null<Instruction>(RK.WasOn))
          if (I->getParent() == InsertPt->getParent() &&
              (InsertPt->comesBefore(I) || InsertPt == I))
            InsertPt = I->getNextNode();
      }
    }
    /// Adjust InsertPt if it is before Begin, since mergeAssumes only
    /// guarantees we can place the resulting assume between Begin and End.
    if (InsertPt->comesBefore(*Begin))
      for (auto It = (*Begin)->getIterator(), E = InsertPt->getIterator();
           It != E; --It)
        if (!isGuaranteedToTransferExecutionToSuccessor(&*It)) {
          InsertPt = It->getNextNode();
          break;
        }
    IntrinsicInst *MergedAssume = Builder.build();
    if (!MergedAssume)
      return;
    MadeChange = true;
    MergedAssume->insertBefore(InsertPt);
    // Keep the assumption cache aware of the newly created assume.
    AC.registerAssumption(MergedAssume);
  }

  /// Merge assumes when they are in the same BasicBlock and
  /// isGuaranteedToTransferExecutionToSuccessor returns true for every
  /// instruction between them.
  void mergeAssumes() {
    buildMapping(true);

    SmallVector<MergeIterator, 4> SplitPoints;
    for (auto &Elem : BBToAssume) {
      SmallVectorImpl<IntrinsicInst *> &AssumesInBB = Elem.second;
      if (AssumesInBB.size() < 2)
        continue;
      /// AssumesInBB is already sorted by order in the block.
      BasicBlock::iterator It = AssumesInBB.front()->getIterator();
      BasicBlock::iterator E = AssumesInBB.back()->getIterator();
      SplitPoints.push_back(AssumesInBB.begin());
      MergeIterator LastSplit = AssumesInBB.begin();
      // Cut the list at every instruction that may not transfer execution to
      // its successor: assumes cannot be merged across such a point.
      for (; It != E; ++It)
        if (!isGuaranteedToTransferExecutionToSuccessor(&*It)) {
          for (; (*LastSplit)->comesBefore(&*It); ++LastSplit)
            ;
          if (SplitPoints.back() != LastSplit)
            SplitPoints.push_back(LastSplit);
        }
      SplitPoints.push_back(AssumesInBB.end());
      // Merge each maximal run of assumes between consecutive split points.
      for (auto SplitIt = SplitPoints.begin();
           SplitIt != std::prev(SplitPoints.end()); SplitIt++) {
        mergeRange(Elem.first, *SplitIt, *(SplitIt + 1));
      }
      SplitPoints.clear();
    }
  }
};
/// Run the full assume-simplification pipeline over \p F.
/// \p AC must be non-null; \p DT may be null.
/// \returns true if any change was made to the IR.
bool simplifyAssumes(Function &F, AssumptionCache *AC, DominatorTree *DT) {
  AssumeSimplify Simplifier(F, *AC, DT, F.getContext());

  // Drop knowledge that an attribute or a dominating assume already provides.
  Simplifier.dropRedundantKnowledge();
  // Erase assumes that the previous step left empty.
  Simplifier.RunCleanup(/*ForceCleanup=*/false);
  // Combine neighboring assumes within each basic block when possible.
  Simplifier.mergeAssumes();
  // Erase the assumes that were folded into a merged one.
  Simplifier.RunCleanup(/*ForceCleanup=*/true);

  return Simplifier.MadeChange;
}
} // namespace
/// New-pass-manager entry point: a no-op unless knowledge retention is
/// enabled on the command line. The dominator tree is only used if some
/// earlier pass already computed it.
PreservedAnalyses AssumeSimplifyPass::run(Function &F,
                                          FunctionAnalysisManager &AM) {
  if (EnableKnowledgeRetention) {
    AssumptionCache &AC = AM.getResult<AssumptionAnalysis>(F);
    DominatorTree *DT = AM.getCachedResult<DominatorTreeAnalysis>(F);
    simplifyAssumes(F, &AC, DT);
  }
  return PreservedAnalyses::all();
}
class AssumeSimplifyPassLegacyPass : public FunctionPass {
public:
static char ID;
AssumeSimplifyPassLegacyPass() : FunctionPass(ID) {
initializeAssumeSimplifyPassLegacyPassPass(
*PassRegistry::getPassRegistry());
}
bool runOnFunction(Function &F) override {
if (skipFunction(F) || !EnableKnowledgeRetention)
return false;
DominatorTreeWrapperPass *DT =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
AssumptionCache &AC =
getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
return simplifyAssumes(F, &AC, DT ? &DT->getDomTree() : nullptr);
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
};
char AssumeSimplifyPassLegacyPass::ID = 0;

// Register the legacy pass under the "assume-simplify" command-line name and
// declare its analysis dependencies.
INITIALIZE_PASS_BEGIN(AssumeSimplifyPassLegacyPass, "assume-simplify",
                      "Assume Simplify", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_END(AssumeSimplifyPassLegacyPass, "assume-simplify",
                    "Assume Simplify", false, false)
/// Create an instance of the legacy assume-simplify pass; the caller (the
/// legacy pass manager) takes ownership.
FunctionPass *llvm::createAssumeSimplifyPass() {
  return new AssumeSimplifyPassLegacyPass();
}
PreservedAnalyses AssumeBuilderPass::run(Function &F,
FunctionAnalysisManager &AM) {
AssumptionCache* AC = AM.getCachedResult<AssumptionAnalysis>(F);

View File

@ -24,6 +24,7 @@ using namespace llvm;
/// library.
void llvm::initializeTransformUtils(PassRegistry &Registry) {
initializeAddDiscriminatorsLegacyPassPass(Registry);
initializeAssumeSimplifyPassLegacyPassPass(Registry);
initializeBreakCriticalEdgesPass(Registry);
initializeCanonicalizeAliasesLegacyPassPass(Registry);
initializeInstNamerPass(Registry);

View File

@ -1,25 +1,28 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature
; RUN: opt -passes='assume-builder,verify' --enable-knowledge-retention -S %s | FileCheck %s --check-prefixes=BASIC
; RUN: opt -passes='assume-builder,verify' --enable-knowledge-retention --assume-preserve-all -S %s | FileCheck %s --check-prefixes=ALL
; RUN: opt -passes='require<assumptions>,assume-builder,verify' --enable-knowledge-retention -S %s | FileCheck %s --check-prefixes=WITH-AC
; RUN: opt -passes='require<domtree>,require<assumptions>,assume-builder,verify' --enable-knowledge-retention -S %s | FileCheck %s --check-prefixes=CROSS-BLOCK
; RUN: opt -passes='assume-builder,require<domtree>,require<assumptions>,assume-simplify,verify' --enable-knowledge-retention -S %s | FileCheck %s --check-prefixes=FULL-SIMPLIFY
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
declare void @func(i32*, i32*)
declare void @func_cold(i32*) cold
declare void @func_cold(i32*) cold willreturn nounwind
declare void @func_strbool(i32*) "no-jump-tables"
declare void @func_many(i32*) "no-jump-tables" nounwind "less-precise-fpmad" willreturn norecurse
declare void @func_argattr(i32* align 8, i32* nonnull) nounwind
declare void @may_throw()
define void @test(i32* %P, i32* %P1, i32* %P2, i32* %P3) {
; BASIC-LABEL: @test(
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P:%.*]]), "dereferenceable"(i32* [[P]], i64 16) ]
; BASIC-LABEL: define {{[^@]+}}@test
; BASIC-SAME: (i32* [[P:%.*]], i32* [[P1:%.*]], i32* [[P2:%.*]], i32* [[P3:%.*]])
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P]]), "dereferenceable"(i32* [[P]], i64 16) ]
; BASIC-NEXT: call void @func(i32* nonnull dereferenceable(16) [[P]], i32* null)
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1:%.*]], i64 12), "nonnull"(i32* [[P]]) ]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 12), "nonnull"(i32* [[P]]) ]
; BASIC-NEXT: call void @func(i32* dereferenceable(12) [[P1]], i32* nonnull [[P]])
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 12), "cold"() ]
; BASIC-NEXT: call void @func_cold(i32* dereferenceable(12) [[P1]]) #0
; BASIC-NEXT: call void @func_cold(i32* dereferenceable(12) [[P1]]) #6
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 12), "cold"() ]
; BASIC-NEXT: call void @func_cold(i32* dereferenceable(12) [[P1]])
; BASIC-NEXT: call void @func(i32* [[P1]], i32* [[P]])
@ -28,20 +31,21 @@ define void @test(i32* %P, i32* %P1, i32* %P2, i32* %P3) {
; BASIC-NEXT: call void @func(i32* dereferenceable(32) [[P]], i32* dereferenceable(8) [[P]])
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[P1]], i64 8) ]
; BASIC-NEXT: call void @func_many(i32* align 8 [[P1]])
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[P2:%.*]], i64 8), "nonnull"(i32* [[P3:%.*]]) ]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[P2]], i64 8), "nonnull"(i32* [[P3]]) ]
; BASIC-NEXT: call void @func_argattr(i32* [[P2]], i32* [[P3]])
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P1]]), "nonnull"(i32* [[P]]) ]
; BASIC-NEXT: call void @func(i32* nonnull [[P1]], i32* nonnull [[P]])
; BASIC-NEXT: ret void
;
; ALL-LABEL: @test(
; ALL-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P:%.*]]), "dereferenceable"(i32* [[P]], i64 16) ]
; ALL-LABEL: define {{[^@]+}}@test
; ALL-SAME: (i32* [[P:%.*]], i32* [[P1:%.*]], i32* [[P2:%.*]], i32* [[P3:%.*]])
; ALL-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P]]), "dereferenceable"(i32* [[P]], i64 16) ]
; ALL-NEXT: call void @func(i32* nonnull dereferenceable(16) [[P]], i32* null)
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1:%.*]], i64 12), "nonnull"(i32* [[P]]) ]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 12), "nonnull"(i32* [[P]]) ]
; ALL-NEXT: call void @func(i32* dereferenceable(12) [[P1]], i32* nonnull [[P]])
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 12), "cold"() ]
; ALL-NEXT: call void @func_cold(i32* dereferenceable(12) [[P1]]) #0
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 12), "cold"() ]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 12), "cold"(), "nounwind"(), "willreturn"() ]
; ALL-NEXT: call void @func_cold(i32* dereferenceable(12) [[P1]]) #6
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 12), "cold"(), "nounwind"(), "willreturn"() ]
; ALL-NEXT: call void @func_cold(i32* dereferenceable(12) [[P1]])
; ALL-NEXT: call void @func(i32* [[P1]], i32* [[P]])
; ALL-NEXT: call void @func_strbool(i32* [[P1]])
@ -49,19 +53,20 @@ define void @test(i32* %P, i32* %P1, i32* %P2, i32* %P3) {
; ALL-NEXT: call void @func(i32* dereferenceable(32) [[P]], i32* dereferenceable(8) [[P]])
; ALL-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[P1]], i64 8), "norecurse"(), "nounwind"(), "willreturn"() ]
; ALL-NEXT: call void @func_many(i32* align 8 [[P1]])
; ALL-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[P2:%.*]], i64 8), "nonnull"(i32* [[P3:%.*]]), "nounwind"() ]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[P2]], i64 8), "nonnull"(i32* [[P3]]), "nounwind"() ]
; ALL-NEXT: call void @func_argattr(i32* [[P2]], i32* [[P3]])
; ALL-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P1]]), "nonnull"(i32* [[P]]) ]
; ALL-NEXT: call void @func(i32* nonnull [[P1]], i32* nonnull [[P]])
; ALL-NEXT: ret void
;
; WITH-AC-LABEL: @test(
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P:%.*]]), "dereferenceable"(i32* [[P]], i64 16) ]
; WITH-AC-LABEL: define {{[^@]+}}@test
; WITH-AC-SAME: (i32* [[P:%.*]], i32* [[P1:%.*]], i32* [[P2:%.*]], i32* [[P3:%.*]])
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P]]), "dereferenceable"(i32* [[P]], i64 16) ]
; WITH-AC-NEXT: call void @func(i32* nonnull dereferenceable(16) [[P]], i32* null)
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1:%.*]], i64 12) ]
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 12) ]
; WITH-AC-NEXT: call void @func(i32* dereferenceable(12) [[P1]], i32* nonnull [[P]])
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "cold"() ]
; WITH-AC-NEXT: call void @func_cold(i32* dereferenceable(12) [[P1]]) #0
; WITH-AC-NEXT: call void @func_cold(i32* dereferenceable(12) [[P1]]) #6
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "cold"() ]
; WITH-AC-NEXT: call void @func_cold(i32* dereferenceable(12) [[P1]])
; WITH-AC-NEXT: call void @func(i32* [[P1]], i32* [[P]])
@ -70,19 +75,20 @@ define void @test(i32* %P, i32* %P1, i32* %P2, i32* %P3) {
; WITH-AC-NEXT: call void @func(i32* dereferenceable(32) [[P]], i32* dereferenceable(8) [[P]])
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[P1]], i64 8) ]
; WITH-AC-NEXT: call void @func_many(i32* align 8 [[P1]])
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[P2:%.*]], i64 8), "nonnull"(i32* [[P3:%.*]]) ]
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[P2]], i64 8), "nonnull"(i32* [[P3]]) ]
; WITH-AC-NEXT: call void @func_argattr(i32* [[P2]], i32* [[P3]])
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P1]]) ]
; WITH-AC-NEXT: call void @func(i32* nonnull [[P1]], i32* nonnull [[P]])
; WITH-AC-NEXT: ret void
;
; CROSS-BLOCK-LABEL: @test(
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P:%.*]]), "dereferenceable"(i32* [[P]], i64 16) ]
; CROSS-BLOCK-LABEL: define {{[^@]+}}@test
; CROSS-BLOCK-SAME: (i32* [[P:%.*]], i32* [[P1:%.*]], i32* [[P2:%.*]], i32* [[P3:%.*]])
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P]]), "dereferenceable"(i32* [[P]], i64 16) ]
; CROSS-BLOCK-NEXT: call void @func(i32* nonnull dereferenceable(16) [[P]], i32* null)
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1:%.*]], i64 12) ]
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 12) ]
; CROSS-BLOCK-NEXT: call void @func(i32* dereferenceable(12) [[P1]], i32* nonnull [[P]])
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "cold"() ]
; CROSS-BLOCK-NEXT: call void @func_cold(i32* dereferenceable(12) [[P1]]) #0
; CROSS-BLOCK-NEXT: call void @func_cold(i32* dereferenceable(12) [[P1]]) #6
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "cold"() ]
; CROSS-BLOCK-NEXT: call void @func_cold(i32* dereferenceable(12) [[P1]])
; CROSS-BLOCK-NEXT: call void @func(i32* [[P1]], i32* [[P]])
@ -91,13 +97,31 @@ define void @test(i32* %P, i32* %P1, i32* %P2, i32* %P3) {
; CROSS-BLOCK-NEXT: call void @func(i32* dereferenceable(32) [[P]], i32* dereferenceable(8) [[P]])
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[P1]], i64 8) ]
; CROSS-BLOCK-NEXT: call void @func_many(i32* align 8 [[P1]])
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[P2:%.*]], i64 8), "nonnull"(i32* [[P3:%.*]]) ]
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[P2]], i64 8), "nonnull"(i32* [[P3]]) ]
; CROSS-BLOCK-NEXT: call void @func_argattr(i32* [[P2]], i32* [[P3]])
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P1]]) ]
; CROSS-BLOCK-NEXT: call void @func(i32* nonnull [[P1]], i32* nonnull [[P]])
; CROSS-BLOCK-NEXT: ret void
;
; FULL-SIMPLIFY-LABEL: define {{[^@]+}}@test
; FULL-SIMPLIFY-SAME: (i32* nonnull dereferenceable(16) [[P:%.*]], i32* [[P1:%.*]], i32* [[P2:%.*]], i32* [[P3:%.*]])
; FULL-SIMPLIFY-NEXT: call void @func(i32* nonnull dereferenceable(16) [[P]], i32* null)
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 12), "ignore"(i32* undef) ]
; FULL-SIMPLIFY-NEXT: call void @func(i32* dereferenceable(12) [[P1]], i32* nonnull [[P]])
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "ignore"(i32* undef, i64 12), "cold"() ]
; FULL-SIMPLIFY-NEXT: call void @func_cold(i32* dereferenceable(12) [[P1]]) #6
; FULL-SIMPLIFY-NEXT: call void @func_cold(i32* dereferenceable(12) [[P1]])
; FULL-SIMPLIFY-NEXT: call void @func(i32* [[P1]], i32* [[P]])
; FULL-SIMPLIFY-NEXT: call void @func_strbool(i32* [[P1]])
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 32) ]
; FULL-SIMPLIFY-NEXT: call void @func(i32* dereferenceable(32) [[P]], i32* dereferenceable(8) [[P]])
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[P1]], i64 8), "align"(i32* [[P2]], i64 8), "nonnull"(i32* [[P3]]) ]
; FULL-SIMPLIFY-NEXT: call void @func_many(i32* align 8 [[P1]])
; FULL-SIMPLIFY-NEXT: call void @func_argattr(i32* [[P2]], i32* [[P3]])
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P1]]), "ignore"(i32* undef) ]
; FULL-SIMPLIFY-NEXT: call void @func(i32* nonnull [[P1]], i32* nonnull [[P]])
; FULL-SIMPLIFY-NEXT: ret void
;
call void @func(i32* nonnull dereferenceable(16) %P, i32* null)
call void @func(i32* dereferenceable(12) %P1, i32* nonnull %P)
call void @func_cold(i32* dereferenceable(12) %P1) cold
@ -114,17 +138,18 @@ define void @test(i32* %P, i32* %P1, i32* %P2, i32* %P3) {
%struct.S = type { i32, i8, i32* }
define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
; BASIC-LABEL: @test2(
; BASIC-LABEL: define {{[^@]+}}@test2
; BASIC-SAME: (%struct.S* [[TMP0:%.*]], i32* [[TMP1:%.*]], i8* [[TMP2:%.*]])
; BASIC-NEXT: [[TMP4:%.*]] = alloca %struct.S*, align 8
; BASIC-NEXT: [[TMP5:%.*]] = alloca i32*, align 8
; BASIC-NEXT: [[TMP6:%.*]] = alloca i8*, align 8
; BASIC-NEXT: [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8) ]
; BASIC-NEXT: store %struct.S* [[TMP0:%.*]], %struct.S** [[TMP4]], align 8
; BASIC-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]), "align"(i32** [[TMP5]], i64 8) ]
; BASIC-NEXT: store i32* [[TMP1:%.*]], i32** [[TMP5]], align 8
; BASIC-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]) ]
; BASIC-NEXT: store i8* [[TMP2:%.*]], i8** [[TMP6]]
; BASIC-NEXT: store i8* [[TMP2]], i8** [[TMP6]]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]), "align"(i32** [[TMP5]], i64 8) ]
; BASIC-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "nonnull"(i32* [[TMP8]]), "align"(i32* [[TMP8]], i64 4) ]
@ -161,17 +186,18 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
; BASIC-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
; BASIC-NEXT: ret i32 [[TMP28]]
;
; ALL-LABEL: @test2(
; ALL-LABEL: define {{[^@]+}}@test2
; ALL-SAME: (%struct.S* [[TMP0:%.*]], i32* [[TMP1:%.*]], i8* [[TMP2:%.*]])
; ALL-NEXT: [[TMP4:%.*]] = alloca %struct.S*, align 8
; ALL-NEXT: [[TMP5:%.*]] = alloca i32*, align 8
; ALL-NEXT: [[TMP6:%.*]] = alloca i8*, align 8
; ALL-NEXT: [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8) ]
; ALL-NEXT: store %struct.S* [[TMP0:%.*]], %struct.S** [[TMP4]], align 8
; ALL-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]), "align"(i32** [[TMP5]], i64 8) ]
; ALL-NEXT: store i32* [[TMP1:%.*]], i32** [[TMP5]], align 8
; ALL-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]) ]
; ALL-NEXT: store i8* [[TMP2:%.*]], i8** [[TMP6]]
; ALL-NEXT: store i8* [[TMP2]], i8** [[TMP6]]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]), "align"(i32** [[TMP5]], i64 8) ]
; ALL-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "nonnull"(i32* [[TMP8]]), "align"(i32* [[TMP8]], i64 4) ]
@ -208,17 +234,18 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
; ALL-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
; ALL-NEXT: ret i32 [[TMP28]]
;
; WITH-AC-LABEL: @test2(
; WITH-AC-LABEL: define {{[^@]+}}@test2
; WITH-AC-SAME: (%struct.S* [[TMP0:%.*]], i32* [[TMP1:%.*]], i8* [[TMP2:%.*]])
; WITH-AC-NEXT: [[TMP4:%.*]] = alloca %struct.S*, align 8
; WITH-AC-NEXT: [[TMP5:%.*]] = alloca i32*, align 8
; WITH-AC-NEXT: [[TMP6:%.*]] = alloca i8*, align 8
; WITH-AC-NEXT: [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8) ]
; WITH-AC-NEXT: store %struct.S* [[TMP0:%.*]], %struct.S** [[TMP4]], align 8
; WITH-AC-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]), "align"(i32** [[TMP5]], i64 8) ]
; WITH-AC-NEXT: store i32* [[TMP1:%.*]], i32** [[TMP5]], align 8
; WITH-AC-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]) ]
; WITH-AC-NEXT: store i8* [[TMP2:%.*]], i8** [[TMP6]]
; WITH-AC-NEXT: store i8* [[TMP2]], i8** [[TMP6]]
; WITH-AC-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "nonnull"(i32* [[TMP8]]), "align"(i32* [[TMP8]], i64 4) ]
; WITH-AC-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
@ -250,17 +277,18 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
; WITH-AC-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
; WITH-AC-NEXT: ret i32 [[TMP28]]
;
; CROSS-BLOCK-LABEL: @test2(
; CROSS-BLOCK-LABEL: define {{[^@]+}}@test2
; CROSS-BLOCK-SAME: (%struct.S* [[TMP0:%.*]], i32* [[TMP1:%.*]], i8* [[TMP2:%.*]])
; CROSS-BLOCK-NEXT: [[TMP4:%.*]] = alloca %struct.S*, align 8
; CROSS-BLOCK-NEXT: [[TMP5:%.*]] = alloca i32*, align 8
; CROSS-BLOCK-NEXT: [[TMP6:%.*]] = alloca i8*, align 8
; CROSS-BLOCK-NEXT: [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8) ]
; CROSS-BLOCK-NEXT: store %struct.S* [[TMP0:%.*]], %struct.S** [[TMP4]], align 8
; CROSS-BLOCK-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]), "align"(i32** [[TMP5]], i64 8) ]
; CROSS-BLOCK-NEXT: store i32* [[TMP1:%.*]], i32** [[TMP5]], align 8
; CROSS-BLOCK-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]) ]
; CROSS-BLOCK-NEXT: store i8* [[TMP2:%.*]], i8** [[TMP6]]
; CROSS-BLOCK-NEXT: store i8* [[TMP2]], i8** [[TMP6]]
; CROSS-BLOCK-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "nonnull"(i32* [[TMP8]]), "align"(i32* [[TMP8]], i64 4) ]
; CROSS-BLOCK-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
@ -291,6 +319,40 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
; CROSS-BLOCK-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
; CROSS-BLOCK-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
; CROSS-BLOCK-NEXT: ret i32 [[TMP28]]
;
; FULL-SIMPLIFY-LABEL: define {{[^@]+}}@test2
; FULL-SIMPLIFY-SAME: (%struct.S* [[TMP0:%.*]], i32* [[TMP1:%.*]], i8* [[TMP2:%.*]])
; FULL-SIMPLIFY-NEXT: [[TMP4:%.*]] = alloca %struct.S*, align 8
; FULL-SIMPLIFY-NEXT: [[TMP5:%.*]] = alloca i32*, align 8
; FULL-SIMPLIFY-NEXT: [[TMP6:%.*]] = alloca i8*, align 8
; FULL-SIMPLIFY-NEXT: [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
; FULL-SIMPLIFY-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; FULL-SIMPLIFY-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; FULL-SIMPLIFY-NEXT: store i8* [[TMP2]], i8** [[TMP6]]
; FULL-SIMPLIFY-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; FULL-SIMPLIFY-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
; FULL-SIMPLIFY-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
; FULL-SIMPLIFY-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]], align 8
; FULL-SIMPLIFY-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
; FULL-SIMPLIFY-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; FULL-SIMPLIFY-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]]
; FULL-SIMPLIFY-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
; FULL-SIMPLIFY-NEXT: [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; FULL-SIMPLIFY-NEXT: [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; FULL-SIMPLIFY-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP16]], i32 0, i32 0
; FULL-SIMPLIFY-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 8
; FULL-SIMPLIFY-NEXT: [[TMP19:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; FULL-SIMPLIFY-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP19]], i32 0, i32 1
; FULL-SIMPLIFY-NEXT: [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 4
; FULL-SIMPLIFY-NEXT: [[TMP22:%.*]] = sext i8 [[TMP21]] to i32
; FULL-SIMPLIFY-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
; FULL-SIMPLIFY-NEXT: [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; FULL-SIMPLIFY-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
; FULL-SIMPLIFY-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]], align 8
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]), "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]), "dereferenceable"(i32* [[TMP8]], i64 4), "nonnull"(i32* [[TMP8]]), "align"(i32* [[TMP8]], i64 4), "align"(i8** [[TMP6]], i64 8), "dereferenceable"(i8* [[TMP11]], i64 1), "nonnull"(i8* [[TMP11]]), "dereferenceable"(i32* [[TMP17]], i64 4), "nonnull"(i32* [[TMP17]]), "align"(i32* [[TMP17]], i64 8), "dereferenceable"(i8* [[TMP20]], i64 1), "nonnull"(i8* [[TMP20]]), "align"(i8* [[TMP20]], i64 4), "dereferenceable"(i32** [[TMP25]], i64 8), "nonnull"(i32** [[TMP25]]), "align"(i32** [[TMP25]], i64 8), "dereferenceable"(i32* [[TMP26]], i64 4), "nonnull"(i32* [[TMP26]]), "align"(i32* [[TMP26]], i64 4) ]
; FULL-SIMPLIFY-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
; FULL-SIMPLIFY-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
; FULL-SIMPLIFY-NEXT: ret i32 [[TMP28]]
;
%4 = alloca %struct.S*, align 8
%5 = alloca i32*, align 8
@ -325,17 +387,18 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
}
define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true" {
; BASIC-LABEL: @test3(
; BASIC-LABEL: define {{[^@]+}}@test3
; BASIC-SAME: (%struct.S* [[TMP0:%.*]], i32* [[TMP1:%.*]], i8* [[TMP2:%.*]]) #4
; BASIC-NEXT: [[TMP4:%.*]] = alloca %struct.S*, align 8
; BASIC-NEXT: [[TMP5:%.*]] = alloca i32*, align 8
; BASIC-NEXT: [[TMP6:%.*]] = alloca i8*, align 8
; BASIC-NEXT: [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 8) ]
; BASIC-NEXT: store %struct.S* [[TMP0:%.*]], %struct.S** [[TMP4]], align 8
; BASIC-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "align"(i32** [[TMP5]], i64 8) ]
; BASIC-NEXT: store i32* [[TMP1:%.*]], i32** [[TMP5]], align 8
; BASIC-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "align"(i8** [[TMP6]], i64 8) ]
; BASIC-NEXT: store i8* [[TMP2:%.*]], i8** [[TMP6]], align 8
; BASIC-NEXT: store i8* [[TMP2]], i8** [[TMP6]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "align"(i32** [[TMP5]], i64 8) ]
; BASIC-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "align"(i32* [[TMP8]], i64 4) ]
@ -349,6 +412,7 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true"
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 32) ]
; BASIC-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 32
; BASIC-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
; BASIC-NEXT: call void @may_throw()
; BASIC-NEXT: [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 8) ]
; BASIC-NEXT: [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
@ -372,17 +436,18 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true"
; BASIC-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
; BASIC-NEXT: ret i32 [[TMP28]]
;
; ALL-LABEL: @test3(
; ALL-LABEL: define {{[^@]+}}@test3
; ALL-SAME: (%struct.S* [[TMP0:%.*]], i32* [[TMP1:%.*]], i8* [[TMP2:%.*]]) #4
; ALL-NEXT: [[TMP4:%.*]] = alloca %struct.S*, align 8
; ALL-NEXT: [[TMP5:%.*]] = alloca i32*, align 8
; ALL-NEXT: [[TMP6:%.*]] = alloca i8*, align 8
; ALL-NEXT: [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 8) ]
; ALL-NEXT: store %struct.S* [[TMP0:%.*]], %struct.S** [[TMP4]], align 8
; ALL-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "align"(i32** [[TMP5]], i64 8) ]
; ALL-NEXT: store i32* [[TMP1:%.*]], i32** [[TMP5]], align 8
; ALL-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "align"(i8** [[TMP6]], i64 8) ]
; ALL-NEXT: store i8* [[TMP2:%.*]], i8** [[TMP6]], align 8
; ALL-NEXT: store i8* [[TMP2]], i8** [[TMP6]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "align"(i32** [[TMP5]], i64 8) ]
; ALL-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "align"(i32* [[TMP8]], i64 4) ]
@ -396,6 +461,7 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true"
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 32) ]
; ALL-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 32
; ALL-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
; ALL-NEXT: call void @may_throw()
; ALL-NEXT: [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 8) ]
; ALL-NEXT: [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
@ -419,17 +485,18 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true"
; ALL-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
; ALL-NEXT: ret i32 [[TMP28]]
;
; WITH-AC-LABEL: @test3(
; WITH-AC-LABEL: define {{[^@]+}}@test3
; WITH-AC-SAME: (%struct.S* [[TMP0:%.*]], i32* [[TMP1:%.*]], i8* [[TMP2:%.*]]) #4
; WITH-AC-NEXT: [[TMP4:%.*]] = alloca %struct.S*, align 8
; WITH-AC-NEXT: [[TMP5:%.*]] = alloca i32*, align 8
; WITH-AC-NEXT: [[TMP6:%.*]] = alloca i8*, align 8
; WITH-AC-NEXT: [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 32) ]
; WITH-AC-NEXT: store %struct.S* [[TMP0:%.*]], %struct.S** [[TMP4]], align 8
; WITH-AC-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "align"(i32** [[TMP5]], i64 8) ]
; WITH-AC-NEXT: store i32* [[TMP1:%.*]], i32** [[TMP5]], align 8
; WITH-AC-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "align"(i8** [[TMP6]], i64 8) ]
; WITH-AC-NEXT: store i8* [[TMP2:%.*]], i8** [[TMP6]], align 8
; WITH-AC-NEXT: store i8* [[TMP2]], i8** [[TMP6]], align 8
; WITH-AC-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "align"(i32* [[TMP8]], i64 4) ]
; WITH-AC-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
@ -440,6 +507,7 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true"
; WITH-AC-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; WITH-AC-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 32
; WITH-AC-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
; WITH-AC-NEXT: call void @may_throw()
; WITH-AC-NEXT: [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; WITH-AC-NEXT: [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; WITH-AC-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP16]], i32 0, i32 0
@ -460,17 +528,18 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true"
; WITH-AC-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
; WITH-AC-NEXT: ret i32 [[TMP28]]
;
; CROSS-BLOCK-LABEL: @test3(
; CROSS-BLOCK-LABEL: define {{[^@]+}}@test3
; CROSS-BLOCK-SAME: (%struct.S* [[TMP0:%.*]], i32* [[TMP1:%.*]], i8* [[TMP2:%.*]]) #4
; CROSS-BLOCK-NEXT: [[TMP4:%.*]] = alloca %struct.S*, align 8
; CROSS-BLOCK-NEXT: [[TMP5:%.*]] = alloca i32*, align 8
; CROSS-BLOCK-NEXT: [[TMP6:%.*]] = alloca i8*, align 8
; CROSS-BLOCK-NEXT: [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 32) ]
; CROSS-BLOCK-NEXT: store %struct.S* [[TMP0:%.*]], %struct.S** [[TMP4]], align 8
; CROSS-BLOCK-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "align"(i32** [[TMP5]], i64 8) ]
; CROSS-BLOCK-NEXT: store i32* [[TMP1:%.*]], i32** [[TMP5]], align 8
; CROSS-BLOCK-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "align"(i8** [[TMP6]], i64 8) ]
; CROSS-BLOCK-NEXT: store i8* [[TMP2:%.*]], i8** [[TMP6]], align 8
; CROSS-BLOCK-NEXT: store i8* [[TMP2]], i8** [[TMP6]], align 8
; CROSS-BLOCK-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "align"(i32* [[TMP8]], i64 4) ]
; CROSS-BLOCK-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
@ -481,6 +550,7 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true"
; CROSS-BLOCK-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; CROSS-BLOCK-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 32
; CROSS-BLOCK-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
; CROSS-BLOCK-NEXT: call void @may_throw()
; CROSS-BLOCK-NEXT: [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; CROSS-BLOCK-NEXT: [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; CROSS-BLOCK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP16]], i32 0, i32 0
@ -500,6 +570,42 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true"
; CROSS-BLOCK-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
; CROSS-BLOCK-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
; CROSS-BLOCK-NEXT: ret i32 [[TMP28]]
;
; FULL-SIMPLIFY-LABEL: define {{[^@]+}}@test3
; FULL-SIMPLIFY-SAME: (%struct.S* [[TMP0:%.*]], i32* [[TMP1:%.*]], i8* [[TMP2:%.*]]) #4
; FULL-SIMPLIFY-NEXT: [[TMP4:%.*]] = alloca %struct.S*, align 8
; FULL-SIMPLIFY-NEXT: [[TMP5:%.*]] = alloca i32*, align 8
; FULL-SIMPLIFY-NEXT: [[TMP6:%.*]] = alloca i8*, align 8
; FULL-SIMPLIFY-NEXT: [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
; FULL-SIMPLIFY-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; FULL-SIMPLIFY-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; FULL-SIMPLIFY-NEXT: store i8* [[TMP2]], i8** [[TMP6]], align 8
; FULL-SIMPLIFY-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; FULL-SIMPLIFY-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
; FULL-SIMPLIFY-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
; FULL-SIMPLIFY-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]]
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 32), "dereferenceable"(i32** [[TMP5]], i64 8), "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i8** [[TMP6]], i64 8), "align"(i8** [[TMP6]], i64 8), "dereferenceable"(i32* [[TMP8]], i64 4), "align"(i32* [[TMP8]], i64 4), "dereferenceable"(i8* [[TMP11]], i64 1) ]
; FULL-SIMPLIFY-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
; FULL-SIMPLIFY-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; FULL-SIMPLIFY-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 32
; FULL-SIMPLIFY-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
; FULL-SIMPLIFY-NEXT: call void @may_throw()
; FULL-SIMPLIFY-NEXT: [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; FULL-SIMPLIFY-NEXT: [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; FULL-SIMPLIFY-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP16]], i32 0, i32 0
; FULL-SIMPLIFY-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 8
; FULL-SIMPLIFY-NEXT: [[TMP19:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; FULL-SIMPLIFY-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP19]], i32 0, i32 1
; FULL-SIMPLIFY-NEXT: [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 4
; FULL-SIMPLIFY-NEXT: [[TMP22:%.*]] = sext i8 [[TMP21]] to i32
; FULL-SIMPLIFY-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
; FULL-SIMPLIFY-NEXT: [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; FULL-SIMPLIFY-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
; FULL-SIMPLIFY-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]]
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP17]], i64 4), "align"(i32* [[TMP17]], i64 8), "dereferenceable"(i8* [[TMP20]], i64 1), "align"(i8* [[TMP20]], i64 4), "dereferenceable"(i32** [[TMP25]], i64 8), "dereferenceable"(i32* [[TMP26]], i64 4), "align"(i32* [[TMP26]], i64 4) ]
; FULL-SIMPLIFY-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
; FULL-SIMPLIFY-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
; FULL-SIMPLIFY-NEXT: ret i32 [[TMP28]]
;
%4 = alloca %struct.S*, align 8
%5 = alloca i32*, align 8
@ -516,6 +622,7 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true"
%12 = bitcast %struct.S* %7 to i8*
%13 = load %struct.S*, %struct.S** %4, align 32
%14 = bitcast %struct.S* %13 to i8*
call void @may_throw()
%15 = bitcast %struct.S* %7 to i8*
%16 = load %struct.S*, %struct.S** %4, align 8
%17 = getelementptr inbounds %struct.S, %struct.S* %16, i32 0, i32 0
@ -534,12 +641,13 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true"
}
define dso_local i32 @_Z6squarePi(i32* %P, i32* %P1, i1 %cond) {
; BASIC-LABEL: @_Z6squarePi(
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P:%.*]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
; BASIC-LABEL: define {{[^@]+}}@_Z6squarePi
; BASIC-SAME: (i32* [[P:%.*]], i32* [[P1:%.*]], i1 [[COND:%.*]])
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
; BASIC-NEXT: store i32 0, i32* [[P]], align 4
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1:%.*]], i64 4), "nonnull"(i32* [[P1]]), "align"(i32* [[P1]], i64 8) ]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 4), "nonnull"(i32* [[P1]]), "align"(i32* [[P1]], i64 8) ]
; BASIC-NEXT: store i32 0, i32* [[P1]], align 8
; BASIC-NEXT: br i1 [[COND:%.*]], label [[A:%.*]], label [[B:%.*]]
; BASIC-NEXT: br i1 [[COND]], label [[A:%.*]], label [[B:%.*]]
; BASIC: A:
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 8) ]
; BASIC-NEXT: store i32 0, i32* [[P]], align 8
@ -559,12 +667,13 @@ define dso_local i32 @_Z6squarePi(i32* %P, i32* %P1, i1 %cond) {
; BASIC-NEXT: store i32 0, i32* [[P1]], align 4
; BASIC-NEXT: ret i32 0
;
; ALL-LABEL: @_Z6squarePi(
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P:%.*]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
; ALL-LABEL: define {{[^@]+}}@_Z6squarePi
; ALL-SAME: (i32* [[P:%.*]], i32* [[P1:%.*]], i1 [[COND:%.*]])
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
; ALL-NEXT: store i32 0, i32* [[P]], align 4
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1:%.*]], i64 4), "nonnull"(i32* [[P1]]), "align"(i32* [[P1]], i64 8) ]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 4), "nonnull"(i32* [[P1]]), "align"(i32* [[P1]], i64 8) ]
; ALL-NEXT: store i32 0, i32* [[P1]], align 8
; ALL-NEXT: br i1 [[COND:%.*]], label [[A:%.*]], label [[B:%.*]]
; ALL-NEXT: br i1 [[COND]], label [[A:%.*]], label [[B:%.*]]
; ALL: A:
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 8) ]
; ALL-NEXT: store i32 0, i32* [[P]], align 8
@ -584,12 +693,13 @@ define dso_local i32 @_Z6squarePi(i32* %P, i32* %P1, i1 %cond) {
; ALL-NEXT: store i32 0, i32* [[P1]], align 4
; ALL-NEXT: ret i32 0
;
; WITH-AC-LABEL: @_Z6squarePi(
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P:%.*]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
; WITH-AC-LABEL: define {{[^@]+}}@_Z6squarePi
; WITH-AC-SAME: (i32* [[P:%.*]], i32* [[P1:%.*]], i1 [[COND:%.*]])
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
; WITH-AC-NEXT: store i32 0, i32* [[P]], align 4
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1:%.*]], i64 4), "nonnull"(i32* [[P1]]), "align"(i32* [[P1]], i64 8) ]
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 4), "nonnull"(i32* [[P1]]), "align"(i32* [[P1]], i64 8) ]
; WITH-AC-NEXT: store i32 0, i32* [[P1]], align 8
; WITH-AC-NEXT: br i1 [[COND:%.*]], label [[A:%.*]], label [[B:%.*]]
; WITH-AC-NEXT: br i1 [[COND]], label [[A:%.*]], label [[B:%.*]]
; WITH-AC: A:
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[P]], i64 8) ]
; WITH-AC-NEXT: store i32 0, i32* [[P]], align 8
@ -608,12 +718,13 @@ define dso_local i32 @_Z6squarePi(i32* %P, i32* %P1, i1 %cond) {
; WITH-AC-NEXT: store i32 0, i32* [[P1]], align 4
; WITH-AC-NEXT: ret i32 0
;
; CROSS-BLOCK-LABEL: @_Z6squarePi(
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P:%.*]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
; CROSS-BLOCK-LABEL: define {{[^@]+}}@_Z6squarePi
; CROSS-BLOCK-SAME: (i32* [[P:%.*]], i32* [[P1:%.*]], i1 [[COND:%.*]])
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
; CROSS-BLOCK-NEXT: store i32 0, i32* [[P]], align 4
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1:%.*]], i64 4), "nonnull"(i32* [[P1]]), "align"(i32* [[P1]], i64 8) ]
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 4), "nonnull"(i32* [[P1]]), "align"(i32* [[P1]], i64 8) ]
; CROSS-BLOCK-NEXT: store i32 0, i32* [[P1]], align 8
; CROSS-BLOCK-NEXT: br i1 [[COND:%.*]], label [[A:%.*]], label [[B:%.*]]
; CROSS-BLOCK-NEXT: br i1 [[COND]], label [[A:%.*]], label [[B:%.*]]
; CROSS-BLOCK: A:
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[P]], i64 8) ]
; CROSS-BLOCK-NEXT: store i32 0, i32* [[P]], align 8
@ -629,6 +740,27 @@ define dso_local i32 @_Z6squarePi(i32* %P, i32* %P1, i1 %cond) {
; CROSS-BLOCK-NEXT: store i32 0, i32* [[P]], align 32
; CROSS-BLOCK-NEXT: store i32 0, i32* [[P1]], align 4
; CROSS-BLOCK-NEXT: ret i32 0
;
; FULL-SIMPLIFY-LABEL: define {{[^@]+}}@_Z6squarePi
; FULL-SIMPLIFY-SAME: (i32* nonnull align 4 dereferenceable(4) [[P:%.*]], i32* nonnull align 8 dereferenceable(4) [[P1:%.*]], i1 [[COND:%.*]])
; FULL-SIMPLIFY-NEXT: store i32 0, i32* [[P]], align 4
; FULL-SIMPLIFY-NEXT: store i32 0, i32* [[P1]], align 8
; FULL-SIMPLIFY-NEXT: br i1 [[COND]], label [[A:%.*]], label [[B:%.*]]
; FULL-SIMPLIFY: A:
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "ignore"(i32* undef, i64 4), "ignore"(i32* undef), "align"(i32* [[P]], i64 8) ]
; FULL-SIMPLIFY-NEXT: store i32 0, i32* [[P]], align 8
; FULL-SIMPLIFY-NEXT: store i32 0, i32* [[P1]], align 4
; FULL-SIMPLIFY-NEXT: br i1 [[COND]], label [[C:%.*]], label [[B]]
; FULL-SIMPLIFY: B:
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "ignore"(i32* undef, i64 4), "ignore"(i32* undef), "align"(i32* [[P]], i64 8) ]
; FULL-SIMPLIFY-NEXT: store i32 0, i32* [[P]], align 8
; FULL-SIMPLIFY-NEXT: store i32 0, i32* [[P1]], align 8
; FULL-SIMPLIFY-NEXT: br label [[C]]
; FULL-SIMPLIFY: C:
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "ignore"(i32* undef, i64 4), "ignore"(i32* undef), "align"(i32* [[P]], i64 32) ]
; FULL-SIMPLIFY-NEXT: store i32 0, i32* [[P]], align 32
; FULL-SIMPLIFY-NEXT: store i32 0, i32* [[P1]], align 4
; FULL-SIMPLIFY-NEXT: ret i32 0
;
store i32 0, i32* %P, align 4
store i32 0, i32* %P1, align 8
@ -646,3 +778,126 @@ C:
store i32 0, i32* %P1, align 4
ret i32 0
}
define dso_local i32 @test4A(i32* %0, i32* %1, i32 %2, i32 %3) {
; BASIC-LABEL: define {{[^@]+}}@test4A
; BASIC-SAME: (i32* [[TMP0:%.*]], i32* [[TMP1:%.*]], i32 [[TMP2:%.*]], i32 [[TMP3:%.*]])
; BASIC-NEXT: [[TMP5:%.*]] = icmp ne i32* [[TMP1]], null
; BASIC-NEXT: br i1 [[TMP5]], label [[TMP6:%.*]], label [[TMP12:%.*]]
; BASIC: 6:
; BASIC-NEXT: [[TMP7:%.*]] = add nsw i32 [[TMP3]], [[TMP2]]
; BASIC-NEXT: call void @may_throw()
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP0]], i64 4), "nonnull"(i32* [[TMP0]]), "align"(i32* [[TMP0]], i64 4) ]
; BASIC-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 4
; BASIC-NEXT: [[TMP9:%.*]] = add nsw i32 [[TMP7]], [[TMP8]]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP0]], i64 4), "nonnull"(i32* [[TMP0]]), "align"(i32* [[TMP0]], i64 4) ]
; BASIC-NEXT: store i32 0, i32* [[TMP0]], align 4
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP1]], i64 4), "nonnull"(i32* [[TMP1]]), "align"(i32* [[TMP1]], i64 4) ]
; BASIC-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP1]], align 4
; BASIC-NEXT: [[TMP11:%.*]] = add nsw i32 [[TMP9]], [[TMP10]]
; BASIC-NEXT: call void @may_throw()
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP1]], i64 4), "nonnull"(i32* [[TMP1]]), "align"(i32* [[TMP1]], i64 4) ]
; BASIC-NEXT: store i32 [[TMP11]], i32* [[TMP1]], align 4
; BASIC-NEXT: br label [[TMP12]]
; BASIC: 12:
; BASIC-NEXT: ret i32 0
;
; ALL-LABEL: define {{[^@]+}}@test4A
; ALL-SAME: (i32* [[TMP0:%.*]], i32* [[TMP1:%.*]], i32 [[TMP2:%.*]], i32 [[TMP3:%.*]])
; ALL-NEXT: [[TMP5:%.*]] = icmp ne i32* [[TMP1]], null
; ALL-NEXT: br i1 [[TMP5]], label [[TMP6:%.*]], label [[TMP12:%.*]]
; ALL: 6:
; ALL-NEXT: [[TMP7:%.*]] = add nsw i32 [[TMP3]], [[TMP2]]
; ALL-NEXT: call void @may_throw()
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP0]], i64 4), "nonnull"(i32* [[TMP0]]), "align"(i32* [[TMP0]], i64 4) ]
; ALL-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 4
; ALL-NEXT: [[TMP9:%.*]] = add nsw i32 [[TMP7]], [[TMP8]]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP0]], i64 4), "nonnull"(i32* [[TMP0]]), "align"(i32* [[TMP0]], i64 4) ]
; ALL-NEXT: store i32 0, i32* [[TMP0]], align 4
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP1]], i64 4), "nonnull"(i32* [[TMP1]]), "align"(i32* [[TMP1]], i64 4) ]
; ALL-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP1]], align 4
; ALL-NEXT: [[TMP11:%.*]] = add nsw i32 [[TMP9]], [[TMP10]]
; ALL-NEXT: call void @may_throw()
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP1]], i64 4), "nonnull"(i32* [[TMP1]]), "align"(i32* [[TMP1]], i64 4) ]
; ALL-NEXT: store i32 [[TMP11]], i32* [[TMP1]], align 4
; ALL-NEXT: br label [[TMP12]]
; ALL: 12:
; ALL-NEXT: ret i32 0
;
; WITH-AC-LABEL: define {{[^@]+}}@test4A
; WITH-AC-SAME: (i32* [[TMP0:%.*]], i32* [[TMP1:%.*]], i32 [[TMP2:%.*]], i32 [[TMP3:%.*]])
; WITH-AC-NEXT: [[TMP5:%.*]] = icmp ne i32* [[TMP1]], null
; WITH-AC-NEXT: br i1 [[TMP5]], label [[TMP6:%.*]], label [[TMP12:%.*]]
; WITH-AC: 6:
; WITH-AC-NEXT: [[TMP7:%.*]] = add nsw i32 [[TMP3]], [[TMP2]]
; WITH-AC-NEXT: call void @may_throw()
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP0]], i64 4), "nonnull"(i32* [[TMP0]]), "align"(i32* [[TMP0]], i64 4) ]
; WITH-AC-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 4
; WITH-AC-NEXT: [[TMP9:%.*]] = add nsw i32 [[TMP7]], [[TMP8]]
; WITH-AC-NEXT: store i32 0, i32* [[TMP0]], align 4
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP1]], i64 4), "nonnull"(i32* [[TMP1]]), "align"(i32* [[TMP1]], i64 4) ]
; WITH-AC-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP1]], align 4
; WITH-AC-NEXT: [[TMP11:%.*]] = add nsw i32 [[TMP9]], [[TMP10]]
; WITH-AC-NEXT: call void @may_throw()
; WITH-AC-NEXT: store i32 [[TMP11]], i32* [[TMP1]], align 4
; WITH-AC-NEXT: br label [[TMP12]]
; WITH-AC: 12:
; WITH-AC-NEXT: ret i32 0
;
; CROSS-BLOCK-LABEL: define {{[^@]+}}@test4A
; CROSS-BLOCK-SAME: (i32* [[TMP0:%.*]], i32* [[TMP1:%.*]], i32 [[TMP2:%.*]], i32 [[TMP3:%.*]])
; CROSS-BLOCK-NEXT: [[TMP5:%.*]] = icmp ne i32* [[TMP1]], null
; CROSS-BLOCK-NEXT: br i1 [[TMP5]], label [[TMP6:%.*]], label [[TMP12:%.*]]
; CROSS-BLOCK: 6:
; CROSS-BLOCK-NEXT: [[TMP7:%.*]] = add nsw i32 [[TMP3]], [[TMP2]]
; CROSS-BLOCK-NEXT: call void @may_throw()
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP0]], i64 4), "nonnull"(i32* [[TMP0]]), "align"(i32* [[TMP0]], i64 4) ]
; CROSS-BLOCK-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 4
; CROSS-BLOCK-NEXT: [[TMP9:%.*]] = add nsw i32 [[TMP7]], [[TMP8]]
; CROSS-BLOCK-NEXT: store i32 0, i32* [[TMP0]], align 4
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP1]], i64 4), "nonnull"(i32* [[TMP1]]), "align"(i32* [[TMP1]], i64 4) ]
; CROSS-BLOCK-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP1]], align 4
; CROSS-BLOCK-NEXT: [[TMP11:%.*]] = add nsw i32 [[TMP9]], [[TMP10]]
; CROSS-BLOCK-NEXT: call void @may_throw()
; CROSS-BLOCK-NEXT: store i32 [[TMP11]], i32* [[TMP1]], align 4
; CROSS-BLOCK-NEXT: br label [[TMP12]]
; CROSS-BLOCK: 12:
; CROSS-BLOCK-NEXT: ret i32 0
;
; FULL-SIMPLIFY-LABEL: define {{[^@]+}}@test4A
; FULL-SIMPLIFY-SAME: (i32* [[TMP0:%.*]], i32* [[TMP1:%.*]], i32 [[TMP2:%.*]], i32 [[TMP3:%.*]])
; FULL-SIMPLIFY-NEXT: [[TMP5:%.*]] = icmp ne i32* [[TMP1]], null
; FULL-SIMPLIFY-NEXT: br i1 [[TMP5]], label [[TMP6:%.*]], label [[TMP12:%.*]]
; FULL-SIMPLIFY: 6:
; FULL-SIMPLIFY-NEXT: [[TMP7:%.*]] = add nsw i32 [[TMP3]], [[TMP2]]
; FULL-SIMPLIFY-NEXT: call void @may_throw()
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP0]], i64 4), "nonnull"(i32* [[TMP0]]), "align"(i32* [[TMP0]], i64 4), "dereferenceable"(i32* [[TMP1]], i64 4), "nonnull"(i32* [[TMP1]]), "align"(i32* [[TMP1]], i64 4) ]
; FULL-SIMPLIFY-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 4
; FULL-SIMPLIFY-NEXT: [[TMP9:%.*]] = add nsw i32 [[TMP7]], [[TMP8]]
; FULL-SIMPLIFY-NEXT: store i32 0, i32* [[TMP0]], align 4
; FULL-SIMPLIFY-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP1]], align 4
; FULL-SIMPLIFY-NEXT: [[TMP11:%.*]] = add nsw i32 [[TMP9]], [[TMP10]]
; FULL-SIMPLIFY-NEXT: call void @may_throw()
; FULL-SIMPLIFY-NEXT: store i32 [[TMP11]], i32* [[TMP1]], align 4
; FULL-SIMPLIFY-NEXT: br label [[TMP12]]
; FULL-SIMPLIFY: 12:
; FULL-SIMPLIFY-NEXT: ret i32 0
;
%5 = icmp ne i32* %1, null
br i1 %5, label %6, label %12
6: ; preds = %4
%7 = add nsw i32 %3, %2
call void @may_throw()
%8 = load i32, i32* %0, align 4
%9 = add nsw i32 %7, %8
store i32 0, i32* %0, align 4
%10 = load i32, i32* %1, align 4
%11 = add nsw i32 %9, %10
call void @may_throw()
store i32 %11, i32* %1, align 4
br label %12
12: ; preds = %6, %4
ret i32 0
}

View File

@ -0,0 +1,315 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature
; RUN: opt -passes='require<domtree>,require<assumptions>,assume-simplify,verify' --enable-knowledge-retention -S %s | FileCheck %s
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
declare void @may_throw()
declare void @llvm.assume(i1)
; Knowledge that holds on function entry (the "dereferenceable"/"nonnull"
; bundles on %0 in the first assume below) is expected to be promoted to
; parameter attributes on %0. The remaining assumes are expected to be merged
; per basic block, and bundle operands made redundant by the promoted
; attributes rewritten to "ignore"(... undef ...) rather than deleted.
define i32 @test1(i32* %0, i32* %1, i32 %2, i32 %3) {
; CHECK-LABEL: define {{[^@]+}}@test1
; CHECK-SAME: (i32* nonnull dereferenceable(4) [[TMP0:%.*]], i32* [[TMP1:%.*]], i32 [[TMP2:%.*]], i32 [[TMP3:%.*]])
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP2]], 4
; CHECK-NEXT: br i1 [[TMP5]], label [[TMP6:%.*]], label [[A:%.*]]
; CHECK: 6:
; CHECK-NEXT: [[TMP7:%.*]] = add nsw i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT: call void @may_throw()
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[TMP0]], i64 4), "align"(i32* [[TMP1]], i64 4), "nonnull"(i32* [[TMP1]]) ]
; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 4
; CHECK-NEXT: [[TMP9:%.*]] = add nsw i32 [[TMP7]], [[TMP8]]
; CHECK-NEXT: store i32 0, i32* [[TMP0]], align 4
; CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP1]], align 4
; CHECK-NEXT: [[TMP11:%.*]] = add nsw i32 [[TMP9]], [[TMP10]]
; CHECK-NEXT: call void @may_throw()
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP1]], i64 4), "ignore"(i32* undef) ]
; CHECK-NEXT: store i32 [[TMP11]], i32* [[TMP1]], align 4
; CHECK-NEXT: br label [[B:%.*]]
; CHECK: A:
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[TMP0]], i64 4), "ignore"(i32* undef, i64 4), "ignore"(i32* undef) ]
; CHECK-NEXT: br label [[B]]
; CHECK: B:
; CHECK-NEXT: ret i32 0
;
%5 = icmp ne i32 %2, 4
; This entry-block assume is provable on all paths and should move into the
; signature of @test1 as parameter attributes.
call void @llvm.assume(i1 true) ["dereferenceable"(i32* %0, i64 4), "nonnull"(i32* %0) ]
br i1 %5, label %6, label %A
6: ; preds = %4
%7 = add nsw i32 %3, %2
call void @may_throw()
%8 = load i32, i32* %0, align 4
%9 = add nsw i32 %7, %8
store i32 0, i32* %0, align 4
; The next two assumes are in the same block and should be merged into one.
call void @llvm.assume(i1 true) [ "align"(i32* %0, i64 4), "dereferenceable"(i32* %0, i64 4) ]
%10 = load i32, i32* %1, align 4
%11 = add nsw i32 %9, %10
call void @llvm.assume(i1 true) [ "align"(i32* %1, i64 4), "nonnull"(i32* %1) ]
; may_throw() is a merge barrier: the assume after it stays separate.
call void @may_throw()
call void @llvm.assume(i1 true) [ "dereferenceable"(i32* %1, i64 4), "nonnull"(i32* %1) ]
store i32 %11, i32* %1, align 4
br label %B
A:
call void @llvm.assume(i1 true) [ "align"(i32* %0, i64 4), "dereferenceable"(i32* %0, i64 4), "nonnull"(i32* %0) ]
br label %B
B: ; preds = %6, %4
ret i32 0
}
; Larger CFG with GEP/load pointer chains. Knowledge about %1 from the two
; entry-block assumes should be promoted to parameter attributes
; (nonnull align 4 dereferenceable(4)); assumes within each block should be
; merged, sinking past unrelated instructions but staying adjacent to the
; values they describe.
define i32 @test2(i32** %0, i32* %1, i32 %2, i32 %3) {
; CHECK-LABEL: define {{[^@]+}}@test2
; CHECK-SAME: (i32** [[TMP0:%.*]], i32* nonnull align 4 dereferenceable(4) [[TMP1:%.*]], i32 [[TMP2:%.*]], i32 [[TMP3:%.*]])
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 0
; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = icmp ne i32 [[TMP6]], 0
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 0
; CHECK-NEXT: br i1 [[TMP7]], label [[TMP9:%.*]], label [[TMP19:%.*]]
; CHECK: 9:
; CHECK-NEXT: call void @may_throw()
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[TMP8]], i64 4), "dereferenceable"(i32* [[TMP8]], i64 4), "nonnull"(i32* [[TMP8]]) ]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]], align 4
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 2
; CHECK-NEXT: store i32 [[TMP10]], i32* [[TMP11]], align 4
; CHECK-NEXT: call void @may_throw()
; CHECK-NEXT: call void @may_throw()
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32*, i32** [[TMP0]], i64 1
; CHECK-NEXT: [[TMP13:%.*]] = load i32*, i32** [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, i32* [[TMP13]], i64 0
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[TMP11]], i64 4), "dereferenceable"(i32* [[TMP11]], i64 4), "nonnull"(i32* [[TMP11]]), "align"(i32* [[TMP14]], i64 4), "dereferenceable"(i32* [[TMP14]], i64 4), "nonnull"(i32* [[TMP14]]) ]
; CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32*, i32** [[TMP0]], i64 1
; CHECK-NEXT: [[TMP17:%.*]] = load i32*, i32** [[TMP16]], align 8
; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, i32* [[TMP17]], i64 2
; CHECK-NEXT: store i32 [[TMP15]], i32* [[TMP18]], align 4
; CHECK-NEXT: call void @may_throw()
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32** [[TMP0]], i64 4), "dereferenceable"(i32** [[TMP0]], i64 4), "nonnull"(i32** [[TMP0]]) ]
; CHECK-NEXT: br label [[TMP35:%.*]]
; CHECK: 19:
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32*, i32** [[TMP0]], i64 7
; CHECK-NEXT: [[TMP21:%.*]] = load i32*, i32** [[TMP20]], align 8
; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, i32* [[TMP21]], i64 0
; CHECK-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP22]], align 4
; CHECK-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
; CHECK-NEXT: br i1 [[TMP24]], label [[TMP25:%.*]], label [[TMP33:%.*]]
; CHECK: 25:
; CHECK-NEXT: call void @may_throw()
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32** [[TMP0]], i64 4), "dereferenceable"(i32** [[TMP0]], i64 4), "nonnull"(i32** [[TMP0]]) ]
; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32*, i32** [[TMP0]], i64 2
; CHECK-NEXT: [[TMP27:%.*]] = load i32*, i32** [[TMP26]], align 8
; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, i32* [[TMP27]], i64 0
; CHECK-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4
; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32*, i32** [[TMP0]], i64 2
; CHECK-NEXT: [[TMP31:%.*]] = load i32*, i32** [[TMP30]], align 8
; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, i32* [[TMP31]], i64 2
; CHECK-NEXT: store i32 [[TMP29]], i32* [[TMP32]], align 4
; CHECK-NEXT: call void @may_throw()
; CHECK-NEXT: br label [[TMP33]]
; CHECK: 33:
; CHECK-NEXT: br label [[TMP34:%.*]]
; CHECK: 34:
; CHECK-NEXT: br label [[TMP35]]
; CHECK: 35:
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32** [[TMP0]], i64 4), "dereferenceable"(i32** [[TMP0]], i64 4), "nonnull"(i32** [[TMP0]]) ]
; CHECK-NEXT: ret i32 0
;
%5 = getelementptr inbounds i32, i32* %1, i64 0
%6 = load i32, i32* %5, align 4
%7 = icmp ne i32 %6, 0
; Both entry-block assumes describe %1 and are expected to become parameter
; attributes on %1.
call void @llvm.assume(i1 true) [ "align"(i32* %1, i64 4), "dereferenceable"(i32* %1, i64 4) ]
call void @llvm.assume(i1 true) [ "align"(i32* %1, i64 4), "nonnull"(i32* %1) ]
%8 = getelementptr inbounds i32, i32* %1, i64 0
br i1 %7, label %9, label %19
9: ; preds = %4
call void @may_throw()
call void @llvm.assume(i1 true) [ "align"(i32* %8, i64 4), "dereferenceable"(i32* %8, i64 4), "nonnull"(i32* %8) ]
%10 = load i32, i32* %8, align 4
%11 = getelementptr inbounds i32, i32* %1, i64 2
store i32 %10, i32* %11, align 4
call void @may_throw()
call void @may_throw()
; These two assumes (on %11 and %14) should merge into a single call placed
; after %14 is defined, past the two may_throw calls above.
call void @llvm.assume(i1 true) [ "align"(i32* %11, i64 4), "dereferenceable"(i32* %11, i64 4), "nonnull"(i32* %11) ]
%12 = getelementptr inbounds i32*, i32** %0, i64 1
%13 = load i32*, i32** %12, align 8
%14 = getelementptr inbounds i32, i32* %13, i64 0
%15 = load i32, i32* %14, align 4
call void @llvm.assume(i1 true) [ "align"(i32* %14, i64 4), "dereferenceable"(i32* %14, i64 4), "nonnull"(i32* %14) ]
%16 = getelementptr inbounds i32*, i32** %0, i64 1
%17 = load i32*, i32** %16, align 8
%18 = getelementptr inbounds i32, i32* %17, i64 2
store i32 %15, i32* %18, align 4
call void @may_throw()
call void @llvm.assume(i1 true) [ "align"(i32** %0, i64 4), "dereferenceable"(i32** %0, i64 4), "nonnull"(i32** %0) ]
br label %35
19: ; preds = %4
%20 = getelementptr inbounds i32*, i32** %0, i64 7
%21 = load i32*, i32** %20, align 8
%22 = getelementptr inbounds i32, i32* %21, i64 0
%23 = load i32, i32* %22, align 4
%24 = icmp ne i32 %23, 0
br i1 %24, label %25, label %33
25: ; preds = %19
call void @may_throw()
call void @llvm.assume(i1 true) [ "align"(i32** %0, i64 4), "dereferenceable"(i32** %0, i64 4), "nonnull"(i32** %0) ]
%26 = getelementptr inbounds i32*, i32** %0, i64 2
%27 = load i32*, i32** %26, align 8
%28 = getelementptr inbounds i32, i32* %27, i64 0
%29 = load i32, i32* %28, align 4
%30 = getelementptr inbounds i32*, i32** %0, i64 2
%31 = load i32*, i32** %30, align 8
%32 = getelementptr inbounds i32, i32* %31, i64 2
store i32 %29, i32* %32, align 4
call void @may_throw()
br label %33
33: ; preds = %25, %19
br label %34
34: ; preds = %33
br label %35
35: ; preds = %34, %8
; This assume in the exit block is not provable on entry (it follows the
; branch) and is expected to stay where it is.
call void @llvm.assume(i1 true) [ "align"(i32** %0, i64 4), "dereferenceable"(i32** %0, i64 4), "nonnull"(i32** %0) ]
ret i32 0
}
; The assume's "nonnull"(%p) is already implied by the existing nonnull
; parameter attribute, so the assume carries no new information and is
; expected to be removed entirely (no assume appears in the expected output).
define i32 @test3(i32* nonnull %p, i32 %i) {
; CHECK-LABEL: define {{[^@]+}}@test3
; CHECK-SAME: (i32* nonnull [[P:%.*]], i32 [[I:%.*]])
; CHECK-NEXT: [[COND:%.*]] = icmp ne i32 [[I]], 0
; CHECK-NEXT: br i1 [[COND]], label [[A:%.*]], label [[B:%.*]]
; CHECK: A:
; CHECK-NEXT: ret i32 0
; CHECK: B:
; CHECK-NEXT: [[RET:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: ret i32 [[RET]]
;
%cond = icmp ne i32 %i, 0
call void @llvm.assume(i1 true) [ "nonnull"(i32* %p) ]
br i1 %cond, label %A, label %B
A:
ret i32 0
B:
%ret = load i32, i32* %p
ret i32 %ret
}
; The entry-block assume is fully expressible as parameter attributes, so it
; is expected to be promoted to "nonnull dereferenceable(32)" on %p and then
; deleted.
define i32 @test4(i32* %p, i32 %i) {
; CHECK-LABEL: define {{[^@]+}}@test4
; CHECK-SAME: (i32* nonnull dereferenceable(32) [[P:%.*]], i32 [[I:%.*]])
; CHECK-NEXT: [[COND:%.*]] = icmp ne i32 [[I]], 0
; CHECK-NEXT: br i1 [[COND]], label [[A:%.*]], label [[B:%.*]]
; CHECK: A:
; CHECK-NEXT: ret i32 0
; CHECK: B:
; CHECK-NEXT: [[RET:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: ret i32 [[RET]]
;
%cond = icmp ne i32 %i, 0
call void @llvm.assume(i1 true) [ "nonnull"(i32* %p), "dereferenceable"(i32* %p, i32 32) ]
br i1 %cond, label %A, label %B
A:
ret i32 0
B:
%ret = load i32, i32* %p
ret i32 %ret
}
; Same assume as @test4, but a may_throw() call precedes it, so the knowledge
; does not hold on function entry. The assume is expected to be kept in place
; and no parameter attributes are added (compare the expected signature with
; @test4's).
define i32 @test4A(i32* %p, i32 %i) {
; CHECK-LABEL: define {{[^@]+}}@test4A
; CHECK-SAME: (i32* [[P:%.*]], i32 [[I:%.*]])
; CHECK-NEXT: call void @may_throw()
; CHECK-NEXT: [[COND:%.*]] = icmp ne i32 [[I]], 0
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P]]), "dereferenceable"(i32* [[P]], i32 32) ]
; CHECK-NEXT: br i1 [[COND]], label [[A:%.*]], label [[B:%.*]]
; CHECK: A:
; CHECK-NEXT: ret i32 0
; CHECK: B:
; CHECK-NEXT: [[RET:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: ret i32 [[RET]]
;
call void @may_throw()
%cond = icmp ne i32 %i, 0
call void @llvm.assume(i1 true) [ "nonnull"(i32* %p), "dereferenceable"(i32* %p, i32 32) ]
br i1 %cond, label %A, label %B
A:
ret i32 0
B:
%ret = load i32, i32* %p
ret i32 %ret
}
; %p already carries dereferenceable(64), which subsumes the assume's
; dereferenceable(32); the "nonnull" part is promoted to an attribute. The
; assume is expected to be removed and the signature to read
; "nonnull dereferenceable(64)".
define i32 @test5(i32* dereferenceable(64) %p, i32 %i) {
; CHECK-LABEL: define {{[^@]+}}@test5
; CHECK-SAME: (i32* nonnull dereferenceable(64) [[P:%.*]], i32 [[I:%.*]])
; CHECK-NEXT: [[COND:%.*]] = icmp ne i32 [[I]], 0
; CHECK-NEXT: br i1 [[COND]], label [[A:%.*]], label [[B:%.*]]
; CHECK: A:
; CHECK-NEXT: ret i32 0
; CHECK: B:
; CHECK-NEXT: [[RET:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: ret i32 [[RET]]
;
%cond = icmp ne i32 %i, 0
call void @llvm.assume(i1 true) [ "nonnull"(i32* %p), "dereferenceable"(i32* %p, i32 32) ]
br i1 %cond, label %A, label %B
A:
ret i32 0
B:
%ret = load i32, i32* %p
ret i32 %ret
}
; The assume's dereferenceable(32) is stronger than the existing
; dereferenceable(8) attribute, so the attribute is expected to be upgraded to
; dereferenceable(32) and the now-redundant bundle operand rewritten to
; "ignore"(i32* undef, i32 32). The "cold"() bundle has no attribute
; equivalent here and stays in the assume.
define i32 @test5A(i32* dereferenceable(8) %p, i32 %i) {
; CHECK-LABEL: define {{[^@]+}}@test5A
; CHECK-SAME: (i32* dereferenceable(32) [[P:%.*]], i32 [[I:%.*]])
; CHECK-NEXT: [[COND:%.*]] = icmp ne i32 [[I]], 0
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "cold"(), "ignore"(i32* undef, i32 32) ]
; CHECK-NEXT: br i1 [[COND]], label [[A:%.*]], label [[B:%.*]]
; CHECK: A:
; CHECK-NEXT: ret i32 0
; CHECK: B:
; CHECK-NEXT: [[RET:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: ret i32 [[RET]]
;
%cond = icmp ne i32 %i, 0
call void @llvm.assume(i1 true) [ "cold"(), "dereferenceable"(i32* %p, i32 32) ]
br i1 %cond, label %A, label %B
A:
ret i32 0
B:
%ret = load i32, i32* %p
ret i32 %ret
}
; Three identical "cold"() assumes are expected to be deduplicated into a
; single assume at the top of the function; the duplicates before and after
; may_throw() are dropped.
define i32 @test6() {
; CHECK-LABEL: define {{[^@]+}}@test6()
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "cold"() ]
; CHECK-NEXT: call void @may_throw()
; CHECK-NEXT: ret i32 0
;
call void @llvm.assume(i1 true) [ "cold"() ]
call void @llvm.assume(i1 true) [ "cold"() ]
call void @may_throw()
call void @llvm.assume(i1 true) [ "cold"() ]
ret i32 0
}
; Four consecutive assumes are expected to collapse into one: the align and
; dereferenceable knowledge about %p becomes "align 4 dereferenceable(4)"
; parameter attributes, while the bundles on the bitcast %p1 (a different SSA
; value) remain in the single merged assume alongside "cold"().
define i32 @test7(i32* %p) {
; CHECK-LABEL: define {{[^@]+}}@test7
; CHECK-SAME: (i32* align 4 dereferenceable(4) [[P:%.*]])
; CHECK-NEXT: [[P1:%.*]] = bitcast i32* [[P]] to i8*
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "cold"(), "align"(i8* [[P1]], i64 4), "nonnull"(i8* [[P1]]) ]
; CHECK-NEXT: ret i32 0
;
%p1 = bitcast i32* %p to i8*
call void @llvm.assume(i1 true) [ "cold"() ]
call void @llvm.assume(i1 true) [ "align"(i32* %p, i32 4) ]
call void @llvm.assume(i1 true) [ "dereferenceable"(i32* %p, i32 4) ]
call void @llvm.assume(i1 true) [ "align"(i8* %p1, i32 4), "nonnull"(i8* %p1) ]
ret i32 0
}