
[AssumeBundles] add canonicalization to the assume builder

Summary:
This significantly reduces the number of assumes generated without affecting too much
of the information that is preserved. It significantly improves the compile-time cost
of enable-knowledge-retention.

Reviewers: jdoerfert, sstefan1

Reviewed By: jdoerfert

Subscribers: hiraditya, asbirlea, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D79650
Tyker 2020-06-19 10:32:09 +02:00
parent 08a3df5192
commit c00c14abfd
12 changed files with 596 additions and 236 deletions

View File

@@ -545,6 +545,9 @@ public:
});
}
/// Compute the maximum alignment that this GEP is guaranteed to preserve.
Align getMaxPreservedAlignment(const DataLayout &DL) const;
/// Accumulate the constant address offset of this GEP if possible.
///
/// This routine accepts an APInt into which it will try to accumulate the

View File

@@ -646,10 +646,12 @@ public:
///
/// Returns the original pointer value. If this is called on a non-pointer
/// value, it returns 'this'.
const Value *stripInBoundsOffsets() const;
Value *stripInBoundsOffsets() {
const Value *stripInBoundsOffsets(function_ref<void(const Value *)> Func =
[](const Value *) {}) const;
inline Value *stripInBoundsOffsets(function_ref<void(const Value *)> Func =
[](const Value *) {}) {
return const_cast<Value *>(
static_cast<const Value *>(this)->stripInBoundsOffsets());
static_cast<const Value *>(this)->stripInBoundsOffsets(Func));
}
/// Returns the number of bytes known to be dereferenceable for the
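
For context, the callback parameter added here is invoked on every value the stripping walk visits, including the starting pointer and the final base. A minimal usage sketch in C++, with an illustrative helper name that is not part of the patch:

#include "llvm/IR/Operator.h"
#include "llvm/IR/Value.h"
using namespace llvm;

// Count the inbounds GEPs crossed on the way to the base pointer.
static Value *collectBase(Value *Ptr, unsigned &NumGEPs) {
  // The lambda runs on each visited value, Ptr and the result included.
  return Ptr->stripInBoundsOffsets([&](const Value *V) {
    if (isa<GEPOperator>(V))
      ++NumGEPs;
  });
}

This is the hook the assume builder uses below to fold each GEP's preserved alignment into an alignment bundle while walking to the base.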

View File

@@ -31,6 +31,33 @@ Type *GEPOperator::getResultElementType() const {
return cast<GetElementPtrConstantExpr>(this)->getResultElementType();
}
Align GEPOperator::getMaxPreservedAlignment(const DataLayout &DL) const {
/// Compute the worst possible offset for every level of the GEP and accumulate
/// the minimum alignment into Result.
Align Result = Align(llvm::Value::MaximumAlignment);
for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
GTI != GTE; ++GTI) {
int64_t Offset = 1;
ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
if (StructType *STy = GTI.getStructTypeOrNull()) {
const StructLayout *SL = DL.getStructLayout(STy);
Offset = SL->getElementOffset(OpC->getZExtValue());
} else {
assert(GTI.isSequential() && "should be sequential");
/// If the index isn't known, we take 1 because it is the index that will
/// give the worst alignment of the offset.
int64_t ElemCount = 1;
if (OpC)
ElemCount = OpC->getZExtValue();
Offset = DL.getTypeAllocSize(GTI.getIndexedType()) * ElemCount;
}
Result = Align(MinAlign(Offset, Result.value()));
}
return Result;
}
bool GEPOperator::accumulateConstantOffset(
const DataLayout &DL, APInt &Offset,
function_ref<bool(Value &, APInt &)> ExternalAnalysis) const {
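
To make the accumulation concrete: each GEP level contributes its worst-case byte offset (a struct field's fixed offset, or alloc-size times the index for sequential types, with an unknown index pessimized to 1), and the result is folded with MinAlign. A self-contained sketch, assuming a typical 64-bit layout, for a GEP like getelementptr {i64, i32}, {i64, i32}* %p, i64 %n, i32 1:

#include <cstdint>

// Mirrors llvm::MinAlign: the largest power of two dividing both A and B,
// i.e. the lowest set bit of A | B.
static uint64_t MinAlign(uint64_t A, uint64_t B) {
  return (A | B) & (~(A | B) + 1);
}

int main() {
  uint64_t Result = uint64_t(1) << 32; // stand-in for Value::MaximumAlignment
  // Level 1: %n is unknown, so ElemCount is pessimized to 1; the alloc size
  // of {i64, i32} is 16 bytes, giving a worst-case offset of 16.
  Result = MinAlign(16, Result); // -> 16
  // Level 2: struct field 1 sits at byte offset 8.
  Result = MinAlign(8, Result);  // -> 8
  return Result == 8 ? 0 : 1;    // the GEP preserves 8-byte alignment of %p
}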

View File

@@ -516,7 +516,9 @@ enum PointerStripKind {
};
template <PointerStripKind StripKind>
static const Value *stripPointerCastsAndOffsets(const Value *V) {
static const Value *stripPointerCastsAndOffsets(
const Value *V,
function_ref<void(const Value *)> Func = [](const Value *) {}) {
if (!V->getType()->isPointerTy())
return V;
@@ -526,6 +528,7 @@ static const Value *stripPointerCastsAndOffsets(const Value *V) {
Visited.insert(V);
do {
Func(V);
if (auto *GEP = dyn_cast<GEPOperator>(V)) {
switch (StripKind) {
case PSK_ZeroIndices:
@@ -667,8 +670,9 @@ const Value *Value::stripAndAccumulateConstantOffsets(
return V;
}
const Value *Value::stripInBoundsOffsets() const {
return stripPointerCastsAndOffsets<PSK_InBounds>(this);
const Value *
Value::stripInBoundsOffsets(function_ref<void(const Value *)> Func) const {
return stripPointerCastsAndOffsets<PSK_InBounds>(this, Func);
}
uint64_t Value::getPointerDereferenceableBytes(const DataLayout &DL,

View File

@@ -48,6 +48,39 @@ bool isUsefullToPreserve(Attribute::AttrKind Kind) {
}
}
/// This function will try to transform the given knowledge into a more
/// canonical one. The canonical knowledge may be the given one.
RetainedKnowledge canonicalizedKnowledge(RetainedKnowledge RK, Module *M) {
switch (RK.AttrKind) {
default:
return RK;
case Attribute::NonNull:
RK.WasOn = GetUnderlyingObject(RK.WasOn, M->getDataLayout());
return RK;
case Attribute::Alignment: {
Value *V = RK.WasOn->stripInBoundsOffsets([&](const Value *Strip) {
if (auto *GEP = dyn_cast<GEPOperator>(Strip))
RK.ArgValue =
MinAlign(RK.ArgValue,
GEP->getMaxPreservedAlignment(M->getDataLayout()).value());
});
RK.WasOn = V;
return RK;
}
case Attribute::Dereferenceable:
case Attribute::DereferenceableOrNull: {
int64_t Offset = 0;
Value *V = GetPointerBaseWithConstantOffset(
RK.WasOn, Offset, M->getDataLayout(), /*AllowNonInBounds*/ false);
if (Offset < 0)
return RK;
RK.ArgValue = RK.ArgValue + Offset;
RK.WasOn = V;
}
}
return RK;
}
/// This class contains all knowledge that has been gathered while building an
/// llvm.assume and the functions to manipulate it.
struct AssumeBuilderState {
@@ -55,16 +88,16 @@ struct AssumeBuilderState {
using MapKey = std::pair<Value *, Attribute::AttrKind>;
SmallMapVector<MapKey, unsigned, 8> AssumedKnowledgeMap;
Instruction *InsertBeforeInstruction = nullptr;
Instruction *InstBeingRemoved = nullptr;
AssumptionCache* AC = nullptr;
DominatorTree* DT = nullptr;
AssumeBuilderState(Module *M, Instruction *I = nullptr,
AssumptionCache *AC = nullptr, DominatorTree *DT = nullptr)
: M(M), InsertBeforeInstruction(I), AC(AC), DT(DT) {}
: M(M), InstBeingRemoved(I), AC(AC), DT(DT) {}
bool tryToPreserveWithoutAddingAssume(RetainedKnowledge RK) {
if (!InsertBeforeInstruction || !AC || !RK.WasOn)
if (!InstBeingRemoved || !RK.WasOn)
return false;
bool HasBeenPreserved = false;
Use* ToUpdate = nullptr;
@@ -72,12 +105,12 @@ struct AssumeBuilderState {
RK.WasOn, {RK.AttrKind}, AC,
[&](RetainedKnowledge RKOther, Instruction *Assume,
const CallInst::BundleOpInfo *Bundle) {
if (!isValidAssumeForContext(Assume, InsertBeforeInstruction, DT))
if (!isValidAssumeForContext(Assume, InstBeingRemoved, DT))
return false;
if (RKOther.ArgValue >= RK.ArgValue) {
HasBeenPreserved = true;
return true;
} else if (isValidAssumeForContext(InsertBeforeInstruction, Assume,
} else if (isValidAssumeForContext(InstBeingRemoved, Assume,
DT)) {
HasBeenPreserved = true;
IntrinsicInst *Intr = cast<IntrinsicInst>(Assume);
@@ -92,8 +125,41 @@ struct AssumeBuilderState {
return HasBeenPreserved;
}
bool isKnowledgeWorthPreserving(RetainedKnowledge RK) {
if (!RK)
return false;
if (!RK.WasOn)
return true;
if (RK.WasOn->getType()->isPointerTy()) {
Value *UnderlyingPtr = GetUnderlyingObject(RK.WasOn, M->getDataLayout());
if (isa<AllocaInst>(UnderlyingPtr) || isa<GlobalValue>(UnderlyingPtr))
return false;
}
if (auto *Arg = dyn_cast<Argument>(RK.WasOn)) {
if (Arg->hasAttribute(RK.AttrKind) &&
(!Attribute::doesAttrKindHaveArgument(RK.AttrKind) ||
Arg->getAttribute(RK.AttrKind).getValueAsInt() >= RK.ArgValue))
return false;
return true;
}
if (auto *Inst = dyn_cast<Instruction>(RK.WasOn))
if (wouldInstructionBeTriviallyDead(Inst)) {
if (RK.WasOn->use_empty())
return false;
Use *SingleUse = RK.WasOn->getSingleUndroppableUse();
if (SingleUse && SingleUse->getUser() == InstBeingRemoved)
return false;
}
return true;
}
void addKnowledge(RetainedKnowledge RK) {
if (RK.AttrKind == Attribute::None || tryToPreserveWithoutAddingAssume(RK))
RK = canonicalizedKnowledge(RK, M);
if (!isKnowledgeWorthPreserving(RK))
return;
if (tryToPreserveWithoutAddingAssume(RK))
return;
MapKey Key{RK.WasOn, RK.AttrKind};
auto Lookup = AssumedKnowledgeMap.find(Key);
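
Taken together, canonicalizedKnowledge applies three rewrites before any bundle is emitted: nonnull is moved to the underlying object, align is moved across inbounds offsets while shrinking to the alignment those GEPs are guaranteed to preserve, and dereferenceable/dereferenceable_or_null fold a non-negative constant offset into the size. A self-contained C++ sketch of the last rule, with illustrative struct and helper names that are not part of the patch:

#include <cassert>
#include <cstdint>

struct Knowledge {
  const char *WasOn; // the value the attribute applies to
  uint64_t ArgValue; // the attribute's integer argument
};

// "dereferenceable(N) on (Base + Offset)" with a non-negative in-bounds
// Offset becomes "dereferenceable(N + Offset) on Base": the bytes between
// Base and the pointer lie within the same allocated object.
static Knowledge canonicalizeDeref(Knowledge RK, int64_t Offset,
                                   const char *Base) {
  if (Offset < 0) // a negative offset cannot be folded into the size
    return RK;
  return {Base, RK.ArgValue + uint64_t(Offset)};
}

int main() {
  // dereferenceable(4) on "%gep = getelementptr inbounds i32, i32* %p, i64 2"
  Knowledge RK = canonicalizeDeref({"%gep", 4}, /*Offset=*/8, "%p");
  assert(RK.ArgValue == 12 && "dereferenceable(12) on %p");
  return 0;
}

Keying the knowledge map on the canonical value is what lets duplicate bundles collapse into a single entry, which is visible in the test churn below.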

View File

@@ -15,31 +15,17 @@ declare void @llvm.assume(i1)
; operations on another array. Important for scientific codes.
;
define i32 @different_array_test(i64 %A, i64 %B) {
; NO_ASSUME-LABEL: @different_array_test(
; NO_ASSUME-NEXT: [[ARRAY11:%.*]] = alloca [100 x i32], align 4
; NO_ASSUME-NEXT: [[ARRAY22:%.*]] = alloca [200 x i32], align 4
; NO_ASSUME-NEXT: [[ARRAY22_SUB:%.*]] = getelementptr inbounds [200 x i32], [200 x i32]* [[ARRAY22]], i64 0, i64 0
; NO_ASSUME-NEXT: [[ARRAY11_SUB:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[ARRAY11]], i64 0, i64 0
; NO_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[ARRAY11_SUB]], i32 4) ]
; NO_ASSUME-NEXT: call void @external(i32* nonnull [[ARRAY11_SUB]])
; NO_ASSUME-NEXT: call void @external(i32* nonnull [[ARRAY22_SUB]])
; NO_ASSUME-NEXT: [[POINTER2:%.*]] = getelementptr [200 x i32], [200 x i32]* [[ARRAY22]], i64 0, i64 [[B:%.*]]
; NO_ASSUME-NEXT: store i32 7, i32* [[POINTER2]], align 4
; NO_ASSUME-NEXT: ret i32 0
;
; USE_ASSUME-LABEL: @different_array_test(
; USE_ASSUME-NEXT: [[ARRAY11:%.*]] = alloca [100 x i32], align 4
; USE_ASSUME-NEXT: [[ARRAY22:%.*]] = alloca [200 x i32], align 4
; USE_ASSUME-NEXT: [[ARRAY22_SUB:%.*]] = getelementptr inbounds [200 x i32], [200 x i32]* [[ARRAY22]], i64 0, i64 0
; USE_ASSUME-NEXT: [[ARRAY11_SUB:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[ARRAY11]], i64 0, i64 0
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[ARRAY11_SUB]], i32 4) ]
; USE_ASSUME-NEXT: call void @external(i32* nonnull [[ARRAY11_SUB]])
; USE_ASSUME-NEXT: call void @external(i32* nonnull [[ARRAY22_SUB]])
; USE_ASSUME-NEXT: [[POINTER:%.*]] = getelementptr [100 x i32], [100 x i32]* [[ARRAY11]], i64 0, i64 [[A:%.*]]
; USE_ASSUME-NEXT: [[POINTER2:%.*]] = getelementptr [200 x i32], [200 x i32]* [[ARRAY22]], i64 0, i64 [[B:%.*]]
; USE_ASSUME-NEXT: store i32 7, i32* [[POINTER2]], align 4
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[POINTER]], i64 4), "nonnull"(i32* [[POINTER]]), "align"(i32* [[POINTER]], i64 4) ]
; USE_ASSUME-NEXT: ret i32 0
; CHECK-LABEL: @different_array_test(
; CHECK-NEXT: [[ARRAY11:%.*]] = alloca [100 x i32], align 4
; CHECK-NEXT: [[ARRAY22:%.*]] = alloca [200 x i32], align 4
; CHECK-NEXT: [[ARRAY22_SUB:%.*]] = getelementptr inbounds [200 x i32], [200 x i32]* [[ARRAY22]], i64 0, i64 0
; CHECK-NEXT: [[ARRAY11_SUB:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[ARRAY11]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[ARRAY11_SUB]], i32 4) ]
; CHECK-NEXT: call void @external(i32* nonnull [[ARRAY11_SUB]])
; CHECK-NEXT: call void @external(i32* nonnull [[ARRAY22_SUB]])
; CHECK-NEXT: [[POINTER2:%.*]] = getelementptr [200 x i32], [200 x i32]* [[ARRAY22]], i64 0, i64 [[B:%.*]]
; CHECK-NEXT: store i32 7, i32* [[POINTER2]], align 4
; CHECK-NEXT: ret i32 0
;
%Array1 = alloca i32, i32 100
%Array2 = alloca i32, i32 200
@@ -63,23 +49,13 @@ define i32 @different_array_test(i64 %A, i64 %B) {
; interfere with each other. Again, important for scientific codes.
;
define i32 @constant_array_index_test() {
; NO_ASSUME-LABEL: @constant_array_index_test(
; NO_ASSUME-NEXT: [[ARRAY1:%.*]] = alloca [100 x i32], align 4
; NO_ASSUME-NEXT: [[ARRAY1_SUB:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[ARRAY1]], i64 0, i64 0
; NO_ASSUME-NEXT: call void @external(i32* nonnull [[ARRAY1_SUB]])
; NO_ASSUME-NEXT: [[P2:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[ARRAY1]], i64 0, i64 6
; NO_ASSUME-NEXT: store i32 1, i32* [[P2]], align 4
; NO_ASSUME-NEXT: ret i32 0
;
; USE_ASSUME-LABEL: @constant_array_index_test(
; USE_ASSUME-NEXT: [[ARRAY1:%.*]] = alloca [100 x i32], align 4
; USE_ASSUME-NEXT: [[ARRAY1_SUB:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[ARRAY1]], i64 0, i64 0
; USE_ASSUME-NEXT: call void @external(i32* nonnull [[ARRAY1_SUB]])
; USE_ASSUME-NEXT: [[P1:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[ARRAY1]], i64 0, i64 7
; USE_ASSUME-NEXT: [[P2:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[ARRAY1]], i64 0, i64 6
; USE_ASSUME-NEXT: store i32 1, i32* [[P2]], align 4
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 4), "nonnull"(i32* [[P1]]), "align"(i32* [[P1]], i64 4) ]
; USE_ASSUME-NEXT: ret i32 0
; CHECK-LABEL: @constant_array_index_test(
; CHECK-NEXT: [[ARRAY1:%.*]] = alloca [100 x i32], align 4
; CHECK-NEXT: [[ARRAY1_SUB:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[ARRAY1]], i64 0, i64 0
; CHECK-NEXT: call void @external(i32* nonnull [[ARRAY1_SUB]])
; CHECK-NEXT: [[P2:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[ARRAY1]], i64 0, i64 6
; CHECK-NEXT: store i32 1, i32* [[P2]], align 4
; CHECK-NEXT: ret i32 0
;
%Array = alloca i32, i32 100
call void @external(i32* %Array)
@@ -128,7 +104,7 @@ define i32 @gep_distance_test2({i32,i32}* %A, i64 %distance) {
; USE_ASSUME-NEXT: [[A1:%.*]] = getelementptr { i32, i32 }, { i32, i32 }* [[A:%.*]], i64 0, i32 0
; USE_ASSUME-NEXT: [[B:%.*]] = getelementptr { i32, i32 }, { i32, i32 }* [[A]], i64 [[DISTANCE:%.*]], i32 1
; USE_ASSUME-NEXT: store i32 7, i32* [[B]], align 4
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[A1]], i64 4), "nonnull"(i32* [[A1]]), "align"(i32* [[A1]], i64 4) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[A1]], i64 4), "nonnull"({ i32, i32 }* [[A]]), "align"(i32* [[A1]], i64 4) ]
; USE_ASSUME-NEXT: ret i32 0
;
%A1 = getelementptr {i32,i32}, {i32,i32}* %A, i64 0, i32 0
@@ -167,18 +143,11 @@ define i32 @gep_distance_test3(i32 * %A) {
; Test that we can disambiguate globals reached through constantexpr geps
define i32 @constexpr_test() {
; NO_ASSUME-LABEL: @constexpr_test(
; NO_ASSUME-NEXT: [[X:%.*]] = alloca i32, align 4
; NO_ASSUME-NEXT: call void @external(i32* nonnull [[X]])
; NO_ASSUME-NEXT: store i32 5, i32* getelementptr inbounds ({ i32 }, { i32 }* @Global, i64 0, i32 0), align 4
; NO_ASSUME-NEXT: ret i32 0
;
; USE_ASSUME-LABEL: @constexpr_test(
; USE_ASSUME-NEXT: [[X:%.*]] = alloca i32, align 4
; USE_ASSUME-NEXT: call void @external(i32* nonnull [[X]])
; USE_ASSUME-NEXT: store i32 5, i32* getelementptr inbounds ({ i32 }, { i32 }* @Global, i64 0, i32 0), align 4
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[X]], i64 4), "nonnull"(i32* [[X]]), "align"(i32* [[X]], i64 4) ]
; USE_ASSUME-NEXT: ret i32 0
; CHECK-LABEL: @constexpr_test(
; CHECK-NEXT: [[X:%.*]] = alloca i32, align 4
; CHECK-NEXT: call void @external(i32* nonnull [[X]])
; CHECK-NEXT: store i32 5, i32* getelementptr inbounds ({ i32 }, { i32 }* @Global, i64 0, i32 0), align 4
; CHECK-NEXT: ret i32 0
;
%X = alloca i32
call void @external(i32* %X)

View File

@@ -57,16 +57,10 @@ define i32 @caller2(i32* dereferenceable(31) %t1) {
; Make sure that we don't propagate a smaller dereferenceable amount.
define i32 @caller3(i32* dereferenceable(33) %t1) {
; NO_ASSUME-LABEL: define {{[^@]+}}@caller3
; NO_ASSUME-SAME: (i32* dereferenceable(33) [[T1:%.*]])
; NO_ASSUME-NEXT: [[T2_I:%.*]] = load i32, i32* [[T1]]
; NO_ASSUME-NEXT: ret i32 [[T2_I]]
;
; USE_ASSUME-LABEL: define {{[^@]+}}@caller3
; USE_ASSUME-SAME: (i32* dereferenceable(33) [[T1:%.*]])
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[T1]], i64 32) ]
; USE_ASSUME-NEXT: [[T2_I:%.*]] = load i32, i32* [[T1]]
; USE_ASSUME-NEXT: ret i32 [[T2_I]]
; CHECK-LABEL: define {{[^@]+}}@caller3
; CHECK-SAME: (i32* dereferenceable(33) [[T1:%.*]])
; CHECK-NEXT: [[T2_I:%.*]] = load i32, i32* [[T1]]
; CHECK-NEXT: ret i32 [[T2_I]]
;
%t2 = tail call i32 @callee(i32* dereferenceable(32) %t1)
ret i32 %t2

View File

@@ -1,4 +1,4 @@
; NOTE: Assertions have been autogenerated by update_test_checks.py
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instsimplify -S | FileCheck %s
@zeroinit = constant {} zeroinitializer
@@ -6,7 +6,7 @@
define i32 @crash_on_zeroinit() {
; CHECK-LABEL: @crash_on_zeroinit(
; CHECK: ret i32 0
; CHECK-NEXT: ret i32 0
;
%load = load i32, i32* bitcast ({}* @zeroinit to i32*)
ret i32 %load
@@ -14,7 +14,7 @@ define i32 @crash_on_zeroinit() {
define i32 @crash_on_undef() {
; CHECK-LABEL: @crash_on_undef(
; CHECK: ret i32 undef
; CHECK-NEXT: ret i32 undef
;
%load = load i32, i32* bitcast ({}* @undef to i32*)
ret i32 %load
@@ -24,7 +24,8 @@ define i32 @crash_on_undef() {
define <8 x i32> @partial_load() {
; CHECK-LABEL: @partial_load(
; CHECK: ret <8 x i32> <i32 0, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48>
; CHECK-NEXT: ret <8 x i32> <i32 0, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48>
;
%load = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr ([8 x i32], [8 x i32]* @GV, i64 0, i64 -1) to <8 x i32>*)
ret <8 x i32> %load
}
@@ -35,6 +36,7 @@ define <8 x i32> @partial_load() {
define <3 x float> @load_vec3() {
; CHECK-LABEL: @load_vec3(
; CHECK-NEXT: ret <3 x float> undef
;
%1 = load <3 x float>, <3 x float>* getelementptr inbounds (<3 x float>, <3 x float>* @constvec, i64 1)
ret <3 x float> %1
}

View File

@@ -51,7 +51,7 @@ define void @fn1() {
; USE_ASSUME-NEXT: call void @__msan_warning_noreturn()
; USE_ASSUME-NEXT: unreachable
; USE_ASSUME: bb2:
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[F_IBLOCK]], i64 4), "nonnull"(i32* [[F_IBLOCK]]), "align"(i32* [[F_IBLOCK]], i64 4) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.PyFrameObject* [[TMP]], i64 4), "nonnull"(%struct.PyFrameObject* [[TMP]]), "align"(%struct.PyFrameObject* [[TMP]], i64 4) ]
; USE_ASSUME-NEXT: [[TMP4:%.*]] = ptrtoint i32* [[F_IBLOCK]] to i64
; USE_ASSUME-NEXT: [[TOBOOL:%.*]] = icmp eq i64 [[TMP4]], 0
; USE_ASSUME-NEXT: br i1 [[TOBOOL]], label [[BB13:%.*]], label [[BB15:%.*]]

View File

@@ -10,32 +10,18 @@
@n_spills = external global i32 ; <i32*> [#uses=2]
define i32 @reload(%struct.rtx_def* %first, i32 %global, %struct.FILE* %dumpfile) {
; NO_ASSUME-LABEL: @reload(
; NO_ASSUME-NEXT: cond_next2835.1:
; NO_ASSUME-NEXT: br label [[BB2928:%.*]]
; NO_ASSUME: bb2928:
; NO_ASSUME-NEXT: br i1 false, label [[COND_NEXT2943:%.*]], label [[COND_TRUE2935:%.*]]
; NO_ASSUME: cond_true2935:
; NO_ASSUME-NEXT: br label [[COND_NEXT2943]]
; NO_ASSUME: cond_next2943:
; NO_ASSUME-NEXT: br i1 false, label [[BB2982_PREHEADER:%.*]], label [[BB2928]]
; NO_ASSUME: bb2982.preheader:
; NO_ASSUME-NEXT: store i8 undef, i8* null
; NO_ASSUME-NEXT: ret i32 undef
;
; USE_ASSUME-LABEL: @reload(
; USE_ASSUME-NEXT: cond_next2835.1:
; USE_ASSUME-NEXT: br label [[BB2928:%.*]]
; USE_ASSUME: bb2928:
; USE_ASSUME-NEXT: br i1 false, label [[COND_NEXT2943:%.*]], label [[COND_TRUE2935:%.*]]
; USE_ASSUME: cond_true2935:
; USE_ASSUME-NEXT: br label [[COND_NEXT2943]]
; USE_ASSUME: cond_next2943:
; USE_ASSUME-NEXT: br i1 false, label [[BB2982_PREHEADER:%.*]], label [[BB2928]]
; USE_ASSUME: bb2982.preheader:
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* @n_spills, i64 4), "nonnull"(i32* @n_spills), "align"(i32* @n_spills, i64 4) ]
; USE_ASSUME-NEXT: store i8 undef, i8* null
; USE_ASSUME-NEXT: ret i32 undef
; CHECK-LABEL: @reload(
; CHECK-NEXT: cond_next2835.1:
; CHECK-NEXT: br label [[BB2928:%.*]]
; CHECK: bb2928:
; CHECK-NEXT: br i1 false, label [[COND_NEXT2943:%.*]], label [[COND_TRUE2935:%.*]]
; CHECK: cond_true2935:
; CHECK-NEXT: br label [[COND_NEXT2943]]
; CHECK: cond_next2943:
; CHECK-NEXT: br i1 false, label [[BB2982_PREHEADER:%.*]], label [[BB2928]]
; CHECK: bb2982.preheader:
; CHECK-NEXT: store i8 undef, i8* null
; CHECK-NEXT: ret i32 undef
;
cond_next2835.1: ; preds = %cond_next2861
%tmp2922 = load i32, i32* @n_spills, align 4 ; <i32> [#uses=0]

View File

@@ -19,11 +19,11 @@ define void @test(i32* %P, i32* %P1, i32* %P2, i32* %P3) {
; BASIC-SAME: (i32* [[P:%.*]], i32* [[P1:%.*]], i32* [[P2:%.*]], i32* [[P3:%.*]])
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P]]), "dereferenceable"(i32* [[P]], i64 16) ]
; BASIC-NEXT: call void @func(i32* nonnull dereferenceable(16) [[P]], i32* null)
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 12), "nonnull"(i32* [[P]]) ]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 12) ]
; BASIC-NEXT: call void @func(i32* dereferenceable(12) [[P1]], i32* nonnull [[P]])
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 12), "cold"() ]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "cold"() ]
; BASIC-NEXT: call void @func_cold(i32* dereferenceable(12) [[P1]]) #6
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 12), "cold"() ]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "cold"() ]
; BASIC-NEXT: call void @func_cold(i32* dereferenceable(12) [[P1]])
; BASIC-NEXT: call void @func(i32* [[P1]], i32* [[P]])
; BASIC-NEXT: call void @func_strbool(i32* [[P1]])
@@ -33,7 +33,7 @@ define void @test(i32* %P, i32* %P1, i32* %P2, i32* %P3) {
; BASIC-NEXT: call void @func_many(i32* align 8 [[P1]])
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[P2]], i64 8), "nonnull"(i32* [[P3]]) ]
; BASIC-NEXT: call void @func_argattr(i32* [[P2]], i32* [[P3]])
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P1]]), "nonnull"(i32* [[P]]) ]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P1]]) ]
; BASIC-NEXT: call void @func(i32* nonnull [[P1]], i32* nonnull [[P]])
; BASIC-NEXT: ret void
;
@@ -41,11 +41,11 @@ define void @test(i32* %P, i32* %P1, i32* %P2, i32* %P3) {
; ALL-SAME: (i32* [[P:%.*]], i32* [[P1:%.*]], i32* [[P2:%.*]], i32* [[P3:%.*]])
; ALL-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P]]), "dereferenceable"(i32* [[P]], i64 16) ]
; ALL-NEXT: call void @func(i32* nonnull dereferenceable(16) [[P]], i32* null)
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 12), "nonnull"(i32* [[P]]) ]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 12) ]
; ALL-NEXT: call void @func(i32* dereferenceable(12) [[P1]], i32* nonnull [[P]])
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 12), "cold"(), "nounwind"(), "willreturn"() ]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "cold"(), "nounwind"(), "willreturn"() ]
; ALL-NEXT: call void @func_cold(i32* dereferenceable(12) [[P1]]) #6
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 12), "cold"(), "nounwind"(), "willreturn"() ]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "cold"(), "nounwind"(), "willreturn"() ]
; ALL-NEXT: call void @func_cold(i32* dereferenceable(12) [[P1]])
; ALL-NEXT: call void @func(i32* [[P1]], i32* [[P]])
; ALL-NEXT: call void @func_strbool(i32* [[P1]])
@@ -55,7 +55,7 @@ define void @test(i32* %P, i32* %P1, i32* %P2, i32* %P3) {
; ALL-NEXT: call void @func_many(i32* align 8 [[P1]])
; ALL-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[P2]], i64 8), "nonnull"(i32* [[P3]]), "nounwind"() ]
; ALL-NEXT: call void @func_argattr(i32* [[P2]], i32* [[P3]])
; ALL-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P1]]), "nonnull"(i32* [[P]]) ]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P1]]) ]
; ALL-NEXT: call void @func(i32* nonnull [[P1]], i32* nonnull [[P]])
; ALL-NEXT: ret void
;
@@ -106,9 +106,9 @@ define void @test(i32* %P, i32* %P1, i32* %P2, i32* %P3) {
; FULL-SIMPLIFY-LABEL: define {{[^@]+}}@test
; FULL-SIMPLIFY-SAME: (i32* nonnull dereferenceable(16) [[P:%.*]], i32* [[P1:%.*]], i32* [[P2:%.*]], i32* [[P3:%.*]])
; FULL-SIMPLIFY-NEXT: call void @func(i32* nonnull dereferenceable(16) [[P]], i32* null)
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 12), "ignore"(i32* undef) ]
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 12) ]
; FULL-SIMPLIFY-NEXT: call void @func(i32* dereferenceable(12) [[P1]], i32* nonnull [[P]])
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "ignore"(i32* undef, i64 12), "cold"() ]
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "cold"() ]
; FULL-SIMPLIFY-NEXT: call void @func_cold(i32* dereferenceable(12) [[P1]]) #6
; FULL-SIMPLIFY-NEXT: call void @func_cold(i32* dereferenceable(12) [[P1]])
; FULL-SIMPLIFY-NEXT: call void @func(i32* [[P1]], i32* [[P]])
@@ -118,7 +118,7 @@ define void @test(i32* %P, i32* %P1, i32* %P2, i32* %P3) {
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[P1]], i64 8), "align"(i32* [[P2]], i64 8), "nonnull"(i32* [[P3]]) ]
; FULL-SIMPLIFY-NEXT: call void @func_many(i32* align 8 [[P1]])
; FULL-SIMPLIFY-NEXT: call void @func_argattr(i32* [[P2]], i32* [[P3]])
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P1]]), "ignore"(i32* undef) ]
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P1]]) ]
; FULL-SIMPLIFY-NEXT: call void @func(i32* nonnull [[P1]], i32* nonnull [[P]])
; FULL-SIMPLIFY-NEXT: ret void
;
@@ -144,44 +144,32 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
; BASIC-NEXT: [[TMP5:%.*]] = alloca i32*, align 8
; BASIC-NEXT: [[TMP6:%.*]] = alloca i8*, align 8
; BASIC-NEXT: [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8) ]
; BASIC-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]), "align"(i32** [[TMP5]], i64 8) ]
; BASIC-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]), "align"(i8** [[TMP6]], i64 8) ]
; BASIC-NEXT: store i8* [[TMP2]], i8** [[TMP6]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]), "align"(i32** [[TMP5]], i64 8) ]
; BASIC-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "nonnull"(i32* [[TMP8]]), "align"(i32* [[TMP8]], i64 4) ]
; BASIC-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
; BASIC-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]), "align"(i8** [[TMP6]], i64 8) ]
; BASIC-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1), "nonnull"(i8* [[TMP11]]) ]
; BASIC-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
; BASIC-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8) ]
; BASIC-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; BASIC-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
; BASIC-NEXT: [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8) ]
; BASIC-NEXT: [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; BASIC-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP16]], i32 0, i32 0
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP17]], i64 4), "nonnull"(i32* [[TMP17]]), "align"(i32* [[TMP17]], i64 8) ]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP16]], i64 4), "nonnull"(%struct.S* [[TMP16]]), "align"(%struct.S* [[TMP16]], i64 8) ]
; BASIC-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8) ]
; BASIC-NEXT: [[TMP19:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; BASIC-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP19]], i32 0, i32 1
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP20]], i64 1), "nonnull"(i8* [[TMP20]]), "align"(i8* [[TMP20]], i64 4) ]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP19]], i64 5), "nonnull"(%struct.S* [[TMP19]]), "align"(%struct.S* [[TMP19]], i64 4) ]
; BASIC-NEXT: [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 4
; BASIC-NEXT: [[TMP22:%.*]] = sext i8 [[TMP21]] to i32
; BASIC-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8) ]
; BASIC-NEXT: [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; BASIC-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP25]], i64 8), "nonnull"(i32** [[TMP25]]), "align"(i32** [[TMP25]], i64 8) ]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP24]], i64 16), "nonnull"(%struct.S* [[TMP24]]), "align"(%struct.S* [[TMP24]], i64 8) ]
; BASIC-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP26]], i64 4), "nonnull"(i32* [[TMP26]]), "align"(i32* [[TMP26]], i64 4) ]
; BASIC-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
; BASIC-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
; BASIC-NEXT: ret i32 [[TMP28]]
@@ -192,44 +180,32 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
; ALL-NEXT: [[TMP5:%.*]] = alloca i32*, align 8
; ALL-NEXT: [[TMP6:%.*]] = alloca i8*, align 8
; ALL-NEXT: [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8) ]
; ALL-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]), "align"(i32** [[TMP5]], i64 8) ]
; ALL-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]), "align"(i8** [[TMP6]], i64 8) ]
; ALL-NEXT: store i8* [[TMP2]], i8** [[TMP6]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]), "align"(i32** [[TMP5]], i64 8) ]
; ALL-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "nonnull"(i32* [[TMP8]]), "align"(i32* [[TMP8]], i64 4) ]
; ALL-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
; ALL-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]), "align"(i8** [[TMP6]], i64 8) ]
; ALL-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1), "nonnull"(i8* [[TMP11]]) ]
; ALL-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
; ALL-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8) ]
; ALL-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; ALL-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
; ALL-NEXT: [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8) ]
; ALL-NEXT: [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; ALL-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP16]], i32 0, i32 0
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP17]], i64 4), "nonnull"(i32* [[TMP17]]), "align"(i32* [[TMP17]], i64 8) ]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP16]], i64 4), "nonnull"(%struct.S* [[TMP16]]), "align"(%struct.S* [[TMP16]], i64 8) ]
; ALL-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8) ]
; ALL-NEXT: [[TMP19:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; ALL-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP19]], i32 0, i32 1
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP20]], i64 1), "nonnull"(i8* [[TMP20]]), "align"(i8* [[TMP20]], i64 4) ]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP19]], i64 5), "nonnull"(%struct.S* [[TMP19]]), "align"(%struct.S* [[TMP19]], i64 4) ]
; ALL-NEXT: [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 4
; ALL-NEXT: [[TMP22:%.*]] = sext i8 [[TMP21]] to i32
; ALL-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8) ]
; ALL-NEXT: [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; ALL-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP25]], i64 8), "nonnull"(i32** [[TMP25]]), "align"(i32** [[TMP25]], i64 8) ]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP24]], i64 16), "nonnull"(%struct.S* [[TMP24]]), "align"(%struct.S* [[TMP24]], i64 8) ]
; ALL-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP26]], i64 4), "nonnull"(i32* [[TMP26]]), "align"(i32* [[TMP26]], i64 4) ]
; ALL-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
; ALL-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
; ALL-NEXT: ret i32 [[TMP28]]
@@ -240,18 +216,13 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
; WITH-AC-NEXT: [[TMP5:%.*]] = alloca i32*, align 8
; WITH-AC-NEXT: [[TMP6:%.*]] = alloca i8*, align 8
; WITH-AC-NEXT: [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8) ]
; WITH-AC-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]), "align"(i32** [[TMP5]], i64 8) ]
; WITH-AC-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]), "align"(i8** [[TMP6]], i64 8) ]
; WITH-AC-NEXT: store i8* [[TMP2]], i8** [[TMP6]], align 8
; WITH-AC-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "nonnull"(i32* [[TMP8]]), "align"(i32* [[TMP8]], i64 4) ]
; WITH-AC-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
; WITH-AC-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
; WITH-AC-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1), "nonnull"(i8* [[TMP11]]) ]
; WITH-AC-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
; WITH-AC-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; WITH-AC-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
@@ -259,19 +230,18 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
; WITH-AC-NEXT: [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; WITH-AC-NEXT: [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; WITH-AC-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP16]], i32 0, i32 0
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP17]], i64 4), "nonnull"(i32* [[TMP17]]), "align"(i32* [[TMP17]], i64 8) ]
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP16]], i64 4), "nonnull"(%struct.S* [[TMP16]]), "align"(%struct.S* [[TMP16]], i64 8) ]
; WITH-AC-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 8
; WITH-AC-NEXT: [[TMP19:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; WITH-AC-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP19]], i32 0, i32 1
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP20]], i64 1), "nonnull"(i8* [[TMP20]]), "align"(i8* [[TMP20]], i64 4) ]
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP19]], i64 5), "nonnull"(%struct.S* [[TMP19]]), "align"(%struct.S* [[TMP19]], i64 4) ]
; WITH-AC-NEXT: [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 4
; WITH-AC-NEXT: [[TMP22:%.*]] = sext i8 [[TMP21]] to i32
; WITH-AC-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
; WITH-AC-NEXT: [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; WITH-AC-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP25]], i64 8), "nonnull"(i32** [[TMP25]]), "align"(i32** [[TMP25]], i64 8) ]
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP24]], i64 16), "nonnull"(%struct.S* [[TMP24]]), "align"(%struct.S* [[TMP24]], i64 8) ]
; WITH-AC-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP26]], i64 4), "nonnull"(i32* [[TMP26]]), "align"(i32* [[TMP26]], i64 4) ]
; WITH-AC-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
; WITH-AC-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
; WITH-AC-NEXT: ret i32 [[TMP28]]
@@ -282,18 +252,13 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
; CROSS-BLOCK-NEXT: [[TMP5:%.*]] = alloca i32*, align 8
; CROSS-BLOCK-NEXT: [[TMP6:%.*]] = alloca i8*, align 8
; CROSS-BLOCK-NEXT: [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8) ]
; CROSS-BLOCK-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]), "align"(i32** [[TMP5]], i64 8) ]
; CROSS-BLOCK-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]), "align"(i8** [[TMP6]], i64 8) ]
; CROSS-BLOCK-NEXT: store i8* [[TMP2]], i8** [[TMP6]], align 8
; CROSS-BLOCK-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "nonnull"(i32* [[TMP8]]), "align"(i32* [[TMP8]], i64 4) ]
; CROSS-BLOCK-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
; CROSS-BLOCK-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
; CROSS-BLOCK-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1), "nonnull"(i8* [[TMP11]]) ]
; CROSS-BLOCK-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
; CROSS-BLOCK-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; CROSS-BLOCK-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
@@ -301,19 +266,18 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
; CROSS-BLOCK-NEXT: [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; CROSS-BLOCK-NEXT: [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; CROSS-BLOCK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP16]], i32 0, i32 0
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP17]], i64 4), "nonnull"(i32* [[TMP17]]), "align"(i32* [[TMP17]], i64 8) ]
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP16]], i64 4), "nonnull"(%struct.S* [[TMP16]]), "align"(%struct.S* [[TMP16]], i64 8) ]
; CROSS-BLOCK-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 8
; CROSS-BLOCK-NEXT: [[TMP19:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; CROSS-BLOCK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP19]], i32 0, i32 1
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP20]], i64 1), "nonnull"(i8* [[TMP20]]), "align"(i8* [[TMP20]], i64 4) ]
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP19]], i64 5), "nonnull"(%struct.S* [[TMP19]]), "align"(%struct.S* [[TMP19]], i64 4) ]
; CROSS-BLOCK-NEXT: [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 4
; CROSS-BLOCK-NEXT: [[TMP22:%.*]] = sext i8 [[TMP21]] to i32
; CROSS-BLOCK-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
; CROSS-BLOCK-NEXT: [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; CROSS-BLOCK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP25]], i64 8), "nonnull"(i32** [[TMP25]]), "align"(i32** [[TMP25]], i64 8) ]
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP24]], i64 16), "nonnull"(%struct.S* [[TMP24]]), "align"(%struct.S* [[TMP24]], i64 8) ]
; CROSS-BLOCK-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP26]], i64 4), "nonnull"(i32* [[TMP26]]), "align"(i32* [[TMP26]], i64 4) ]
; CROSS-BLOCK-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
; CROSS-BLOCK-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
; CROSS-BLOCK-NEXT: ret i32 [[TMP28]]
@@ -345,9 +309,9 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
; FULL-SIMPLIFY-NEXT: [[TMP22:%.*]] = sext i8 [[TMP21]] to i32
; FULL-SIMPLIFY-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
; FULL-SIMPLIFY-NEXT: [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP16]], i64 4), "nonnull"(%struct.S* [[TMP16]]), "align"(%struct.S* [[TMP16]], i64 8), "dereferenceable"(%struct.S* [[TMP19]], i64 5), "nonnull"(%struct.S* [[TMP19]]), "align"(%struct.S* [[TMP19]], i64 4), "dereferenceable"(%struct.S* [[TMP24]], i64 16), "nonnull"(%struct.S* [[TMP24]]), "align"(%struct.S* [[TMP24]], i64 8) ]
; FULL-SIMPLIFY-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
; FULL-SIMPLIFY-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]], align 8
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]), "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]), "align"(i8** [[TMP6]], i64 8), "dereferenceable"(i32* [[TMP8]], i64 4), "nonnull"(i32* [[TMP8]]), "align"(i32* [[TMP8]], i64 4), "dereferenceable"(i8* [[TMP11]], i64 1), "nonnull"(i8* [[TMP11]]), "dereferenceable"(i32* [[TMP17]], i64 4), "nonnull"(i32* [[TMP17]]), "align"(i32* [[TMP17]], i64 8), "dereferenceable"(i8* [[TMP20]], i64 1), "nonnull"(i8* [[TMP20]]), "align"(i8* [[TMP20]], i64 4), "dereferenceable"(i32** [[TMP25]], i64 8), "nonnull"(i32** [[TMP25]]), "align"(i32** [[TMP25]], i64 8), "dereferenceable"(i32* [[TMP26]], i64 4), "nonnull"(i32* [[TMP26]]), "align"(i32* [[TMP26]], i64 4) ]
; FULL-SIMPLIFY-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
; FULL-SIMPLIFY-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
; FULL-SIMPLIFY-NEXT: ret i32 [[TMP28]]
@@ -384,52 +348,40 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
ret i32 %28
}
define i32 @test3(%struct.S* %0, i32* %1, i8* %2) null_pointer_is_valid {
define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true" {
; BASIC-LABEL: define {{[^@]+}}@test3
; BASIC-SAME: (%struct.S* [[TMP0:%.*]], i32* [[TMP1:%.*]], i8* [[TMP2:%.*]]) #4
; BASIC-NEXT: [[TMP4:%.*]] = alloca %struct.S*, align 8
; BASIC-NEXT: [[TMP5:%.*]] = alloca i32*, align 8
; BASIC-NEXT: [[TMP6:%.*]] = alloca i8*, align 8
; BASIC-NEXT: [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 8) ]
; BASIC-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "align"(i32** [[TMP5]], i64 8) ]
; BASIC-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "align"(i8** [[TMP6]], i64 8) ]
; BASIC-NEXT: store i8* [[TMP2]], i8** [[TMP6]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "align"(i32** [[TMP5]], i64 8) ]
; BASIC-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "align"(i32* [[TMP8]], i64 4) ]
; BASIC-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
; BASIC-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "align"(i8** [[TMP6]], i64 8) ]
; BASIC-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1) ]
; BASIC-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
; BASIC-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 32) ]
; BASIC-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 32
; BASIC-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
; BASIC-NEXT: call void @may_throw()
; BASIC-NEXT: [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 8) ]
; BASIC-NEXT: [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; BASIC-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP16]], i32 0, i32 0
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP17]], i64 4), "align"(i32* [[TMP17]], i64 8) ]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP16]], i64 4), "nonnull"(%struct.S* [[TMP16]]), "align"(%struct.S* [[TMP16]], i64 8) ]
; BASIC-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 8) ]
; BASIC-NEXT: [[TMP19:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; BASIC-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP19]], i32 0, i32 1
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP20]], i64 1), "align"(i8* [[TMP20]], i64 4) ]
; BASIC-NEXT: [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 4
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP19]], i64 5), "nonnull"(%struct.S* [[TMP19]]), "align"(%struct.S* [[TMP19]], i64 4) ]
; BASIC-NEXT: [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 8
; BASIC-NEXT: [[TMP22:%.*]] = sext i8 [[TMP21]] to i32
; BASIC-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 8) ]
; BASIC-NEXT: [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; BASIC-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP25]], i64 8), "align"(i32** [[TMP25]], i64 8) ]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP24]], i64 16), "nonnull"(%struct.S* [[TMP24]]), "align"(%struct.S* [[TMP24]], i64 8) ]
; BASIC-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP26]], i64 4), "align"(i32* [[TMP26]], i64 4) ]
; BASIC-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
; BASIC-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
; BASIC-NEXT: ret i32 [[TMP28]]
@@ -440,45 +392,33 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) null_pointer_is_valid {
; ALL-NEXT: [[TMP5:%.*]] = alloca i32*, align 8
; ALL-NEXT: [[TMP6:%.*]] = alloca i8*, align 8
; ALL-NEXT: [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 8) ]
; ALL-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "align"(i32** [[TMP5]], i64 8) ]
; ALL-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "align"(i8** [[TMP6]], i64 8) ]
; ALL-NEXT: store i8* [[TMP2]], i8** [[TMP6]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "align"(i32** [[TMP5]], i64 8) ]
; ALL-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "align"(i32* [[TMP8]], i64 4) ]
; ALL-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
; ALL-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "align"(i8** [[TMP6]], i64 8) ]
; ALL-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1) ]
; ALL-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
; ALL-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 32) ]
; ALL-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 32
; ALL-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
; ALL-NEXT: call void @may_throw()
; ALL-NEXT: [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 8) ]
; ALL-NEXT: [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; ALL-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP16]], i32 0, i32 0
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP17]], i64 4), "align"(i32* [[TMP17]], i64 8) ]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP16]], i64 4), "nonnull"(%struct.S* [[TMP16]]), "align"(%struct.S* [[TMP16]], i64 8) ]
; ALL-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 8) ]
; ALL-NEXT: [[TMP19:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; ALL-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP19]], i32 0, i32 1
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP20]], i64 1), "align"(i8* [[TMP20]], i64 4) ]
; ALL-NEXT: [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 4
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP19]], i64 5), "nonnull"(%struct.S* [[TMP19]]), "align"(%struct.S* [[TMP19]], i64 4) ]
; ALL-NEXT: [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 8
; ALL-NEXT: [[TMP22:%.*]] = sext i8 [[TMP21]] to i32
; ALL-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 8) ]
; ALL-NEXT: [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; ALL-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP25]], i64 8), "align"(i32** [[TMP25]], i64 8) ]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP24]], i64 16), "nonnull"(%struct.S* [[TMP24]]), "align"(%struct.S* [[TMP24]], i64 8) ]
; ALL-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP26]], i64 4), "align"(i32* [[TMP26]], i64 4) ]
; ALL-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
; ALL-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
; ALL-NEXT: ret i32 [[TMP28]]
@@ -489,18 +429,13 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) null_pointer_is_valid {
; WITH-AC-NEXT: [[TMP5:%.*]] = alloca i32*, align 8
; WITH-AC-NEXT: [[TMP6:%.*]] = alloca i8*, align 8
; WITH-AC-NEXT: [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 32) ]
; WITH-AC-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "align"(i32** [[TMP5]], i64 8) ]
; WITH-AC-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "align"(i8** [[TMP6]], i64 8) ]
; WITH-AC-NEXT: store i8* [[TMP2]], i8** [[TMP6]], align 8
; WITH-AC-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "align"(i32* [[TMP8]], i64 4) ]
; WITH-AC-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
; WITH-AC-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
; WITH-AC-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1) ]
; WITH-AC-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
; WITH-AC-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; WITH-AC-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 32
@@ -509,19 +444,18 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) null_pointer_is_valid {
; WITH-AC-NEXT: [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; WITH-AC-NEXT: [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; WITH-AC-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP16]], i32 0, i32 0
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP17]], i64 4), "align"(i32* [[TMP17]], i64 8) ]
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP16]], i64 4), "nonnull"(%struct.S* [[TMP16]]), "align"(%struct.S* [[TMP16]], i64 8) ]
; WITH-AC-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 8
; WITH-AC-NEXT: [[TMP19:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; WITH-AC-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP19]], i32 0, i32 1
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP20]], i64 1), "align"(i8* [[TMP20]], i64 4) ]
; WITH-AC-NEXT: [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 4
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP19]], i64 5), "nonnull"(%struct.S* [[TMP19]]), "align"(%struct.S* [[TMP19]], i64 4) ]
; WITH-AC-NEXT: [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 8
; WITH-AC-NEXT: [[TMP22:%.*]] = sext i8 [[TMP21]] to i32
; WITH-AC-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
; WITH-AC-NEXT: [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; WITH-AC-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP25]], i64 8), "align"(i32** [[TMP25]], i64 8) ]
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP24]], i64 16), "nonnull"(%struct.S* [[TMP24]]), "align"(%struct.S* [[TMP24]], i64 8) ]
; WITH-AC-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP26]], i64 4), "align"(i32* [[TMP26]], i64 4) ]
; WITH-AC-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
; WITH-AC-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
; WITH-AC-NEXT: ret i32 [[TMP28]]
@@ -532,18 +466,13 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) null_pointer_is_valid {
; CROSS-BLOCK-NEXT: [[TMP5:%.*]] = alloca i32*, align 8
; CROSS-BLOCK-NEXT: [[TMP6:%.*]] = alloca i8*, align 8
; CROSS-BLOCK-NEXT: [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 32) ]
; CROSS-BLOCK-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "align"(i32** [[TMP5]], i64 8) ]
; CROSS-BLOCK-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "align"(i8** [[TMP6]], i64 8) ]
; CROSS-BLOCK-NEXT: store i8* [[TMP2]], i8** [[TMP6]], align 8
; CROSS-BLOCK-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "align"(i32* [[TMP8]], i64 4) ]
; CROSS-BLOCK-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
; CROSS-BLOCK-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
; CROSS-BLOCK-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1) ]
; CROSS-BLOCK-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
; CROSS-BLOCK-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; CROSS-BLOCK-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 32
@@ -552,19 +481,18 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) null_pointer_is_valid {
; CROSS-BLOCK-NEXT: [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; CROSS-BLOCK-NEXT: [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; CROSS-BLOCK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP16]], i32 0, i32 0
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP17]], i64 4), "align"(i32* [[TMP17]], i64 8) ]
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP16]], i64 4), "nonnull"(%struct.S* [[TMP16]]), "align"(%struct.S* [[TMP16]], i64 8) ]
; CROSS-BLOCK-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 8
; CROSS-BLOCK-NEXT: [[TMP19:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; CROSS-BLOCK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP19]], i32 0, i32 1
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP20]], i64 1), "align"(i8* [[TMP20]], i64 4) ]
; CROSS-BLOCK-NEXT: [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 4
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP19]], i64 5), "nonnull"(%struct.S* [[TMP19]]), "align"(%struct.S* [[TMP19]], i64 4) ]
; CROSS-BLOCK-NEXT: [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 8
; CROSS-BLOCK-NEXT: [[TMP22:%.*]] = sext i8 [[TMP21]] to i32
; CROSS-BLOCK-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
; CROSS-BLOCK-NEXT: [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; CROSS-BLOCK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP25]], i64 8), "align"(i32** [[TMP25]], i64 8) ]
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP24]], i64 16), "nonnull"(%struct.S* [[TMP24]]), "align"(%struct.S* [[TMP24]], i64 8) ]
; CROSS-BLOCK-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP26]], i64 4), "align"(i32* [[TMP26]], i64 4) ]
; CROSS-BLOCK-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
; CROSS-BLOCK-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
; CROSS-BLOCK-NEXT: ret i32 [[TMP28]]
@@ -582,7 +510,6 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) null_pointer_is_valid {
; FULL-SIMPLIFY-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
; FULL-SIMPLIFY-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
; FULL-SIMPLIFY-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]], align 8
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 32), "dereferenceable"(i32** [[TMP5]], i64 8), "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i8** [[TMP6]], i64 8), "align"(i8** [[TMP6]], i64 8), "dereferenceable"(i32* [[TMP8]], i64 4), "align"(i32* [[TMP8]], i64 4), "dereferenceable"(i8* [[TMP11]], i64 1) ]
; FULL-SIMPLIFY-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
; FULL-SIMPLIFY-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
; FULL-SIMPLIFY-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 32
@@ -594,13 +521,13 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) null_pointer_is_valid {
; FULL-SIMPLIFY-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 8
; FULL-SIMPLIFY-NEXT: [[TMP19:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; FULL-SIMPLIFY-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP19]], i32 0, i32 1
; FULL-SIMPLIFY-NEXT: [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 4
; FULL-SIMPLIFY-NEXT: [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 8
; FULL-SIMPLIFY-NEXT: [[TMP22:%.*]] = sext i8 [[TMP21]] to i32
; FULL-SIMPLIFY-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
; FULL-SIMPLIFY-NEXT: [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S* [[TMP16]], i64 4), "nonnull"(%struct.S* [[TMP16]]), "align"(%struct.S* [[TMP16]], i64 8), "dereferenceable"(%struct.S* [[TMP19]], i64 5), "nonnull"(%struct.S* [[TMP19]]), "align"(%struct.S* [[TMP19]], i64 4), "dereferenceable"(%struct.S* [[TMP24]], i64 16), "nonnull"(%struct.S* [[TMP24]]), "align"(%struct.S* [[TMP24]], i64 8) ]
; FULL-SIMPLIFY-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
; FULL-SIMPLIFY-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]], align 8
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP17]], i64 4), "align"(i32* [[TMP17]], i64 8), "dereferenceable"(i8* [[TMP20]], i64 1), "align"(i8* [[TMP20]], i64 4), "dereferenceable"(i32** [[TMP25]], i64 8), "align"(i32** [[TMP25]], i64 8), "dereferenceable"(i32* [[TMP26]], i64 4), "align"(i32* [[TMP26]], i64 4) ]
; FULL-SIMPLIFY-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
; FULL-SIMPLIFY-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
; FULL-SIMPLIFY-NEXT: ret i32 [[TMP28]]
@@ -627,7 +554,7 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) null_pointer_is_valid {
%18 = load i32, i32* %17, align 8
%19 = load %struct.S*, %struct.S** %4, align 8
%20 = getelementptr inbounds %struct.S, %struct.S* %19, i32 0, i32 1
%21 = load i8, i8* %20, align 4
%21 = load i8, i8* %20, align 8
%22 = sext i8 %21 to i32
%23 = add nsw i32 %18, %22
%24 = load %struct.S*, %struct.S** %4, align 8
@@ -647,9 +574,8 @@ define dso_local i32 @_Z6squarePi(i32* %P, i32* %P1, i1 %cond) {
; BASIC-NEXT: store i32 0, i32* [[P1]], align 8
; BASIC-NEXT: br i1 [[COND]], label [[A:%.*]], label [[B:%.*]]
; BASIC: A:
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 8) ]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[P]], i64 8) ]
; BASIC-NEXT: store i32 0, i32* [[P]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 4), "nonnull"(i32* [[P1]]), "align"(i32* [[P1]], i64 4) ]
; BASIC-NEXT: store i32 0, i32* [[P1]], align 4
; BASIC-NEXT: br i1 [[COND]], label [[C:%.*]], label [[B]]
; BASIC: B:
@@ -673,9 +599,8 @@ define dso_local i32 @_Z6squarePi(i32* %P, i32* %P1, i1 %cond) {
; ALL-NEXT: store i32 0, i32* [[P1]], align 8
; ALL-NEXT: br i1 [[COND]], label [[A:%.*]], label [[B:%.*]]
; ALL: A:
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 8) ]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[P]], i64 8) ]
; ALL-NEXT: store i32 0, i32* [[P]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 4), "nonnull"(i32* [[P1]]), "align"(i32* [[P1]], i64 4) ]
; ALL-NEXT: store i32 0, i32* [[P1]], align 4
; ALL-NEXT: br i1 [[COND]], label [[C:%.*]], label [[B]]
; ALL: B:
@@ -745,7 +670,7 @@ define dso_local i32 @_Z6squarePi(i32* %P, i32* %P1, i1 %cond) {
; FULL-SIMPLIFY-NEXT: store i32 0, i32* [[P1]], align 8
; FULL-SIMPLIFY-NEXT: br i1 [[COND]], label [[A:%.*]], label [[B:%.*]]
; FULL-SIMPLIFY: A:
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "ignore"(i32* undef, i64 4), "ignore"(i32* undef), "align"(i32* [[P]], i64 8) ]
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[P]], i64 8) ]
; FULL-SIMPLIFY-NEXT: store i32 0, i32* [[P]], align 8
; FULL-SIMPLIFY-NEXT: store i32 0, i32* [[P1]], align 4
; FULL-SIMPLIFY-NEXT: br i1 [[COND]], label [[C:%.*]], label [[B]]
@@ -788,13 +713,11 @@ define dso_local i32 @test4A(i32* %0, i32* %1, i32 %2, i32 %3) {
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP0]], i64 4), "nonnull"(i32* [[TMP0]]), "align"(i32* [[TMP0]], i64 4) ]
; BASIC-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 4
; BASIC-NEXT: [[TMP9:%.*]] = add nsw i32 [[TMP7]], [[TMP8]]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP0]], i64 4), "nonnull"(i32* [[TMP0]]), "align"(i32* [[TMP0]], i64 4) ]
; BASIC-NEXT: store i32 0, i32* [[TMP0]], align 4
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP1]], i64 4), "nonnull"(i32* [[TMP1]]), "align"(i32* [[TMP1]], i64 4) ]
; BASIC-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP1]], align 4
; BASIC-NEXT: [[TMP11:%.*]] = add nsw i32 [[TMP9]], [[TMP10]]
; BASIC-NEXT: call void @may_throw()
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP1]], i64 4), "nonnull"(i32* [[TMP1]]), "align"(i32* [[TMP1]], i64 4) ]
; BASIC-NEXT: store i32 [[TMP11]], i32* [[TMP1]], align 4
; BASIC-NEXT: br label [[TMP12]]
; BASIC: 12:
@@ -810,13 +733,11 @@ define dso_local i32 @test4A(i32* %0, i32* %1, i32 %2, i32 %3) {
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP0]], i64 4), "nonnull"(i32* [[TMP0]]), "align"(i32* [[TMP0]], i64 4) ]
; ALL-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 4
; ALL-NEXT: [[TMP9:%.*]] = add nsw i32 [[TMP7]], [[TMP8]]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP0]], i64 4), "nonnull"(i32* [[TMP0]]), "align"(i32* [[TMP0]], i64 4) ]
; ALL-NEXT: store i32 0, i32* [[TMP0]], align 4
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP1]], i64 4), "nonnull"(i32* [[TMP1]]), "align"(i32* [[TMP1]], i64 4) ]
; ALL-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP1]], align 4
; ALL-NEXT: [[TMP11:%.*]] = add nsw i32 [[TMP9]], [[TMP10]]
; ALL-NEXT: call void @may_throw()
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP1]], i64 4), "nonnull"(i32* [[TMP1]]), "align"(i32* [[TMP1]], i64 4) ]
; ALL-NEXT: store i32 [[TMP11]], i32* [[TMP1]], align 4
; ALL-NEXT: br label [[TMP12]]
; ALL: 12:
@@ -977,3 +898,389 @@ Exit: ; preds = %7, %5
}
declare dso_local i32 @__gxx_personality_v0(...)
define dso_local i32 @test5(i8* %0, i32 %1) {
; BASIC-LABEL: define {{[^@]+}}@test5
; BASIC-SAME: (i8* [[TMP0:%.*]], i32 [[TMP1:%.*]])
; BASIC-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to i64*
; BASIC-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP0]] to i16*
; BASIC-NEXT: [[TMP5:%.*]] = sext i32 [[TMP1]] to i64
; BASIC-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, i16* [[TMP4]], i64 [[TMP5]]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i16* [[TMP6]], i64 2), "nonnull"(i8* [[TMP0]]), "align"(i8* [[TMP0]], i64 8) ]
; BASIC-NEXT: [[TMP7:%.*]] = load i16, i16* [[TMP6]], align 2
; BASIC-NEXT: [[A:%.*]] = load i16, i16* [[TMP6]], align 4
; BASIC-NEXT: [[TMP8:%.*]] = sext i16 [[TMP7]] to i64
; BASIC-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, i64* [[TMP3]], i64 [[TMP8]]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i64* [[TMP9]], i64 8) ]
; BASIC-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP9]], align 16
; BASIC-NEXT: [[B:%.*]] = load i64, i64* [[TMP9]], align 32
; BASIC-NEXT: [[TMP11:%.*]] = trunc i64 [[TMP10]] to i32
; BASIC-NEXT: ret i32 [[TMP11]]
;
; ALL-LABEL: define {{[^@]+}}@test5
; ALL-SAME: (i8* [[TMP0:%.*]], i32 [[TMP1:%.*]])
; ALL-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to i64*
; ALL-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP0]] to i16*
; ALL-NEXT: [[TMP5:%.*]] = sext i32 [[TMP1]] to i64
; ALL-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, i16* [[TMP4]], i64 [[TMP5]]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i16* [[TMP6]], i64 2), "nonnull"(i8* [[TMP0]]), "align"(i8* [[TMP0]], i64 8) ]
; ALL-NEXT: [[TMP7:%.*]] = load i16, i16* [[TMP6]], align 2
; ALL-NEXT: [[A:%.*]] = load i16, i16* [[TMP6]], align 4
; ALL-NEXT: [[TMP8:%.*]] = sext i16 [[TMP7]] to i64
; ALL-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, i64* [[TMP3]], i64 [[TMP8]]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i64* [[TMP9]], i64 8) ]
; ALL-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP9]], align 16
; ALL-NEXT: [[B:%.*]] = load i64, i64* [[TMP9]], align 32
; ALL-NEXT: [[TMP11:%.*]] = trunc i64 [[TMP10]] to i32
; ALL-NEXT: ret i32 [[TMP11]]
;
; WITH-AC-LABEL: define {{[^@]+}}@test5
; WITH-AC-SAME: (i8* [[TMP0:%.*]], i32 [[TMP1:%.*]])
; WITH-AC-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to i64*
; WITH-AC-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP0]] to i16*
; WITH-AC-NEXT: [[TMP5:%.*]] = sext i32 [[TMP1]] to i64
; WITH-AC-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, i16* [[TMP4]], i64 [[TMP5]]
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i16* [[TMP6]], i64 2), "nonnull"(i8* [[TMP0]]), "align"(i8* [[TMP0]], i64 8) ]
; WITH-AC-NEXT: [[TMP7:%.*]] = load i16, i16* [[TMP6]], align 2
; WITH-AC-NEXT: [[A:%.*]] = load i16, i16* [[TMP6]], align 4
; WITH-AC-NEXT: [[TMP8:%.*]] = sext i16 [[TMP7]] to i64
; WITH-AC-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, i64* [[TMP3]], i64 [[TMP8]]
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i64* [[TMP9]], i64 8) ]
; WITH-AC-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP9]], align 16
; WITH-AC-NEXT: [[B:%.*]] = load i64, i64* [[TMP9]], align 32
; WITH-AC-NEXT: [[TMP11:%.*]] = trunc i64 [[TMP10]] to i32
; WITH-AC-NEXT: ret i32 [[TMP11]]
;
; CROSS-BLOCK-LABEL: define {{[^@]+}}@test5
; CROSS-BLOCK-SAME: (i8* [[TMP0:%.*]], i32 [[TMP1:%.*]])
; CROSS-BLOCK-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to i64*
; CROSS-BLOCK-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP0]] to i16*
; CROSS-BLOCK-NEXT: [[TMP5:%.*]] = sext i32 [[TMP1]] to i64
; CROSS-BLOCK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, i16* [[TMP4]], i64 [[TMP5]]
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i16* [[TMP6]], i64 2), "nonnull"(i8* [[TMP0]]), "align"(i8* [[TMP0]], i64 8) ]
; CROSS-BLOCK-NEXT: [[TMP7:%.*]] = load i16, i16* [[TMP6]], align 2
; CROSS-BLOCK-NEXT: [[A:%.*]] = load i16, i16* [[TMP6]], align 4
; CROSS-BLOCK-NEXT: [[TMP8:%.*]] = sext i16 [[TMP7]] to i64
; CROSS-BLOCK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, i64* [[TMP3]], i64 [[TMP8]]
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i64* [[TMP9]], i64 8) ]
; CROSS-BLOCK-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP9]], align 16
; CROSS-BLOCK-NEXT: [[B:%.*]] = load i64, i64* [[TMP9]], align 32
; CROSS-BLOCK-NEXT: [[TMP11:%.*]] = trunc i64 [[TMP10]] to i32
; CROSS-BLOCK-NEXT: ret i32 [[TMP11]]
;
; FULL-SIMPLIFY-LABEL: define {{[^@]+}}@test5
; FULL-SIMPLIFY-SAME: (i8* nonnull align 8 [[TMP0:%.*]], i32 [[TMP1:%.*]])
; FULL-SIMPLIFY-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to i64*
; FULL-SIMPLIFY-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP0]] to i16*
; FULL-SIMPLIFY-NEXT: [[TMP5:%.*]] = sext i32 [[TMP1]] to i64
; FULL-SIMPLIFY-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, i16* [[TMP4]], i64 [[TMP5]]
; FULL-SIMPLIFY-NEXT: [[TMP7:%.*]] = load i16, i16* [[TMP6]], align 2
; FULL-SIMPLIFY-NEXT: [[A:%.*]] = load i16, i16* [[TMP6]], align 4
; FULL-SIMPLIFY-NEXT: [[TMP8:%.*]] = sext i16 [[TMP7]] to i64
; FULL-SIMPLIFY-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, i64* [[TMP3]], i64 [[TMP8]]
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i16* [[TMP6]], i64 2), "dereferenceable"(i64* [[TMP9]], i64 8) ]
; FULL-SIMPLIFY-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP9]], align 16
; FULL-SIMPLIFY-NEXT: [[B:%.*]] = load i64, i64* [[TMP9]], align 32
; FULL-SIMPLIFY-NEXT: [[TMP11:%.*]] = trunc i64 [[TMP10]] to i32
; FULL-SIMPLIFY-NEXT: ret i32 [[TMP11]]
;
%3 = bitcast i8* %0 to i64*
%4 = bitcast i8* %0 to i16*
%5 = sext i32 %1 to i64
%6 = getelementptr inbounds i16, i16* %4, i64 %5
%7 = load i16, i16* %6, align 2
%A = load i16, i16* %6, align 4
%8 = sext i16 %7 to i64
%9 = getelementptr inbounds i64, i64* %3, i64 %8
%10 = load i64, i64* %9, align 16
%B = load i64, i64* %9, align 32
%11 = trunc i64 %10 to i32
ret i32 %11
}
define i32 @test6(i32* %0, i32 %1, i32* %2) {
; BASIC-LABEL: define {{[^@]+}}@test6
; BASIC-SAME: (i32* [[TMP0:%.*]], i32 [[TMP1:%.*]], i32* [[TMP2:%.*]])
; BASIC-NEXT: br label [[TMP4:%.*]]
; BASIC: 4:
; BASIC-NEXT: [[DOT0:%.*]] = phi i32 [ 0, [[TMP3:%.*]] ], [ [[TMP16:%.*]], [[TMP6:%.*]] ]
; BASIC-NEXT: [[TMP5:%.*]] = icmp slt i32 [[DOT0]], [[TMP1]]
; BASIC-NEXT: br i1 [[TMP5]], label [[TMP6]], label [[TMP17:%.*]]
; BASIC: 6:
; BASIC-NEXT: [[TMP7:%.*]] = add nsw i32 [[TMP1]], [[DOT0]]
; BASIC-NEXT: [[TMP8:%.*]] = sext i32 [[TMP7]] to i64
; BASIC-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 [[TMP8]]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[TMP0]]), "align"(i32* [[TMP0]], i64 4) ]
; BASIC-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
; BASIC-NEXT: [[TMP11:%.*]] = mul nsw i32 [[DOT0]], [[TMP10]]
; BASIC-NEXT: [[TMP12:%.*]] = sext i32 [[DOT0]] to i64
; BASIC-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 [[TMP12]]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP13]], i64 4) ]
; BASIC-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
; BASIC-NEXT: [[TMP15:%.*]] = add nsw i32 [[TMP14]], [[TMP11]]
; BASIC-NEXT: store i32 [[TMP15]], i32* [[TMP13]], align 4
; BASIC-NEXT: [[TMP16]] = add nsw i32 [[DOT0]], 1
; BASIC-NEXT: br label [[TMP4]]
; BASIC: 17:
; BASIC-NEXT: [[TMP18:%.*]] = sext i32 [[TMP1]] to i64
; BASIC-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i64 [[TMP18]]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[TMP2]]), "align"(i32* [[TMP2]], i64 4) ]
; BASIC-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
; BASIC-NEXT: ret i32 [[TMP20]]
;
; ALL-LABEL: define {{[^@]+}}@test6
; ALL-SAME: (i32* [[TMP0:%.*]], i32 [[TMP1:%.*]], i32* [[TMP2:%.*]])
; ALL-NEXT: br label [[TMP4:%.*]]
; ALL: 4:
; ALL-NEXT: [[DOT0:%.*]] = phi i32 [ 0, [[TMP3:%.*]] ], [ [[TMP16:%.*]], [[TMP6:%.*]] ]
; ALL-NEXT: [[TMP5:%.*]] = icmp slt i32 [[DOT0]], [[TMP1]]
; ALL-NEXT: br i1 [[TMP5]], label [[TMP6]], label [[TMP17:%.*]]
; ALL: 6:
; ALL-NEXT: [[TMP7:%.*]] = add nsw i32 [[TMP1]], [[DOT0]]
; ALL-NEXT: [[TMP8:%.*]] = sext i32 [[TMP7]] to i64
; ALL-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 [[TMP8]]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[TMP0]]), "align"(i32* [[TMP0]], i64 4) ]
; ALL-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
; ALL-NEXT: [[TMP11:%.*]] = mul nsw i32 [[DOT0]], [[TMP10]]
; ALL-NEXT: [[TMP12:%.*]] = sext i32 [[DOT0]] to i64
; ALL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 [[TMP12]]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP13]], i64 4) ]
; ALL-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
; ALL-NEXT: [[TMP15:%.*]] = add nsw i32 [[TMP14]], [[TMP11]]
; ALL-NEXT: store i32 [[TMP15]], i32* [[TMP13]], align 4
; ALL-NEXT: [[TMP16]] = add nsw i32 [[DOT0]], 1
; ALL-NEXT: br label [[TMP4]]
; ALL: 17:
; ALL-NEXT: [[TMP18:%.*]] = sext i32 [[TMP1]] to i64
; ALL-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i64 [[TMP18]]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[TMP2]]), "align"(i32* [[TMP2]], i64 4) ]
; ALL-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
; ALL-NEXT: ret i32 [[TMP20]]
;
; WITH-AC-LABEL: define {{[^@]+}}@test6
; WITH-AC-SAME: (i32* [[TMP0:%.*]], i32 [[TMP1:%.*]], i32* [[TMP2:%.*]])
; WITH-AC-NEXT: br label [[TMP4:%.*]]
; WITH-AC: 4:
; WITH-AC-NEXT: [[DOT0:%.*]] = phi i32 [ 0, [[TMP3:%.*]] ], [ [[TMP16:%.*]], [[TMP6:%.*]] ]
; WITH-AC-NEXT: [[TMP5:%.*]] = icmp slt i32 [[DOT0]], [[TMP1]]
; WITH-AC-NEXT: br i1 [[TMP5]], label [[TMP6]], label [[TMP17:%.*]]
; WITH-AC: 6:
; WITH-AC-NEXT: [[TMP7:%.*]] = add nsw i32 [[TMP1]], [[DOT0]]
; WITH-AC-NEXT: [[TMP8:%.*]] = sext i32 [[TMP7]] to i64
; WITH-AC-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 [[TMP8]]
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[TMP0]]), "align"(i32* [[TMP0]], i64 4) ]
; WITH-AC-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
; WITH-AC-NEXT: [[TMP11:%.*]] = mul nsw i32 [[DOT0]], [[TMP10]]
; WITH-AC-NEXT: [[TMP12:%.*]] = sext i32 [[DOT0]] to i64
; WITH-AC-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 [[TMP12]]
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP13]], i64 4) ]
; WITH-AC-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
; WITH-AC-NEXT: [[TMP15:%.*]] = add nsw i32 [[TMP14]], [[TMP11]]
; WITH-AC-NEXT: store i32 [[TMP15]], i32* [[TMP13]], align 4
; WITH-AC-NEXT: [[TMP16]] = add nsw i32 [[DOT0]], 1
; WITH-AC-NEXT: br label [[TMP4]]
; WITH-AC: 17:
; WITH-AC-NEXT: [[TMP18:%.*]] = sext i32 [[TMP1]] to i64
; WITH-AC-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i64 [[TMP18]]
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[TMP2]]), "align"(i32* [[TMP2]], i64 4) ]
; WITH-AC-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
; WITH-AC-NEXT: ret i32 [[TMP20]]
;
; CROSS-BLOCK-LABEL: define {{[^@]+}}@test6
; CROSS-BLOCK-SAME: (i32* [[TMP0:%.*]], i32 [[TMP1:%.*]], i32* [[TMP2:%.*]])
; CROSS-BLOCK-NEXT: br label [[TMP4:%.*]]
; CROSS-BLOCK: 4:
; CROSS-BLOCK-NEXT: [[DOT0:%.*]] = phi i32 [ 0, [[TMP3:%.*]] ], [ [[TMP16:%.*]], [[TMP6:%.*]] ]
; CROSS-BLOCK-NEXT: [[TMP5:%.*]] = icmp slt i32 [[DOT0]], [[TMP1]]
; CROSS-BLOCK-NEXT: br i1 [[TMP5]], label [[TMP6]], label [[TMP17:%.*]]
; CROSS-BLOCK: 6:
; CROSS-BLOCK-NEXT: [[TMP7:%.*]] = add nsw i32 [[TMP1]], [[DOT0]]
; CROSS-BLOCK-NEXT: [[TMP8:%.*]] = sext i32 [[TMP7]] to i64
; CROSS-BLOCK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 [[TMP8]]
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[TMP0]]), "align"(i32* [[TMP0]], i64 4) ]
; CROSS-BLOCK-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
; CROSS-BLOCK-NEXT: [[TMP11:%.*]] = mul nsw i32 [[DOT0]], [[TMP10]]
; CROSS-BLOCK-NEXT: [[TMP12:%.*]] = sext i32 [[DOT0]] to i64
; CROSS-BLOCK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 [[TMP12]]
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP13]], i64 4) ]
; CROSS-BLOCK-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
; CROSS-BLOCK-NEXT: [[TMP15:%.*]] = add nsw i32 [[TMP14]], [[TMP11]]
; CROSS-BLOCK-NEXT: store i32 [[TMP15]], i32* [[TMP13]], align 4
; CROSS-BLOCK-NEXT: [[TMP16]] = add nsw i32 [[DOT0]], 1
; CROSS-BLOCK-NEXT: br label [[TMP4]]
; CROSS-BLOCK: 17:
; CROSS-BLOCK-NEXT: [[TMP18:%.*]] = sext i32 [[TMP1]] to i64
; CROSS-BLOCK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i64 [[TMP18]]
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[TMP2]]), "align"(i32* [[TMP2]], i64 4) ]
; CROSS-BLOCK-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
; CROSS-BLOCK-NEXT: ret i32 [[TMP20]]
;
; FULL-SIMPLIFY-LABEL: define {{[^@]+}}@test6
; FULL-SIMPLIFY-SAME: (i32* [[TMP0:%.*]], i32 [[TMP1:%.*]], i32* [[TMP2:%.*]])
; FULL-SIMPLIFY-NEXT: br label [[TMP4:%.*]]
; FULL-SIMPLIFY: 4:
; FULL-SIMPLIFY-NEXT: [[DOT0:%.*]] = phi i32 [ 0, [[TMP3:%.*]] ], [ [[TMP16:%.*]], [[TMP6:%.*]] ]
; FULL-SIMPLIFY-NEXT: [[TMP5:%.*]] = icmp slt i32 [[DOT0]], [[TMP1]]
; FULL-SIMPLIFY-NEXT: br i1 [[TMP5]], label [[TMP6]], label [[TMP17:%.*]]
; FULL-SIMPLIFY: 6:
; FULL-SIMPLIFY-NEXT: [[TMP7:%.*]] = add nsw i32 [[TMP1]], [[DOT0]]
; FULL-SIMPLIFY-NEXT: [[TMP8:%.*]] = sext i32 [[TMP7]] to i64
; FULL-SIMPLIFY-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 [[TMP8]]
; FULL-SIMPLIFY-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
; FULL-SIMPLIFY-NEXT: [[TMP11:%.*]] = mul nsw i32 [[DOT0]], [[TMP10]]
; FULL-SIMPLIFY-NEXT: [[TMP12:%.*]] = sext i32 [[DOT0]] to i64
; FULL-SIMPLIFY-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 [[TMP12]]
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[TMP0]]), "align"(i32* [[TMP0]], i64 4), "dereferenceable"(i32* [[TMP13]], i64 4) ]
; FULL-SIMPLIFY-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
; FULL-SIMPLIFY-NEXT: [[TMP15:%.*]] = add nsw i32 [[TMP14]], [[TMP11]]
; FULL-SIMPLIFY-NEXT: store i32 [[TMP15]], i32* [[TMP13]], align 4
; FULL-SIMPLIFY-NEXT: [[TMP16]] = add nsw i32 [[DOT0]], 1
; FULL-SIMPLIFY-NEXT: br label [[TMP4]]
; FULL-SIMPLIFY: 17:
; FULL-SIMPLIFY-NEXT: [[TMP18:%.*]] = sext i32 [[TMP1]] to i64
; FULL-SIMPLIFY-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i64 [[TMP18]]
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(i32* [[TMP2]]), "align"(i32* [[TMP2]], i64 4) ]
; FULL-SIMPLIFY-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
; FULL-SIMPLIFY-NEXT: ret i32 [[TMP20]]
;
br label %4
4: ; preds = %6, %3
%.0 = phi i32 [ 0, %3 ], [ %16, %6 ]
%5 = icmp slt i32 %.0, %1
br i1 %5, label %6, label %17
6: ; preds = %4
%7 = add nsw i32 %1, %.0
%8 = sext i32 %7 to i64
%9 = getelementptr inbounds i32, i32* %0, i64 %8
%10 = load i32, i32* %9, align 4
%11 = mul nsw i32 %.0, %10
%12 = sext i32 %.0 to i64
%13 = getelementptr inbounds i32, i32* %0, i64 %12
%14 = load i32, i32* %13, align 4
%15 = add nsw i32 %14, %11
store i32 %15, i32* %13, align 4
%16 = add nsw i32 %.0, 1
br label %4
17: ; preds = %4
%18 = sext i32 %1 to i64
%19 = getelementptr inbounds i32, i32* %2, i64 %18
%20 = load i32, i32* %19, align 4
ret i32 %20
}
%struct.A = type { i8*, i64*, [4 x [4 x %struct.D]], i64 }
%struct.D = type { i64, i64 }
define i32 @test7(%struct.A* nonnull %0, i32 %1) {
; BASIC-LABEL: define {{[^@]+}}@test7
; BASIC-SAME: (%struct.A* nonnull [[TMP0:%.*]], i32 [[TMP1:%.*]])
; BASIC-NEXT: [[TMP3:%.*]] = sext i32 [[TMP1]] to i64
; BASIC-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_A:%.*]], %struct.A* [[TMP0]], i64 0, i32 3
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.A* [[TMP0]], i64 280), "align"(%struct.A* [[TMP0]], i64 16) ]
; BASIC-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP4]], align 8
; BASIC-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_A]], %struct.A* [[TMP0]], i64 0, i32 2, i64 [[TMP3]], i64 [[TMP5]], i32 0
; BASIC-NEXT: [[TMP7:%.*]] = load i64, i64* [[TMP6]], align 32
; BASIC-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_A]], %struct.A* [[TMP0]], i64 0, i32 1
; BASIC-NEXT: [[TMP9:%.*]] = load i64*, i64** [[TMP8]], align 8
; BASIC-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, i64* [[TMP9]], i64 [[TMP5]]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i64* [[TMP10]], i64 8), "nonnull"(i64* [[TMP9]]), "align"(i64* [[TMP9]], i64 8) ]
; BASIC-NEXT: store i64 [[TMP7]], i64* [[TMP10]], align 8
; BASIC-NEXT: store i64 [[TMP7]], i64* [[TMP10]], align 8
; BASIC-NEXT: [[TMP11:%.*]] = bitcast %struct.A* [[TMP0]] to i32**
; BASIC-NEXT: [[TMP12:%.*]] = load i32*, i32** [[TMP11]], align 8
; BASIC-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
; BASIC-NEXT: ret i32 [[TMP13]]
;
; ALL-LABEL: define {{[^@]+}}@test7
; ALL-SAME: (%struct.A* nonnull [[TMP0:%.*]], i32 [[TMP1:%.*]])
; ALL-NEXT: [[TMP3:%.*]] = sext i32 [[TMP1]] to i64
; ALL-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_A:%.*]], %struct.A* [[TMP0]], i64 0, i32 3
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.A* [[TMP0]], i64 280), "align"(%struct.A* [[TMP0]], i64 16) ]
; ALL-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP4]], align 8
; ALL-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_A]], %struct.A* [[TMP0]], i64 0, i32 2, i64 [[TMP3]], i64 [[TMP5]], i32 0
; ALL-NEXT: [[TMP7:%.*]] = load i64, i64* [[TMP6]], align 32
; ALL-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_A]], %struct.A* [[TMP0]], i64 0, i32 1
; ALL-NEXT: [[TMP9:%.*]] = load i64*, i64** [[TMP8]], align 8
; ALL-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, i64* [[TMP9]], i64 [[TMP5]]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i64* [[TMP10]], i64 8), "nonnull"(i64* [[TMP9]]), "align"(i64* [[TMP9]], i64 8) ]
; ALL-NEXT: store i64 [[TMP7]], i64* [[TMP10]], align 8
; ALL-NEXT: store i64 [[TMP7]], i64* [[TMP10]], align 8
; ALL-NEXT: [[TMP11:%.*]] = bitcast %struct.A* [[TMP0]] to i32**
; ALL-NEXT: [[TMP12:%.*]] = load i32*, i32** [[TMP11]], align 8
; ALL-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
; ALL-NEXT: ret i32 [[TMP13]]
;
; WITH-AC-LABEL: define {{[^@]+}}@test7
; WITH-AC-SAME: (%struct.A* nonnull [[TMP0:%.*]], i32 [[TMP1:%.*]])
; WITH-AC-NEXT: [[TMP3:%.*]] = sext i32 [[TMP1]] to i64
; WITH-AC-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_A:%.*]], %struct.A* [[TMP0]], i64 0, i32 3
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.A* [[TMP0]], i64 280), "align"(%struct.A* [[TMP0]], i64 16) ]
; WITH-AC-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP4]], align 8
; WITH-AC-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_A]], %struct.A* [[TMP0]], i64 0, i32 2, i64 [[TMP3]], i64 [[TMP5]], i32 0
; WITH-AC-NEXT: [[TMP7:%.*]] = load i64, i64* [[TMP6]], align 32
; WITH-AC-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_A]], %struct.A* [[TMP0]], i64 0, i32 1
; WITH-AC-NEXT: [[TMP9:%.*]] = load i64*, i64** [[TMP8]], align 8
; WITH-AC-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, i64* [[TMP9]], i64 [[TMP5]]
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i64* [[TMP10]], i64 8), "nonnull"(i64* [[TMP9]]), "align"(i64* [[TMP9]], i64 8) ]
; WITH-AC-NEXT: store i64 [[TMP7]], i64* [[TMP10]], align 8
; WITH-AC-NEXT: store i64 [[TMP7]], i64* [[TMP10]], align 8
; WITH-AC-NEXT: [[TMP11:%.*]] = bitcast %struct.A* [[TMP0]] to i32**
; WITH-AC-NEXT: [[TMP12:%.*]] = load i32*, i32** [[TMP11]], align 8
; WITH-AC-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
; WITH-AC-NEXT: ret i32 [[TMP13]]
;
; CROSS-BLOCK-LABEL: define {{[^@]+}}@test7
; CROSS-BLOCK-SAME: (%struct.A* nonnull [[TMP0:%.*]], i32 [[TMP1:%.*]])
; CROSS-BLOCK-NEXT: [[TMP3:%.*]] = sext i32 [[TMP1]] to i64
; CROSS-BLOCK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_A:%.*]], %struct.A* [[TMP0]], i64 0, i32 3
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.A* [[TMP0]], i64 280), "align"(%struct.A* [[TMP0]], i64 16) ]
; CROSS-BLOCK-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP4]], align 8
; CROSS-BLOCK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_A]], %struct.A* [[TMP0]], i64 0, i32 2, i64 [[TMP3]], i64 [[TMP5]], i32 0
; CROSS-BLOCK-NEXT: [[TMP7:%.*]] = load i64, i64* [[TMP6]], align 32
; CROSS-BLOCK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_A]], %struct.A* [[TMP0]], i64 0, i32 1
; CROSS-BLOCK-NEXT: [[TMP9:%.*]] = load i64*, i64** [[TMP8]], align 8
; CROSS-BLOCK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, i64* [[TMP9]], i64 [[TMP5]]
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i64* [[TMP10]], i64 8), "nonnull"(i64* [[TMP9]]), "align"(i64* [[TMP9]], i64 8) ]
; CROSS-BLOCK-NEXT: store i64 [[TMP7]], i64* [[TMP10]], align 8
; CROSS-BLOCK-NEXT: store i64 [[TMP7]], i64* [[TMP10]], align 8
; CROSS-BLOCK-NEXT: [[TMP11:%.*]] = bitcast %struct.A* [[TMP0]] to i32**
; CROSS-BLOCK-NEXT: [[TMP12:%.*]] = load i32*, i32** [[TMP11]], align 8
; CROSS-BLOCK-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
; CROSS-BLOCK-NEXT: ret i32 [[TMP13]]
;
; FULL-SIMPLIFY-LABEL: define {{[^@]+}}@test7
; FULL-SIMPLIFY-SAME: (%struct.A* nonnull align 16 dereferenceable(280) [[TMP0:%.*]], i32 [[TMP1:%.*]])
; FULL-SIMPLIFY-NEXT: [[TMP3:%.*]] = sext i32 [[TMP1]] to i64
; FULL-SIMPLIFY-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_A:%.*]], %struct.A* [[TMP0]], i64 0, i32 3
; FULL-SIMPLIFY-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP4]], align 8
; FULL-SIMPLIFY-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_A]], %struct.A* [[TMP0]], i64 0, i32 2, i64 [[TMP3]], i64 [[TMP5]], i32 0
; FULL-SIMPLIFY-NEXT: [[TMP7:%.*]] = load i64, i64* [[TMP6]], align 32
; FULL-SIMPLIFY-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_A]], %struct.A* [[TMP0]], i64 0, i32 1
; FULL-SIMPLIFY-NEXT: [[TMP9:%.*]] = load i64*, i64** [[TMP8]], align 8
; FULL-SIMPLIFY-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, i64* [[TMP9]], i64 [[TMP5]]
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i64* [[TMP10]], i64 8), "nonnull"(i64* [[TMP9]]), "align"(i64* [[TMP9]], i64 8) ]
; FULL-SIMPLIFY-NEXT: store i64 [[TMP7]], i64* [[TMP10]], align 8
; FULL-SIMPLIFY-NEXT: store i64 [[TMP7]], i64* [[TMP10]], align 8
; FULL-SIMPLIFY-NEXT: [[TMP11:%.*]] = bitcast %struct.A* [[TMP0]] to i32**
; FULL-SIMPLIFY-NEXT: [[TMP12:%.*]] = load i32*, i32** [[TMP11]], align 8
; FULL-SIMPLIFY-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
; FULL-SIMPLIFY-NEXT: ret i32 [[TMP13]]
;
%3 = sext i32 %1 to i64
%4 = getelementptr inbounds %struct.A, %struct.A* %0, i64 0, i32 3
%5 = load i64, i64* %4, align 8
%6 = getelementptr inbounds %struct.A, %struct.A* %0, i64 0, i32 2, i64 %3, i64 %5, i32 0
%7 = load i64, i64* %6, align 32
%8 = getelementptr inbounds %struct.A, %struct.A* %0, i64 0, i32 1
%9 = load i64*, i64** %8, align 8
%10 = getelementptr inbounds i64, i64* %9, i64 %5
store i64 %7, i64* %10, align 8
store i64 %7, i64* %10, align 8
%11 = bitcast %struct.A* %0 to i32**
%12 = load i32*, i32** %11, align 8
%13 = load i32, i32* %12, align 4
ret i32 %13
}
View File

@@ -76,7 +76,7 @@ define i32 @test2(i32** %0, i32* %1, i32 %2, i32 %3) {
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32*, i32** [[TMP0]], i64 1
; CHECK-NEXT: [[TMP13:%.*]] = load i32*, i32** [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, i32* [[TMP13]], i64 0
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[TMP11]], i64 4), "dereferenceable"(i32* [[TMP11]], i64 4), "nonnull"(i32* [[TMP11]]), "align"(i32* [[TMP14]], i64 4), "dereferenceable"(i32* [[TMP14]], i64 4), "nonnull"(i32* [[TMP14]]) ]
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP1]], i64 12), "align"(i32* [[TMP13]], i64 4), "dereferenceable"(i32* [[TMP13]], i64 4), "nonnull"(i32* [[TMP13]]) ]
; CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32*, i32** [[TMP0]], i64 1
; CHECK-NEXT: [[TMP17:%.*]] = load i32*, i32** [[TMP16]], align 8
@@ -303,7 +303,7 @@ define i32 @test7(i32* %p) {
; CHECK-LABEL: define {{[^@]+}}@test7
; CHECK-SAME: (i32* align 4 dereferenceable(4) [[P:%.*]])
; CHECK-NEXT: [[P1:%.*]] = bitcast i32* [[P]] to i8*
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "cold"(), "align"(i8* [[P1]], i64 4), "nonnull"(i8* [[P1]]) ]
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "cold"(), "nonnull"(i32* [[P]]) ]
; CHECK-NEXT: ret i32 0
;
%p1 = bitcast i32* %p to i8*