
StoreInst should store Align, not MaybeAlign

This is D77454, except for stores. All the infrastructure work was done
for loads, so the remaining changes are relatively small.

Differential Revision: https://reviews.llvm.org/D79968
Eli Friedman 2020-05-14 14:48:10 -07:00
parent 85447130ae
commit ffa26401ac
57 changed files with 354 additions and 361 deletions
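For out-of-tree callers, here is a minimal sketch (not part of the patch) of what creating a store looks like after this change: the constructors take a concrete Align, getAlign() returns Align, and a possibly-missing alignment has to be resolved against the DataLayout before the instruction is built. The helper name emitStore and its parameters are illustrative only.

// A minimal sketch, assuming an out-of-tree pass that previously passed
// MaybeAlign straight to the StoreInst constructor. Names are illustrative,
// not part of this patch.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Alignment.h"
#include <cassert>

using namespace llvm;

static StoreInst *emitStore(Value *Val, Value *Ptr, MaybeAlign A,
                            const DataLayout &DL, Instruction *InsertPt) {
  // The constructor no longer accepts MaybeAlign, so resolve a missing
  // alignment to the ABI type alignment first.
  Align StoreAlign = DL.getValueOrABITypeAlignment(A, Val->getType());
  auto *SI =
      new StoreInst(Val, Ptr, /*isVolatile=*/false, StoreAlign, InsertPt);
  assert(SI->getAlign() == StoreAlign && "getAlign() now returns plain Align");
  return SI;
}

Note that, as the Instructions.cpp changes below show, the StoreInst constructors without an explicit alignment now default to the ABI type alignment themselves, so the fallback above is only needed when threading an optional alignment through your own code.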

View File

@ -191,7 +191,7 @@ void ThinLtoInstrumentationLayer::compileFunctionReachedFlagSetter(
new StoreInst(ConstantInt::get(Int64Ty, 0),
B.CreateIntToPtr(ConstantInt::get(Int64Ty, SyncFlagAddr),
Int64Ty->getPointerTo()),
IsVolatile, MaybeAlign(64), AtomicOrdering::Release,
IsVolatile, Align(64), AtomicOrdering::Release,
SyncScope::System, NoInsertBefore));
}
}

View File

@ -306,17 +306,16 @@ protected:
public:
StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
StoreInst(Value *Val, Value *Ptr, bool isVolatile = false,
Instruction *InsertBefore = nullptr);
StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
StoreInst(Value *Val, Value *Ptr, bool isVolatile, MaybeAlign Align,
StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
Instruction *InsertBefore = nullptr);
StoreInst(Value *Val, Value *Ptr, bool isVolatile, MaybeAlign Align,
StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
BasicBlock *InsertAtEnd);
StoreInst(Value *Val, Value *Ptr, bool isVolatile, MaybeAlign Align,
StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
Instruction *InsertBefore = nullptr);
StoreInst(Value *Val, Value *Ptr, bool isVolatile, MaybeAlign Align,
StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
// allocate space for exactly two operands
@ -339,17 +338,13 @@ public:
/// Return the alignment of the access that is being performed
/// FIXME: Remove this function once transition to Align is over.
/// Use getAlign() instead.
unsigned getAlignment() const {
if (const auto MA = getAlign())
return MA->value();
return 0;
unsigned getAlignment() const { return getAlign().value(); }
Align getAlign() const {
return *decodeMaybeAlign((getSubclassDataFromInstruction() >> 1) & 31);
}
MaybeAlign getAlign() const {
return decodeMaybeAlign((getSubclassDataFromInstruction() >> 1) & 31);
}
void setAlignment(MaybeAlign Alignment);
void setAlignment(Align Alignment);
/// Returns the ordering constraint of this store instruction.
AtomicOrdering getOrdering() const {

View File

@ -7112,8 +7112,12 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS) {
if (Ordering == AtomicOrdering::Acquire ||
Ordering == AtomicOrdering::AcquireRelease)
return Error(Loc, "atomic store cannot use Acquire ordering");
if (!Alignment && !Val->getType()->isSized())
return Error(Loc, "storing unsized types is not allowed");
if (!Alignment)
Alignment = M->getDataLayout().getABITypeAlign(Val->getType());
Inst = new StoreInst(Val, Ptr, isVolatile, Alignment, Ordering, SSID);
Inst = new StoreInst(Val, Ptr, isVolatile, *Alignment, Ordering, SSID);
return AteExtraComma ? InstExtraComma : InstNormal;
}

View File

@ -4922,7 +4922,9 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
MaybeAlign Align;
if (Error Err = parseAlignmentValue(Record[OpNum], Align))
return Err;
I = new StoreInst(Val, Ptr, Record[OpNum + 1], Align);
if (!Align)
Align = TheModule->getDataLayout().getABITypeAlign(Val->getType());
I = new StoreInst(Val, Ptr, Record[OpNum + 1], *Align);
InstructionList.push_back(I);
break;
}
@ -4955,7 +4957,9 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
MaybeAlign Align;
if (Error Err = parseAlignmentValue(Record[OpNum], Align))
return Err;
I = new StoreInst(Val, Ptr, Record[OpNum + 1], Align, Ordering, SSID);
if (!Align)
return error("Alignment missing from atomic store");
I = new StoreInst(Val, Ptr, Record[OpNum + 1], *Align, Ordering, SSID);
InstructionList.push_back(I);
break;
}

View File

@ -160,10 +160,9 @@ static bool InsertRootInitializers(Function &F, ArrayRef<AllocaInst *> Roots) {
for (AllocaInst *Root : Roots)
if (!InitedRoots.count(Root)) {
StoreInst *SI = new StoreInst(
new StoreInst(
ConstantPointerNull::get(cast<PointerType>(Root->getAllocatedType())),
Root);
SI->insertAfter(Root);
Root, Root->getNextNode());
MadeChange = true;
}

View File

@ -244,10 +244,8 @@ int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
}
Align IRTranslator::getMemOpAlign(const Instruction &I) {
if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
Type *ValTy = SI->getValueOperand()->getType();
return SI->getAlign().getValueOr(DL->getABITypeAlign(ValTy));
}
if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
return SI->getAlign();
if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
return DL->getValueOrABITypeAlignment(LI->getAlign(), LI->getType());
}

View File

@ -4616,7 +4616,7 @@ void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
MachineFunction &MF = DAG.getMachineFunction();
MachineMemOperand *MMO = MF.getMachineMemOperand(
MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
*I.getAlign(), AAMDNodes(), nullptr, SSID, Ordering);
I.getAlign(), AAMDNodes(), nullptr, SSID, Ordering);
SDValue Val = getValue(I.getValueOperand());
if (Val.getValueType() != MemVT)

View File

@ -466,8 +466,7 @@ bool SjLjEHPrepare::setupEntryBlockAndCallSites(Function &F) {
}
Instruction *StackAddr = CallInst::Create(StackAddrFn, "sp");
StackAddr->insertAfter(&I);
Instruction *StoreStackAddr = new StoreInst(StackAddr, StackPtr, true);
StoreStackAddr->insertAfter(StackAddr);
new StoreInst(StackAddr, StackPtr, true, StackAddr->getNextNode());
}
}

View File

@ -2025,7 +2025,7 @@ void LLVMSetAlignment(LLVMValueRef V, unsigned Bytes) {
else if (LoadInst *LI = dyn_cast<LoadInst>(P))
LI->setAlignment(Align(Bytes));
else if (StoreInst *SI = dyn_cast<StoreInst>(P))
SI->setAlignment(MaybeAlign(Bytes));
SI->setAlignment(Align(Bytes));
else
llvm_unreachable(
"only GlobalValue, AllocaInst, LoadInst and StoreInst have alignment");

View File

@ -1326,13 +1326,13 @@ void LoadInst::AssertOK() {
"Alignment required for atomic load");
}
Align computeLoadAlign(Type *Ty, BasicBlock *BB) {
Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB) {
const DataLayout &DL = BB->getModule()->getDataLayout();
return DL.getABITypeAlign(Ty);
}
Align computeLoadAlign(Type *Ty, Instruction *I) {
return computeLoadAlign(Ty, I->getParent());
Align computeLoadStoreDefaultAlign(Type *Ty, Instruction *I) {
return computeLoadStoreDefaultAlign(Ty, I->getParent());
}
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
@ -1345,13 +1345,13 @@ LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
Instruction *InsertBef)
: LoadInst(Ty, Ptr, Name, isVolatile, computeLoadAlign(Ty, InsertBef),
InsertBef) {}
: LoadInst(Ty, Ptr, Name, isVolatile,
computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
BasicBlock *InsertAE)
: LoadInst(Ty, Ptr, Name, isVolatile, computeLoadAlign(Ty, InsertAE),
InsertAE) {}
: LoadInst(Ty, Ptr, Name, isVolatile,
computeLoadStoreDefaultAlign(Ty, InsertAE), InsertAE) {}
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
Align Align, Instruction *InsertBef)
@ -1418,23 +1418,27 @@ StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
Instruction *InsertBefore)
: StoreInst(val, addr, isVolatile, /*Align=*/None, InsertBefore) {}
: StoreInst(val, addr, isVolatile,
computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
InsertBefore) {}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
BasicBlock *InsertAtEnd)
: StoreInst(val, addr, isVolatile, /*Align=*/None, InsertAtEnd) {}
: StoreInst(val, addr, isVolatile,
computeLoadStoreDefaultAlign(val->getType(), InsertAtEnd),
InsertAtEnd) {}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align,
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
Instruction *InsertBefore)
: StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
SyncScope::System, InsertBefore) {}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align,
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
BasicBlock *InsertAtEnd)
: StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
SyncScope::System, InsertAtEnd) {}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align,
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
AtomicOrdering Order, SyncScope::ID SSID,
Instruction *InsertBefore)
: Instruction(Type::getVoidTy(val->getContext()), Store,
@ -1448,7 +1452,7 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align,
AssertOK();
}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align,
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
AtomicOrdering Order, SyncScope::ID SSID,
BasicBlock *InsertAtEnd)
: Instruction(Type::getVoidTy(val->getContext()), Store,
@ -1462,8 +1466,8 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align,
AssertOK();
}
void StoreInst::setAlignment(MaybeAlign Alignment) {
assert((!Alignment || *Alignment <= MaximumAlignment) &&
void StoreInst::setAlignment(Align Alignment) {
assert(Alignment <= MaximumAlignment &&
"Alignment is greater than MaximumAlignment!");
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
(encode(Alignment) << 1));
@ -4248,9 +4252,8 @@ LoadInst *LoadInst::cloneImpl() const {
}
StoreInst *StoreInst::cloneImpl() const {
return new StoreInst(getOperand(0), getOperand(1), isVolatile(),
MaybeAlign(getAlignment()), getOrdering(),
getSyncScopeID());
return new StoreInst(getOperand(0), getOperand(1), isVolatile(), getAlign(),
getOrdering(), getSyncScopeID());
}
AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {

View File

@ -387,9 +387,7 @@ bool AMDGPUPrintfRuntimeBinding::lowerPrintfForGpu(
Value *id_gep_cast =
new BitCastInst(BufferIdx, idPointer, "PrintBuffIdCast", Brnch);
StoreInst *stbuff =
new StoreInst(ConstantInt::get(I32Ty, UniqID), id_gep_cast);
stbuff->insertBefore(Brnch); // to Remove unused variable warning
new StoreInst(ConstantInt::get(I32Ty, UniqID), id_gep_cast, Brnch);
SmallVector<Value *, 2> FourthIdxList;
ConstantInt *fourInt =

View File

@ -114,7 +114,7 @@ bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
/* SrcAddr */ SrcAddr, /* DstAddr */ DstAddr,
/* CopyLen */ CopyLen,
/* SrcAlign */ LI->getAlign(),
/* DestAlign */ SI->getAlign().valueOrOne(),
/* DestAlign */ SI->getAlign(),
/* SrcIsVolatile */ LI->isVolatile(),
/* DstIsVolatile */ SI->isVolatile(), TTI);

View File

@ -160,7 +160,7 @@ Instruction *InstCombiner::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
// into libcall in CodeGen. This is not evident performance gain so disable
// it now.
if (isa<AtomicMemTransferInst>(MI))
if (CopyDstAlign < Size || CopySrcAlign < Size)
if (*CopyDstAlign < Size || *CopySrcAlign < Size)
return nullptr;
// Use an integer load+store unless we can find something better.
@ -207,8 +207,7 @@ Instruction *InstCombiner::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
StoreInst *S = Builder.CreateStore(L, Dest);
// Alignment from the mem intrinsic will be better, so use it.
S->setAlignment(
MaybeAlign(CopyDstAlign)); // FIXME: Check if we can use Align instead.
S->setAlignment(*CopyDstAlign);
if (CopyMD)
S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
if (LoopMemParallelMD)
@ -1144,8 +1143,7 @@ Instruction *InstCombiner::simplifyMaskedStore(IntrinsicInst &II) {
// If the mask is all ones, this is a plain vector store of the 1st argument.
if (ConstMask->isAllOnesValue()) {
Value *StorePtr = II.getArgOperand(1);
MaybeAlign Alignment(
cast<ConstantInt>(II.getArgOperand(2))->getZExtValue());
Align Alignment(cast<ConstantInt>(II.getArgOperand(2))->getZExtValue());
return new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
}
@ -2482,7 +2480,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(0)->getType());
Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
return new StoreInst(II->getArgOperand(0), Ptr);
return new StoreInst(II->getArgOperand(0), Ptr, false, Align(16));
}
break;
case Intrinsic::ppc_vsx_stxvw4x:
@ -2524,7 +2522,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
Value *TOp = Builder.CreateFPTrunc(II->getArgOperand(0), VTy);
Type *OpPtrTy = PointerType::getUnqual(VTy);
Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
return new StoreInst(TOp, Ptr);
return new StoreInst(TOp, Ptr, false, Align(16));
}
break;
case Intrinsic::ppc_qpx_qvstfd:
@ -2534,7 +2532,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(0)->getType());
Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
return new StoreInst(II->getArgOperand(0), Ptr);
return new StoreInst(II->getArgOperand(0), Ptr, false, Align(32));
}
break;

View File

@ -1571,9 +1571,9 @@ bool InstCombiner::mergeStoreIntoSuccessor(StoreInst &SI) {
// Advance to a place where it is safe to insert the new store and insert it.
BBI = DestBB->getFirstInsertionPt();
StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1), SI.isVolatile(),
MaybeAlign(SI.getAlignment()),
SI.getOrdering(), SI.getSyncScopeID());
StoreInst *NewSI =
new StoreInst(MergedVal, SI.getOperand(1), SI.isVolatile(), SI.getAlign(),
SI.getOrdering(), SI.getSyncScopeID());
InsertNewInstBefore(NewSI, *BBI);
NewSI->setDebugLoc(MergedLoc);

View File

@ -1332,9 +1332,8 @@ static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
auto *SI = new StoreInst(
ConstantInt::get(Earlier->getValueOperand()->getType(), Merged),
Earlier->getPointerOperand(), false,
MaybeAlign(Earlier->getAlignment()), Earlier->getOrdering(),
Earlier->getSyncScopeID(), DepWrite);
Earlier->getPointerOperand(), false, Earlier->getAlign(),
Earlier->getOrdering(), Earlier->getSyncScopeID(), DepWrite);
unsigned MDToKeep[] = {LLVMContext::MD_dbg, LLVMContext::MD_tbaa,
LLVMContext::MD_alias_scope,

View File

@ -894,9 +894,8 @@ private:
std::min(ReplacementLoad->getAlign(), cast<LoadInst>(I)->getAlign()));
++NumLoadsRemoved;
} else if (auto *ReplacementStore = dyn_cast<StoreInst>(Repl)) {
ReplacementStore->setAlignment(
MaybeAlign(std::min(ReplacementStore->getAlignment(),
cast<StoreInst>(I)->getAlignment())));
ReplacementStore->setAlignment(std::min(ReplacementStore->getAlign(),
cast<StoreInst>(I)->getAlign()));
++NumStoresRemoved;
} else if (auto *ReplacementAlloca = dyn_cast<AllocaInst>(Repl)) {
ReplacementAlloca->setAlignment(

View File

@ -1712,8 +1712,8 @@ insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs,
cast<AllocaInst>(Alloca)->getAllocatedType(),
suffixed_name_or(Relocate, ".casted", ""));
StoreInst *Store = new StoreInst(CastedRelocatedValue, Alloca);
Store->insertAfter(cast<Instruction>(CastedRelocatedValue));
new StoreInst(CastedRelocatedValue, Alloca,
cast<Instruction>(CastedRelocatedValue)->getNextNode());
#ifndef NDEBUG
VisitedLiveValues.insert(OriginalValue);
@ -1735,8 +1735,8 @@ static void insertRematerializationStores(
"Can not find alloca for rematerialized value");
Value *Alloca = AllocaMap[OriginalValue];
StoreInst *Store = new StoreInst(RematerializedValue, Alloca);
Store->insertAfter(RematerializedValue);
new StoreInst(RematerializedValue, Alloca,
RematerializedValue->getNextNode());
#ifndef NDEBUG
VisitedLiveValues.insert(OriginalValue);
@ -1841,8 +1841,7 @@ static void relocationViaAlloca(
for (auto *AI : ToClobber) {
auto PT = cast<PointerType>(AI->getAllocatedType());
Constant *CPN = ConstantPointerNull::get(PT);
StoreInst *Store = new StoreInst(CPN, AI);
Store->insertBefore(IP);
new StoreInst(CPN, AI, IP);
}
};
@ -1904,7 +1903,8 @@ static void relocationViaAlloca(
// Emit store for the initial gc value. Store must be inserted after load,
// otherwise store will be in alloca's use list and an extra load will be
// inserted before it.
StoreInst *Store = new StoreInst(Def, Alloca);
StoreInst *Store = new StoreInst(Def, Alloca, /*volatile*/ false,
DL.getABITypeAlign(Def->getType()));
if (Instruction *Inst = dyn_cast<Instruction>(Def)) {
if (InvokeInst *Invoke = dyn_cast<InvokeInst>(Inst)) {
// InvokeInst is a terminator so the store need to be inserted into its

View File

@ -1134,8 +1134,7 @@ CallInst *CodeExtractor::emitCallAndSwitchStatement(Function *newFunction,
GetElementPtrInst *GEP = GetElementPtrInst::Create(
StructArgTy, Struct, Idx, "gep_" + StructValues[i]->getName());
codeReplacer->getInstList().push_back(GEP);
StoreInst *SI = new StoreInst(StructValues[i], GEP);
codeReplacer->getInstList().push_back(SI);
new StoreInst(StructValues[i], GEP, codeReplacer);
}
}

View File

@ -1,7 +1,7 @@
; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-unknown | FileCheck %s
; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-unknown -mattr=slow-misaligned-128store | FileCheck %s --check-prefix=SLOW_MISALIGNED_128_STORE
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-v256:32:256-a0:0:32-n32-S32"
; CHECK-LABEL: getMemoryOpCost
; SLOW_MISALIGNED_128_STORE-LABEL: getMemoryOpCost
define void @getMemoryOpCost() {

View File

@ -299,7 +299,7 @@ define i8 @optimizable() {
entry:
%ptr = alloca i8
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i8 42, i8* %ptr, !invariant.group !0
; CHECK-NEXT: store i8 42, i8* %ptr, align 1, !invariant.group !0
store i8 42, i8* %ptr, !invariant.group !0
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: call i8* @llvm.launder.invariant.group
@ -328,7 +328,7 @@ entry:
define i8 @unoptimizable2() {
%ptr = alloca i8
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i8 42, i8* %ptr, !invariant.group !0
; CHECK-NEXT: store i8 42, i8* %ptr, align 1, !invariant.group !0
store i8 42, i8* %ptr, !invariant.group !0
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: call i8* @llvm.launder.invariant.group

View File

@ -150,10 +150,10 @@ entry:
; CHECK-NEXT: store volatile i8 2, i8* %ptr1, align 1
store volatile i8 2, i8* %ptr1, align 1
; CHECK-NEXT: store i8 2, i8* %ptr1, !nontemporal !0
; CHECK-NEXT: store i8 2, i8* %ptr1, align 1, !nontemporal !0
store i8 2, i8* %ptr1, !nontemporal !0
; CHECK-NEXT: store volatile i8 2, i8* %ptr1, !nontemporal !0
; CHECK-NEXT: store volatile i8 2, i8* %ptr1, align 1, !nontemporal !0
store volatile i8 2, i8* %ptr1, !nontemporal !0
; CHECK-NEXT: store i8 2, i8* %ptr1, align 1, !nontemporal !0

View File

@ -3,7 +3,7 @@
define i32 @foo() nounwind ssp {
entry:
; CHECK: %retval = alloca i32
; CHECK: store i32 42, i32* %retval, !md !0
; CHECK: store i32 42, i32* %retval, align 4, !md !0
; CHECK: br label %0, !md !1
%retval = alloca i32
store i32 42, i32* %retval, !md !0

View File

@ -157,7 +157,7 @@ entry:
; CHECK: {{^[0-9]+}}:
; CHECK: @__msan_chain_origin
; Storing origin here:
; CHECK: store i32
; CHECK: store i64
; CHECK: br label
; CHECK: {{^[0-9]+}}:
; CHECK: store i64
@ -189,7 +189,7 @@ entry:
; CHECK: {{^[0-9]+}}:
; CHECK: @__msan_chain_origin
; Storing origin here:
; CHECK: store i32
; CHECK: store i64
; CHECK: br label
; CHECK: {{^[0-9]+}}:
; CHECK: store i128

View File

@ -18,7 +18,7 @@ define void @fn2(i32* %P, i1 %C) {
; IS__TUNIT____-NEXT: [[E_2:%.*]] = phi i32* [ [[P]], [[ENTRY:%.*]] ], [ null, [[FOR_COND1:%.*]] ]
; IS__TUNIT____-NEXT: [[TMP0:%.*]] = load i32, i32* [[E_2]], align 4
; IS__TUNIT____-NEXT: [[CALL:%.*]] = call i32 @fn1(i32 [[TMP0]])
; IS__TUNIT____-NEXT: store i32 [[CALL]], i32* [[P]], align 1
; IS__TUNIT____-NEXT: store i32 [[CALL]], i32* [[P]], align 4
; IS__TUNIT____-NEXT: br label [[FOR_COND1]]
; IS__TUNIT____: exit:
; IS__TUNIT____-NEXT: ret void
@ -80,7 +80,7 @@ define void @fn_no_null_opt(i32* %P, i1 %C) null_pointer_is_valid {
; IS__TUNIT____-NEXT: [[E_2:%.*]] = phi i32* [ undef, [[ENTRY:%.*]] ], [ null, [[FOR_COND1:%.*]] ]
; IS__TUNIT____-NEXT: [[TMP0:%.*]] = load i32, i32* null, align 4
; IS__TUNIT____-NEXT: [[CALL:%.*]] = call i32 @fn0(i32 [[TMP0]])
; IS__TUNIT____-NEXT: store i32 [[CALL]], i32* [[P]], align 1
; IS__TUNIT____-NEXT: store i32 [[CALL]], i32* [[P]], align 4
; IS__TUNIT____-NEXT: br label [[FOR_COND1]]
; IS__TUNIT____: exit:
; IS__TUNIT____-NEXT: ret void

View File

@ -403,7 +403,7 @@ define i32* @test10a(i32* align 32 %p) {
; CHECK-NEXT: br i1 [[C]], label [[T:%.*]], label [[F:%.*]]
; CHECK: t:
; CHECK-NEXT: [[R:%.*]] = call i32* @test10a(i32* nofree nonnull align 32 dereferenceable(4) "no-capture-maybe-returned" [[P]])
; CHECK-NEXT: store i32 1, i32* [[R]], align 1
; CHECK-NEXT: store i32 1, i32* [[R]], align 4
; CHECK-NEXT: [[G0:%.*]] = getelementptr i32, i32* [[P]], i32 8
; CHECK-NEXT: br label [[E:%.*]]
; CHECK: f:
@ -445,7 +445,7 @@ define i32* @test10b(i32* align 32 %p) {
; CHECK-NEXT: br i1 [[C]], label [[T:%.*]], label [[F:%.*]]
; CHECK: t:
; CHECK-NEXT: [[R:%.*]] = call i32* @test10b(i32* nofree nonnull align 32 dereferenceable(4) "no-capture-maybe-returned" [[P]])
; CHECK-NEXT: store i32 1, i32* [[R]], align 1
; CHECK-NEXT: store i32 1, i32* [[R]], align 4
; CHECK-NEXT: [[G0:%.*]] = getelementptr i32, i32* [[P]], i32 8
; CHECK-NEXT: br label [[E:%.*]]
; CHECK: f:

View File

@ -527,10 +527,10 @@ define i32 @require_cfg_analysis(i32 %c, i32* %p) {
; IS________OPM-NEXT: [[TOBOOL4:%.*]] = icmp eq i32 [[C]], 4
; IS________OPM-NEXT: br i1 [[TOBOOL4]], label [[L6:%.*]], label [[L7:%.*]]
; IS________OPM: l6:
; IS________OPM-NEXT: store i32 0, i32* [[P]], align 1
; IS________OPM-NEXT: store i32 0, i32* [[P]], align 4
; IS________OPM-NEXT: br label [[END:%.*]]
; IS________OPM: l7:
; IS________OPM-NEXT: store i32 1, i32* [[P]], align 1
; IS________OPM-NEXT: store i32 1, i32* [[P]], align 4
; IS________OPM-NEXT: br label [[END]]
; IS________OPM: end:
; IS________OPM-NEXT: ret i32 1

View File

@ -696,7 +696,7 @@ define void @test16b(i8 %v, i8** %P) {
; CHECK-LABEL: define {{[^@]+}}@test16b
; CHECK-SAME: (i8 [[V:%.*]], i8** nocapture writeonly [[P:%.*]])
; CHECK-NEXT: [[TMP1:%.*]] = tail call noalias i8* @malloc(i64 4)
; CHECK-NEXT: store i8* [[TMP1]], i8** [[P]], align 1
; CHECK-NEXT: store i8* [[TMP1]], i8** [[P]], align 8
; CHECK-NEXT: tail call void @no_sync_func(i8* nocapture nofree [[TMP1]])
; CHECK-NEXT: tail call void @free(i8* nocapture [[TMP1]])
; CHECK-NEXT: ret void
@ -712,7 +712,7 @@ define void @test16c(i8 %v, i8** %P) {
; CHECK-LABEL: define {{[^@]+}}@test16c
; CHECK-SAME: (i8 [[V:%.*]], i8** nocapture writeonly [[P:%.*]])
; CHECK-NEXT: [[TMP1:%.*]] = tail call noalias i8* @malloc(i64 4)
; CHECK-NEXT: store i8* [[TMP1]], i8** [[P]], align 1
; CHECK-NEXT: store i8* [[TMP1]], i8** [[P]], align 8
; CHECK-NEXT: tail call void @no_sync_func(i8* nocapture nofree [[TMP1]])
; CHECK-NEXT: tail call void @free(i8* nocapture [[TMP1]])
; CHECK-NEXT: ret void
@ -728,7 +728,7 @@ define void @test16d(i8 %v, i8** %P) {
; CHECK-LABEL: define {{[^@]+}}@test16d
; CHECK-SAME: (i8 [[V:%.*]], i8** nocapture writeonly [[P:%.*]])
; CHECK-NEXT: [[TMP1:%.*]] = tail call noalias i8* @malloc(i64 4)
; CHECK-NEXT: store i8* [[TMP1]], i8** [[P]], align 1
; CHECK-NEXT: store i8* [[TMP1]], i8** [[P]], align 8
; CHECK-NEXT: ret void
;
%1 = tail call noalias i8* @malloc(i64 4)

View File

@ -670,7 +670,7 @@ define void @nocapture_is_not_subsumed_1(i32* nocapture %b) {
; CHECK-SAME: (i32* nocapture [[B:%.*]])
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = call i32* @unknown_i32p(i32* [[B]])
; CHECK-NEXT: store i32 0, i32* [[CALL]], align 1
; CHECK-NEXT: store i32 0, i32* [[CALL]], align 4
; CHECK-NEXT: ret void
;
entry:
@ -685,7 +685,7 @@ define void @nocapture_is_not_subsumed_2(i32* nocapture %b) {
; CHECK-SAME: (i32* nocapture [[B:%.*]])
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = call i32* @readonly_i32p(i32* readonly [[B]])
; CHECK-NEXT: store i32 0, i32* [[CALL]], align 1
; CHECK-NEXT: store i32 0, i32* [[CALL]], align 4
; CHECK-NEXT: ret void
;
entry:

View File

@ -388,7 +388,7 @@ define void @fixpoint_changed(i32* %p) {
; CHECK-NEXT: br label [[SW_EPILOG]]
; CHECK: sw.epilog:
; CHECK-NEXT: [[X_0:%.*]] = phi i32 [ 255, [[FOR_BODY]] ], [ 253, [[SW_BB]] ]
; CHECK-NEXT: store i32 [[X_0]], i32* [[P]], align 1
; CHECK-NEXT: store i32 [[X_0]], i32* [[P]], align 4
; CHECK-NEXT: [[INC]] = add nsw i32 [[J_0]], 1
; CHECK-NEXT: br label [[FOR_COND]]
; CHECK: for.end:

View File

@ -11,7 +11,7 @@ enter:
%val = load i8, i8* @tmp, !invariant.group !0
%ptr = call i8* @llvm.launder.invariant.group.p0i8(i8* @tmp)
; CHECK: store i8 42, i8* @tmp{{$}}
; CHECK: store i8 42, i8* @tmp, align 1{{$}}
store i8 42, i8* %ptr, !invariant.group !0
ret void
@ -27,7 +27,7 @@ enter:
%val = load i8, i8* @tmp, !invariant.group !0
%ptr = call i8* @llvm.strip.invariant.group.p0i8(i8* @tmp)
; CHECK: store i8 42, i8* @tmp{{$}}
; CHECK: store i8 42, i8* @tmp, align 1{{$}}
store i8 42, i8* %ptr, !invariant.group !0
ret void

View File

@ -32,7 +32,7 @@ bb:
define void @test2(i8* %ptr) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: store i8 0, i8* [[PTR:%.*]]
; CHECK-NEXT: store i8 0, i8* [[PTR:%.*]], align 1
; CHECK-NEXT: br label [[BB:%.*]]
; CHECK: bb:
; CHECK-NEXT: ret void
@ -46,7 +46,7 @@ bb:
define void @test2_no_null_opt(i8* %ptr) #0 {
; CHECK-LABEL: @test2_no_null_opt(
; CHECK-NEXT: store i8 0, i8* [[PTR:%.*]]
; CHECK-NEXT: store i8 0, i8* [[PTR:%.*]], align 1
; CHECK-NEXT: br label [[BB:%.*]]
; CHECK: bb:
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i8* [[PTR]], null

View File

@ -73,9 +73,9 @@ define void @test1(i32 *%ptr) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[BPTR:%.*]] = bitcast i32* [[PTR:%.*]] to i8*
; CHECK-NEXT: [[WPTR:%.*]] = bitcast i32* [[PTR]] to i16*
; CHECK-NEXT: store i16 -30062, i16* [[WPTR]]
; CHECK-NEXT: store i16 -30062, i16* [[WPTR]], align 2
; CHECK-NEXT: [[BPTR3:%.*]] = getelementptr inbounds i8, i8* [[BPTR]], i64 3
; CHECK-NEXT: store i8 47, i8* [[BPTR3]]
; CHECK-NEXT: store i8 47, i8* [[BPTR3]], align 1
; CHECK-NEXT: [[BPTR1:%.*]] = getelementptr inbounds i8, i8* [[BPTR]], i64 1
; CHECK-NEXT: [[WPTRP:%.*]] = bitcast i8* [[BPTR1]] to i16*
; CHECK-NEXT: store i16 2020, i16* [[WPTRP]], align 1
@ -151,7 +151,7 @@ entry:
define signext i8 @test3(i32 *%ptr) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: entry:
; CHECK-NEXT: store i32 5, i32* [[PTR:%.*]]
; CHECK-NEXT: store i32 5, i32* [[PTR:%.*]], align 4
; CHECK-NEXT: [[BPTR:%.*]] = bitcast i32* [[PTR]] to i8*
; CHECK-NEXT: [[BPTRM1:%.*]] = getelementptr inbounds i8, i8* [[BPTR]], i64 -1
; CHECK-NEXT: [[BPTR1:%.*]] = getelementptr inbounds i8, i8* [[BPTR]], i64 1

View File

@ -12,14 +12,14 @@ declare void @use(i32 *)
; Cannot remove the store from the entry block, because the call in bb2 may throw.
define void @accessible_after_return_1(i32* noalias %P, i1 %c1) {
; CHECK-LABEL: @accessible_after_return_1(
; CHECK-NEXT: store i32 1, i32* [[P:%.*]]
; CHECK-NEXT: store i32 1, i32* [[P:%.*]], align 4
; CHECK-NEXT: br i1 [[C1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: store i32 0, i32* [[P]]
; CHECK-NEXT: store i32 0, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
; CHECK-NEXT: call void @readnone_may_throw()
; CHECK-NEXT: store i32 3, i32* [[P]]
; CHECK-NEXT: store i32 3, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb5:
; CHECK-NEXT: call void @use(i32* [[P]])
@ -46,19 +46,19 @@ bb5:
define void @accessible_after_return6(i32* %P, i1 %c.1, i1 %c.2) {
; CHECK-LABEL: @accessible_after_return6(
; CHECK-NEXT: entry:
; CHECK-NEXT: store i32 0, i32* [[P:%.*]]
; CHECK-NEXT: store i32 0, i32* [[P:%.*]], align 4
; CHECK-NEXT: br i1 [[C_1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: br i1 [[C_2:%.*]], label [[BB3:%.*]], label [[BB4:%.*]]
; CHECK: bb2:
; CHECK-NEXT: store i32 1, i32* [[P]]
; CHECK-NEXT: store i32 1, i32* [[P]], align 4
; CHECK-NEXT: ret void
; CHECK: bb3:
; CHECK-NEXT: call void @readnone_may_throw()
; CHECK-NEXT: store i32 2, i32* [[P]]
; CHECK-NEXT: store i32 2, i32* [[P]], align 4
; CHECK-NEXT: ret void
; CHECK: bb4:
; CHECK-NEXT: store i32 3, i32* [[P]]
; CHECK-NEXT: store i32 3, i32* [[P]], align 4
; CHECK-NEXT: ret void
;
entry:
@ -93,11 +93,11 @@ define void @alloca_1(i1 %c1) {
; CHECK-NEXT: [[P:%.*]] = alloca i32
; CHECK-NEXT: br i1 [[C1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: store i32 0, i32* [[P]]
; CHECK-NEXT: store i32 0, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
; CHECK-NEXT: call void @readnone_may_throw()
; CHECK-NEXT: store i32 3, i32* [[P]]
; CHECK-NEXT: store i32 3, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb5:
; CHECK-NEXT: call void @use(i32* [[P]])
@ -130,16 +130,16 @@ define void @alloca_2(i1 %c.1, i1 %c.2) {
; CHECK-NEXT: [[P:%.*]] = alloca i32
; CHECK-NEXT: br i1 [[C_1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: store i32 0, i32* [[P]]
; CHECK-NEXT: store i32 0, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
; CHECK-NEXT: br i1 [[C_2:%.*]], label [[BB3:%.*]], label [[BB4:%.*]]
; CHECK: bb3:
; CHECK-NEXT: call void @readnone_may_throw()
; CHECK-NEXT: store i32 3, i32* [[P]]
; CHECK-NEXT: store i32 3, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb4:
; CHECK-NEXT: store i32 5, i32* [[P]]
; CHECK-NEXT: store i32 5, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb5:
; CHECK-NEXT: call void @use(i32* [[P]])

View File

@ -9,13 +9,13 @@ declare void @use(i32 *)
define void @accessible_after_return_1(i32* noalias %P, i1 %c1) {
; CHECK-LABEL: @accessible_after_return_1(
; CHECK-NEXT: store i32 1, i32* [[P:%.*]]
; CHECK-NEXT: store i32 1, i32* [[P:%.*]], align 4
; CHECK-NEXT: br i1 [[C1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: store i32 0, i32* [[P]]
; CHECK-NEXT: store i32 0, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
; CHECK-NEXT: store i32 3, i32* [[P]]
; CHECK-NEXT: store i32 3, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb5:
; CHECK-NEXT: call void @use(i32* [[P]])
@ -38,18 +38,18 @@ bb5:
define void @accessible_after_return_2(i32* noalias %P, i1 %c.1, i1 %c.2) {
; CHECK-LABEL: @accessible_after_return_2(
; CHECK-NEXT: store i32 1, i32* [[P:%.*]]
; CHECK-NEXT: store i32 1, i32* [[P:%.*]], align 4
; CHECK-NEXT: br i1 [[C_1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: store i32 0, i32* [[P]]
; CHECK-NEXT: store i32 0, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
; CHECK-NEXT: br i1 [[C_2:%.*]], label [[BB3:%.*]], label [[BB4:%.*]]
; CHECK: bb3:
; CHECK-NEXT: store i32 3, i32* [[P]]
; CHECK-NEXT: store i32 3, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb4:
; CHECK-NEXT: store i32 5, i32* [[P]]
; CHECK-NEXT: store i32 5, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb5:
; CHECK-NEXT: call void @use(i32* [[P]])
@ -79,10 +79,10 @@ bb5:
define void @accessible_after_return_3(i32* noalias %P, i1 %c1) {
; CHECK-LABEL: @accessible_after_return_3(
; CHECK-NEXT: store i32 1, i32* [[P:%.*]]
; CHECK-NEXT: store i32 1, i32* [[P:%.*]], align 4
; CHECK-NEXT: br i1 [[C1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: store i32 0, i32* [[P]]
; CHECK-NEXT: store i32 0, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
; CHECK-NEXT: br label [[BB5]]
@ -107,10 +107,10 @@ bb5:
define void @accessible_after_return_4(i32* noalias %P, i1 %c1) {
; CHECK-LABEL: @accessible_after_return_4(
; CHECK-NEXT: store i32 1, i32* [[P:%.*]]
; CHECK-NEXT: store i32 1, i32* [[P:%.*]], align 4
; CHECK-NEXT: br i1 [[C1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: store i32 0, i32* [[P]]
; CHECK-NEXT: store i32 0, i32* [[P]], align 4
; CHECK-NEXT: call void @use(i32* [[P]])
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
@ -143,10 +143,10 @@ define void @alloca_1(i1 %c1) {
; CHECK-NEXT: [[P:%.*]] = alloca i32
; CHECK-NEXT: br i1 [[C1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: store i32 0, i32* [[P]]
; CHECK-NEXT: store i32 0, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
; CHECK-NEXT: store i32 3, i32* [[P]]
; CHECK-NEXT: store i32 3, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb5:
; CHECK-NEXT: call void @use(i32* [[P]])
@ -175,15 +175,15 @@ define void @alloca_2(i1 %c.1, i1 %c.2) {
; CHECK-NEXT: [[P:%.*]] = alloca i32
; CHECK-NEXT: br i1 [[C_1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: store i32 0, i32* [[P]]
; CHECK-NEXT: store i32 0, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
; CHECK-NEXT: br i1 [[C_2:%.*]], label [[BB3:%.*]], label [[BB4:%.*]]
; CHECK: bb3:
; CHECK-NEXT: store i32 3, i32* [[P]]
; CHECK-NEXT: store i32 3, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb4:
; CHECK-NEXT: store i32 5, i32* [[P]]
; CHECK-NEXT: store i32 5, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb5:
; CHECK-NEXT: call void @use(i32* [[P]])
@ -218,10 +218,10 @@ bb5:
define void @alloca_3(i1 %c1) {
; CHECK-LABEL: @alloca_3(
; CHECK-NEXT: [[P:%.*]] = alloca i32
; CHECK-NEXT: store i32 1, i32* [[P]]
; CHECK-NEXT: store i32 1, i32* [[P]], align 4
; CHECK-NEXT: br i1 [[C1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: store i32 0, i32* [[P]]
; CHECK-NEXT: store i32 0, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
; CHECK-NEXT: br label [[BB5]]
@ -252,7 +252,7 @@ define void @alloca_4(i1 %c1) {
; CHECK-NEXT: [[P:%.*]] = alloca i32
; CHECK-NEXT: br i1 [[C1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: store i32 0, i32* [[P]]
; CHECK-NEXT: store i32 0, i32* [[P]], align 4
; CHECK-NEXT: call void @use(i32* [[P]])
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:

View File

@ -6,7 +6,7 @@ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
define void @second_store_smaller(i32* noalias %P) {
; CHECK-LABEL: @second_store_smaller(
; CHECK-NEXT: store i32 1, i32* [[P:%.*]]
; CHECK-NEXT: store i32 1, i32* [[P:%.*]], align 4
; CHECK-NEXT: br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: br label [[BB3:%.*]]
@ -14,7 +14,7 @@ define void @second_store_smaller(i32* noalias %P) {
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: [[P_I16:%.*]] = bitcast i32* [[P]] to i16*
; CHECK-NEXT: store i16 0, i16* [[P_I16]]
; CHECK-NEXT: store i16 0, i16* [[P_I16]], align 2
; CHECK-NEXT: ret void
;
store i32 1, i32* %P
@ -39,7 +39,7 @@ define void @second_store_bigger(i32* noalias %P) {
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: [[P_I64:%.*]] = bitcast i32* [[P:%.*]] to i64*
; CHECK-NEXT: store i64 0, i64* [[P_I64]]
; CHECK-NEXT: store i64 0, i64* [[P_I64]], align 8
; CHECK-NEXT: ret void
;
store i32 1, i32* %P

View File

@ -12,7 +12,7 @@ define void @test2(i32* noalias %P) {
; CHECK: bb2:
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: store i32 0, i32* [[P:%.*]]
; CHECK-NEXT: store i32 0, i32* [[P:%.*]], align 4
; CHECK-NEXT: ret void
;
store i32 1, i32* %P
@ -28,12 +28,12 @@ bb3:
define void @test3(i32* noalias %P) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: store i32 0, i32* [[P:%.*]]
; CHECK-NEXT: store i32 0, i32* [[P:%.*]], align 4
; CHECK-NEXT: br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb2:
; CHECK-NEXT: store i32 0, i32* [[P]]
; CHECK-NEXT: store i32 0, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: ret void
@ -59,8 +59,8 @@ define void @test7(i32* noalias %P, i32* noalias %Q) {
; CHECK: bb2:
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: store i32 0, i32* [[Q:%.*]]
; CHECK-NEXT: store i32 0, i32* [[P]]
; CHECK-NEXT: store i32 0, i32* [[Q:%.*]], align 4
; CHECK-NEXT: store i32 0, i32* [[P]], align 4
; CHECK-NEXT: ret void
;
store i32 1, i32* %Q
@ -78,8 +78,8 @@ bb3:
define i32 @test22(i32* %P, i32* noalias %Q, i32* %R) {
; CHECK-LABEL: @test22(
; CHECK-NEXT: store i32 2, i32* [[P:%.*]]
; CHECK-NEXT: store i32 3, i32* [[Q:%.*]]
; CHECK-NEXT: store i32 2, i32* [[P:%.*]], align 4
; CHECK-NEXT: store i32 3, i32* [[Q:%.*]], align 4
; CHECK-NEXT: [[L:%.*]] = load i32, i32* [[R:%.*]], align 4
; CHECK-NEXT: ret i32 [[L]]
;
@ -92,14 +92,14 @@ define i32 @test22(i32* %P, i32* noalias %Q, i32* %R) {
define void @test9(i32* noalias %P) {
; CHECK-LABEL: @test9(
; CHECK-NEXT: store i32 0, i32* [[P:%.*]]
; CHECK-NEXT: store i32 0, i32* [[P:%.*]], align 4
; CHECK-NEXT: br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb2:
; CHECK-NEXT: ret void
; CHECK: bb3:
; CHECK-NEXT: store i32 0, i32* [[P]]
; CHECK-NEXT: store i32 0, i32* [[P]], align 4
; CHECK-NEXT: ret void
;
store i32 0, i32* %P
@ -118,9 +118,9 @@ bb3:
; alias %P. Note that uses point to the *first* def that may alias.
define void @overlapping_read(i32* %P) {
; CHECK-LABEL: @overlapping_read(
; CHECK-NEXT: store i32 0, i32* [[P:%.*]]
; CHECK-NEXT: store i32 0, i32* [[P:%.*]], align 4
; CHECK-NEXT: [[P_1:%.*]] = getelementptr i32, i32* [[P]], i32 1
; CHECK-NEXT: store i32 1, i32* [[P_1]]
; CHECK-NEXT: store i32 1, i32* [[P_1]], align 4
; CHECK-NEXT: [[P_64:%.*]] = bitcast i32* [[P]] to i64*
; CHECK-NEXT: [[LV:%.*]] = load i64, i64* [[P_64]], align 8
; CHECK-NEXT: br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
@ -129,7 +129,7 @@ define void @overlapping_read(i32* %P) {
; CHECK: bb2:
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: store i32 2, i32* [[P]]
; CHECK-NEXT: store i32 2, i32* [[P]], align 4
; CHECK-NEXT: ret void
;
store i32 0, i32* %P
@ -150,10 +150,10 @@ bb3:
define void @test10(i32* %P) {
; CHECK-LABEL: @test10(
; CHECK-NEXT: store i32 0, i32* [[P:%.*]]
; CHECK-NEXT: store i32 0, i32* [[P:%.*]], align 4
; CHECK-NEXT: br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: store i32 0, i32* [[P]]
; CHECK-NEXT: store i32 0, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb2:
; CHECK-NEXT: ret void
@ -177,7 +177,7 @@ define void @test11() {
; CHECK-NEXT: [[P:%.*]] = alloca i32
; CHECK-NEXT: br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: store i32 0, i32* [[P]]
; CHECK-NEXT: store i32 0, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb2:
; CHECK-NEXT: ret void
@ -199,13 +199,13 @@ bb3:
define void @test12(i32* %P) {
; CHECK-LABEL: @test12(
; CHECK-NEXT: store i32 0, i32* [[P:%.*]]
; CHECK-NEXT: store i32 0, i32* [[P:%.*]], align 4
; CHECK-NEXT: br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: store i32 1, i32* [[P]]
; CHECK-NEXT: store i32 1, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb2:
; CHECK-NEXT: store i32 1, i32* [[P]]
; CHECK-NEXT: store i32 1, i32* [[P]], align 4
; CHECK-NEXT: ret void
; CHECK: bb3:
; CHECK-NEXT: ret void
@ -225,13 +225,13 @@ bb3:
define void @test13(i32* %P) {
; CHECK-LABEL: @test13(
; CHECK-NEXT: store i32 0, i32* [[P:%.*]]
; CHECK-NEXT: store i32 0, i32* [[P:%.*]], align 4
; CHECK-NEXT: br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: store i32 1, i32* [[P]]
; CHECK-NEXT: store i32 1, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb2:
; CHECK-NEXT: store i32 1, i32* [[P]]
; CHECK-NEXT: store i32 1, i32* [[P]], align 4
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: ret void

View File

@ -6,8 +6,8 @@ target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:1
define void @test1(i32* %Q, i32* %P) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[Q:%.*]], i64 4), "nonnull"(i32* [[Q]]), "align"(i32* [[Q]], i64 4) ]
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P:%.*]], i64 4), "nonnull"(i32* [[P]]) ]
; CHECK-NEXT: store i32 0, i32* [[P]]
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P:%.*]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
; CHECK-NEXT: store i32 0, i32* [[P]], align 4
; CHECK-NEXT: ret void
;
%DEAD = load i32, i32* %Q

View File

@ -11,7 +11,7 @@ declare void @llvm.init.trampoline(i8*, i8*, i8*)
define void @test1(i32* %Q, i32* %P) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: store i32 0, i32* [[P:%.*]]
; CHECK-NEXT: store i32 0, i32* [[P:%.*]], align 4
; CHECK-NEXT: ret void
;
%DEAD = load i32, i32* %Q
@ -42,7 +42,7 @@ define i32 @test3(i32* %g_addr) nounwind {
define void @test4(i32* %Q) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: [[A:%.*]] = load i32, i32* [[Q:%.*]], align 4
; CHECK-NEXT: store volatile i32 [[A]], i32* [[Q]]
; CHECK-NEXT: store volatile i32 [[A]], i32* [[Q]], align 4
; CHECK-NEXT: ret void
;
%a = load i32, i32* %Q
@ -124,7 +124,7 @@ define void @test7_atomic(i32* align 4 %p, i8* align 4 %q, i8* noalias align 4 %
define double @test10(i8* %X) {
; CHECK-LABEL: @test10(
; CHECK-NEXT: [[X_ADDR:%.*]] = alloca i8*
; CHECK-NEXT: store i8* [[X:%.*]], i8** [[X_ADDR]]
; CHECK-NEXT: store i8* [[X:%.*]], i8** [[X_ADDR]], align 8
; CHECK-NEXT: [[TMP_0:%.*]] = va_arg i8** [[X_ADDR]], double
; CHECK-NEXT: ret double [[TMP_0]]
;
@ -141,7 +141,7 @@ define i32* @test13() {
; CHECK-NEXT: [[PTR:%.*]] = tail call i8* @malloc(i32 4)
; CHECK-NEXT: [[P:%.*]] = bitcast i8* [[PTR]] to i32*
; CHECK-NEXT: call void @test13f()
; CHECK-NEXT: store i32 0, i32* [[P]]
; CHECK-NEXT: store i32 0, i32* [[P]], align 4
; CHECK-NEXT: ret i32* [[P]]
;
%ptr = tail call i8* @malloc(i32 4)
@ -160,7 +160,7 @@ define i32 addrspace(1)* @test13_addrspacecast() {
; CHECK-NEXT: [[P_BC:%.*]] = bitcast i8* [[P]] to i32*
; CHECK-NEXT: [[P:%.*]] = addrspacecast i32* [[P_BC]] to i32 addrspace(1)*
; CHECK-NEXT: call void @test13f()
; CHECK-NEXT: store i32 0, i32 addrspace(1)* [[P]]
; CHECK-NEXT: store i32 0, i32 addrspace(1)* [[P]], align 4
; CHECK-NEXT: ret i32 addrspace(1)* [[P]]
;
%p = tail call i8* @malloc(i32 4)
@ -185,7 +185,7 @@ define void @test19({i32} * nocapture byval align 4 %arg5) nounwind ssp {
; CHECK-LABEL: @test19(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds { i32 }, { i32 }* [[ARG5:%.*]], i32 0, i32 0
; CHECK-NEXT: store i32 912, i32* [[TMP7]]
; CHECK-NEXT: store i32 912, i32* [[TMP7]], align 4
; CHECK-NEXT: call void @test19f({ i32 }* byval align 4 [[ARG5]])
; CHECK-NEXT: ret void
;
@ -377,9 +377,9 @@ bb2:
; it could unwind
define void @test34(i32* noalias %p) {
; CHECK-LABEL: @test34(
; CHECK-NEXT: store i32 1, i32* [[P:%.*]]
; CHECK-NEXT: store i32 1, i32* [[P:%.*]], align 4
; CHECK-NEXT: call void @unknown_func()
; CHECK-NEXT: store i32 0, i32* [[P]]
; CHECK-NEXT: store i32 0, i32* [[P]], align 4
; CHECK-NEXT: ret void
;
store i32 1, i32* %p
@ -392,7 +392,7 @@ define void @test34(i32* noalias %p) {
define void @test35(i32* noalias %p) {
; CHECK-LABEL: @test35(
; CHECK-NEXT: call void @unknown_func()
; CHECK-NEXT: store i32 0, i32* [[P:%.*]]
; CHECK-NEXT: store i32 0, i32* [[P:%.*]], align 4
; CHECK-NEXT: ret void
;
call void @unknown_func()
@ -536,9 +536,9 @@ declare void @free(i8* nocapture)
define void @test41(i32* noalias %P) {
; CHECK-LABEL: @test41(
; CHECK-NEXT: [[P2:%.*]] = bitcast i32* [[P:%.*]] to i8*
; CHECK-NEXT: store i32 1, i32* [[P]]
; CHECK-NEXT: store i32 1, i32* [[P]], align 4
; CHECK-NEXT: call void @unknown_func()
; CHECK-NEXT: store i32 2, i32* [[P]]
; CHECK-NEXT: store i32 2, i32* [[P]], align 4
; CHECK-NEXT: call void @free(i8* [[P2]])
; CHECK-NEXT: ret void
;
@ -552,10 +552,10 @@ define void @test41(i32* noalias %P) {
define void @test42(i32* %P, i32* %Q) {
; CHECK-LABEL: @test42(
; CHECK-NEXT: store i32 1, i32* [[P:%.*]]
; CHECK-NEXT: store i32 1, i32* [[P:%.*]], align 4
; CHECK-NEXT: [[P2:%.*]] = bitcast i32* [[P]] to i8*
; CHECK-NEXT: store i32 2, i32* [[Q:%.*]]
; CHECK-NEXT: store i8 3, i8* [[P2]]
; CHECK-NEXT: store i32 2, i32* [[Q:%.*]], align 4
; CHECK-NEXT: store i8 3, i8* [[P2]], align 1
; CHECK-NEXT: ret void
;
store i32 1, i32* %P

View File

@ -5,7 +5,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
define void @byte_by_byte_replacement(i32 *%ptr) {
; CHECK-LABEL: @byte_by_byte_replacement(
; CHECK-NEXT: entry:
; CHECK-NEXT: store i32 202050057, i32* [[PTR:%.*]]
; CHECK-NEXT: store i32 202050057, i32* [[PTR:%.*]], align 4
; CHECK-NEXT: ret void
;
entry:
@ -31,7 +31,7 @@ entry:
define void @word_replacement(i64 *%ptr) {
; CHECK-LABEL: @word_replacement(
; CHECK-NEXT: entry:
; CHECK-NEXT: store i64 8106482645252179720, i64* [[PTR:%.*]]
; CHECK-NEXT: store i64 8106482645252179720, i64* [[PTR:%.*]], align 8
; CHECK-NEXT: ret void
;
entry:
@ -54,7 +54,7 @@ entry:
define void @differently_sized_replacements(i64 *%ptr) {
; CHECK-LABEL: @differently_sized_replacements(
; CHECK-NEXT: entry:
; CHECK-NEXT: store i64 578437695752307201, i64* [[PTR:%.*]]
; CHECK-NEXT: store i64 578437695752307201, i64* [[PTR:%.*]], align 8
; CHECK-NEXT: ret void
;
entry:
@ -79,7 +79,7 @@ entry:
define void @multiple_replacements_to_same_byte(i64 *%ptr) {
; CHECK-LABEL: @multiple_replacements_to_same_byte(
; CHECK-NEXT: entry:
; CHECK-NEXT: store i64 579005069522043393, i64* [[PTR:%.*]]
; CHECK-NEXT: store i64 579005069522043393, i64* [[PTR:%.*]], align 8
; CHECK-NEXT: ret void
;
entry:
@ -103,7 +103,7 @@ entry:
define void @merged_merges(i64 *%ptr) {
; CHECK-LABEL: @merged_merges(
; CHECK-NEXT: entry:
; CHECK-NEXT: store i64 579005069572506113, i64* [[PTR:%.*]]
; CHECK-NEXT: store i64 579005069572506113, i64* [[PTR:%.*]], align 8
; CHECK-NEXT: ret void
;
entry:
@ -160,7 +160,7 @@ define void @foo(%union.U* nocapture %u) {
; CHECK-LABEL: @foo(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[I:%.*]] = getelementptr inbounds [[UNION_U:%.*]], %union.U* [[U:%.*]], i64 0, i32 0
; CHECK-NEXT: store i64 42, i64* [[I]], align 8
; CHECK-NEXT: store i64 42, i64* [[I]], align 8, !tbaa !0, !noalias !3, !nontemporal !4
; CHECK-NEXT: ret void
;
entry:
@ -175,8 +175,8 @@ entry:
define void @PR34074(i32* %x, i64* %y) {
; CHECK-LABEL: @PR34074(
; CHECK-NEXT: store i64 42, i64* %y
; CHECK-NEXT: store i32 4, i32* %x
; CHECK-NEXT: store i64 42, i64* [[Y:%.*]], align 8
; CHECK-NEXT: store i32 4, i32* [[X:%.*]], align 4
; CHECK-NEXT: ret void
;
store i64 42, i64* %y ; independent store
@ -190,10 +190,10 @@ define void @PR34074(i32* %x, i64* %y) {
define void @PR36129(i32* %P, i32* %Q) {
; CHECK-LABEL: @PR36129(
; CHECK-NEXT: store i32 1, i32* [[P:%.*]]
; CHECK-NEXT: store i32 1, i32* [[P:%.*]], align 4
; CHECK-NEXT: [[P2:%.*]] = bitcast i32* [[P]] to i8*
; CHECK-NEXT: store i32 2, i32* [[Q:%.*]]
; CHECK-NEXT: store i8 3, i8* [[P2]]
; CHECK-NEXT: store i32 2, i32* [[Q:%.*]], align 4
; CHECK-NEXT: store i8 3, i8* [[P2]], align 1
; CHECK-NEXT: ret void
;
store i32 1, i32* %P

View File

@ -11,12 +11,12 @@ define i32 @test0(i32* %ptr, i1 %cond) {
; We can do store to load forwarding over a guard, since it does not
; clobber memory
; NO_ASSUME-LABEL: @test0(
; NO_ASSUME-NEXT: store i32 40, i32* [[PTR:%.*]]
; NO_ASSUME-NEXT: store i32 40, i32* [[PTR:%.*]], align 4
; NO_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ]
; NO_ASSUME-NEXT: ret i32 40
;
; USE_ASSUME-LABEL: @test0(
; USE_ASSUME-NEXT: store i32 40, i32* [[PTR:%.*]]
; USE_ASSUME-NEXT: store i32 40, i32* [[PTR:%.*]], align 4
; USE_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
; USE_ASSUME-NEXT: ret i32 40
@ -185,9 +185,9 @@ define void @test6(i1 %c, i32* %ptr) {
; Guard intrinsics do _read_ memory, so th call to guard below needs
; to see the store of 500 to %ptr
; CHECK-LABEL: @test6(
; CHECK-NEXT: store i32 500, i32* [[PTR:%.*]]
; CHECK-NEXT: store i32 500, i32* [[PTR:%.*]], align 4
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[C:%.*]]) [ "deopt"() ]
; CHECK-NEXT: store i32 600, i32* [[PTR]]
; CHECK-NEXT: store i32 600, i32* [[PTR]], align 4
; CHECK-NEXT: ret void
;
@ -219,17 +219,17 @@ define void @test08(i32 %a, i32 %b, i32* %ptr) {
; block in case when the condition is not recalculated.
; NO_ASSUME-LABEL: @test08(
; NO_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; NO_ASSUME-NEXT: store i32 100, i32* [[PTR:%.*]]
; NO_ASSUME-NEXT: store i32 100, i32* [[PTR:%.*]], align 4
; NO_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
; NO_ASSUME-NEXT: store i32 400, i32* [[PTR]]
; NO_ASSUME-NEXT: store i32 400, i32* [[PTR]], align 4
; NO_ASSUME-NEXT: ret void
;
; USE_ASSUME-LABEL: @test08(
; USE_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; USE_ASSUME-NEXT: store i32 100, i32* [[PTR:%.*]]
; USE_ASSUME-NEXT: store i32 100, i32* [[PTR:%.*]], align 4
; USE_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]) ]
; USE_ASSUME-NEXT: store i32 400, i32* [[PTR]]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
; USE_ASSUME-NEXT: store i32 400, i32* [[PTR]], align 4
; USE_ASSUME-NEXT: ret void
;
@ -251,15 +251,15 @@ define void @test09(i32 %a, i32 %b, i1 %c, i32* %ptr) {
; NO_ASSUME-LABEL: @test09(
; NO_ASSUME-NEXT: entry:
; NO_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; NO_ASSUME-NEXT: store i32 100, i32* [[PTR:%.*]]
; NO_ASSUME-NEXT: store i32 100, i32* [[PTR:%.*]], align 4
; NO_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
; NO_ASSUME-NEXT: store i32 400, i32* [[PTR]]
; NO_ASSUME-NEXT: store i32 400, i32* [[PTR]], align 4
; NO_ASSUME-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
; NO_ASSUME: if.true:
; NO_ASSUME-NEXT: store i32 500, i32* [[PTR]]
; NO_ASSUME-NEXT: store i32 500, i32* [[PTR]], align 4
; NO_ASSUME-NEXT: br label [[MERGE:%.*]]
; NO_ASSUME: if.false:
; NO_ASSUME-NEXT: store i32 600, i32* [[PTR]]
; NO_ASSUME-NEXT: store i32 600, i32* [[PTR]], align 4
; NO_ASSUME-NEXT: br label [[MERGE]]
; NO_ASSUME: merge:
; NO_ASSUME-NEXT: ret void
@ -267,16 +267,16 @@ define void @test09(i32 %a, i32 %b, i1 %c, i32* %ptr) {
; USE_ASSUME-LABEL: @test09(
; USE_ASSUME-NEXT: entry:
; USE_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; USE_ASSUME-NEXT: store i32 100, i32* [[PTR:%.*]]
; USE_ASSUME-NEXT: store i32 100, i32* [[PTR:%.*]], align 4
; USE_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]) ]
; USE_ASSUME-NEXT: store i32 400, i32* [[PTR]]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
; USE_ASSUME-NEXT: store i32 400, i32* [[PTR]], align 4
; USE_ASSUME-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
; USE_ASSUME: if.true:
; USE_ASSUME-NEXT: store i32 500, i32* [[PTR]]
; USE_ASSUME-NEXT: store i32 500, i32* [[PTR]], align 4
; USE_ASSUME-NEXT: br label [[MERGE:%.*]]
; USE_ASSUME: if.false:
; USE_ASSUME-NEXT: store i32 600, i32* [[PTR]]
; USE_ASSUME-NEXT: store i32 600, i32* [[PTR]], align 4
; USE_ASSUME-NEXT: br label [[MERGE]]
; USE_ASSUME: merge:
; USE_ASSUME-NEXT: ret void
@ -315,15 +315,15 @@ define void @test10(i32 %a, i32 %b, i1 %c, i32* %ptr) {
; CHECK-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
; CHECK: if.true:
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
; CHECK-NEXT: store i32 100, i32* [[PTR:%.*]]
; CHECK-NEXT: store i32 100, i32* [[PTR:%.*]], align 4
; CHECK-NEXT: br label [[MERGE:%.*]]
; CHECK: if.false:
; CHECK-NEXT: store i32 200, i32* [[PTR]]
; CHECK-NEXT: store i32 200, i32* [[PTR]], align 4
; CHECK-NEXT: br label [[MERGE]]
; CHECK: merge:
; CHECK-NEXT: store i32 300, i32* [[PTR]]
; CHECK-NEXT: store i32 300, i32* [[PTR]], align 4
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
; CHECK-NEXT: store i32 400, i32* [[PTR]]
; CHECK-NEXT: store i32 400, i32* [[PTR]], align 4
; CHECK-NEXT: ret void
;
@ -401,14 +401,14 @@ define void @test13(i32 %a, i32 %b, i32* %ptr) {
; NO_ASSUME-LABEL: @test13(
; NO_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; NO_ASSUME-NEXT: call void @llvm.assume(i1 [[CMP]])
; NO_ASSUME-NEXT: store i32 400, i32* [[PTR:%.*]]
; NO_ASSUME-NEXT: store i32 400, i32* [[PTR:%.*]], align 4
; NO_ASSUME-NEXT: ret void
;
; USE_ASSUME-LABEL: @test13(
; USE_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 [[CMP]])
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR:%.*]], i64 4), "nonnull"(i32* [[PTR]]) ]
; USE_ASSUME-NEXT: store i32 400, i32* [[PTR]]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR:%.*]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
; USE_ASSUME-NEXT: store i32 400, i32* [[PTR]], align 4
; USE_ASSUME-NEXT: ret void
;
@ -432,13 +432,13 @@ define void @test14(i32 %a, i32 %b, i1 %c, i32* %ptr) {
; NO_ASSUME-NEXT: entry:
; NO_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; NO_ASSUME-NEXT: call void @llvm.assume(i1 [[CMP]])
; NO_ASSUME-NEXT: store i32 400, i32* [[PTR:%.*]]
; NO_ASSUME-NEXT: store i32 400, i32* [[PTR:%.*]], align 4
; NO_ASSUME-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
; NO_ASSUME: if.true:
; NO_ASSUME-NEXT: store i32 500, i32* [[PTR]]
; NO_ASSUME-NEXT: store i32 500, i32* [[PTR]], align 4
; NO_ASSUME-NEXT: br label [[MERGE:%.*]]
; NO_ASSUME: if.false:
; NO_ASSUME-NEXT: store i32 600, i32* [[PTR]]
; NO_ASSUME-NEXT: store i32 600, i32* [[PTR]], align 4
; NO_ASSUME-NEXT: br label [[MERGE]]
; NO_ASSUME: merge:
; NO_ASSUME-NEXT: ret void
@ -447,14 +447,14 @@ define void @test14(i32 %a, i32 %b, i1 %c, i32* %ptr) {
; USE_ASSUME-NEXT: entry:
; USE_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 [[CMP]])
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR:%.*]], i64 4), "nonnull"(i32* [[PTR]]) ]
; USE_ASSUME-NEXT: store i32 400, i32* [[PTR]]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR:%.*]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
; USE_ASSUME-NEXT: store i32 400, i32* [[PTR]], align 4
; USE_ASSUME-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
; USE_ASSUME: if.true:
; USE_ASSUME-NEXT: store i32 500, i32* [[PTR]]
; USE_ASSUME-NEXT: store i32 500, i32* [[PTR]], align 4
; USE_ASSUME-NEXT: br label [[MERGE:%.*]]
; USE_ASSUME: if.false:
; USE_ASSUME-NEXT: store i32 600, i32* [[PTR]]
; USE_ASSUME-NEXT: store i32 600, i32* [[PTR]], align 4
; USE_ASSUME-NEXT: br label [[MERGE]]
; USE_ASSUME: merge:
; USE_ASSUME-NEXT: ret void
@ -494,15 +494,15 @@ define void @test15(i32 %a, i32 %b, i1 %c, i32* %ptr) {
; CHECK-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
; CHECK: if.true:
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: store i32 100, i32* [[PTR:%.*]]
; CHECK-NEXT: store i32 100, i32* [[PTR:%.*]], align 4
; CHECK-NEXT: br label [[MERGE:%.*]]
; CHECK: if.false:
; CHECK-NEXT: store i32 200, i32* [[PTR]]
; CHECK-NEXT: store i32 200, i32* [[PTR]], align 4
; CHECK-NEXT: br label [[MERGE]]
; CHECK: merge:
; CHECK-NEXT: store i32 300, i32* [[PTR]]
; CHECK-NEXT: store i32 300, i32* [[PTR]], align 4
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
; CHECK-NEXT: store i32 400, i32* [[PTR]]
; CHECK-NEXT: store i32 400, i32* [[PTR]], align 4
; CHECK-NEXT: ret void
;

View File

@ -155,7 +155,7 @@ define void @test_dse1(i32* %p) {
; USE_ASSUME-LABEL: @test_dse1(
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4, !invariant.load !0
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
; USE_ASSUME-NEXT: ret void
;
%v1 = load i32, i32* %p, !invariant.load !{}
@ -169,7 +169,7 @@ define void @test_false_negative_dse2(i32* %p, i32 %v2) {
; CHECK-LABEL: @test_false_negative_dse2(
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4, !invariant.load !0
; CHECK-NEXT: call void @clobber_and_use(i32 [[V1]])
; CHECK-NEXT: store i32 [[V2:%.*]], i32* [[P]]
; CHECK-NEXT: store i32 [[V2:%.*]], i32* [[P]], align 4
; CHECK-NEXT: ret void
;
%v1 = load i32, i32* %p, !invariant.load !{}

View File

@ -35,13 +35,13 @@ define i8 @test_bypass1(i8 *%P) {
define i8 @test_bypass2(i8 *%P) {
; NO_ASSUME-LABEL: define {{[^@]+}}@test_bypass2
; NO_ASSUME-SAME: (i8* [[P:%.*]])
; NO_ASSUME-NEXT: store i8 42, i8* [[P]]
; NO_ASSUME-NEXT: store i8 42, i8* [[P]], align 1
; NO_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
; NO_ASSUME-NEXT: ret i8 42
;
; USE_ASSUME-LABEL: define {{[^@]+}}@test_bypass2
; USE_ASSUME-SAME: (i8* [[P:%.*]])
; USE_ASSUME-NEXT: store i8 42, i8* [[P]]
; USE_ASSUME-NEXT: store i8 42, i8* [[P]], align 1
; USE_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[P]], i64 1), "nonnull"(i8* [[P]]) ]
; USE_ASSUME-NEXT: ret i8 42
@ -60,14 +60,14 @@ define void @test_bypass3(i8* %P) {
; NO_ASSUME-LABEL: define {{[^@]+}}@test_bypass3
; NO_ASSUME-SAME: (i8* [[P:%.*]])
; NO_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
; NO_ASSUME-NEXT: store i8 60, i8* [[P]]
; NO_ASSUME-NEXT: store i8 60, i8* [[P]], align 1
; NO_ASSUME-NEXT: ret void
;
; USE_ASSUME-LABEL: define {{[^@]+}}@test_bypass3
; USE_ASSUME-SAME: (i8* [[P:%.*]])
; USE_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[P]], i64 1), "nonnull"(i8* [[P]]) ]
; USE_ASSUME-NEXT: store i8 60, i8* [[P]]
; USE_ASSUME-NEXT: store i8 60, i8* [[P]], align 1
; USE_ASSUME-NEXT: ret void
;
@ -83,10 +83,10 @@ define void @test_bypass3(i8* %P) {
define void @test_bypass4(i8* %P) {
; CHECK-LABEL: define {{[^@]+}}@test_bypass4
; CHECK-SAME: (i8* [[P:%.*]])
; CHECK-NEXT: store i8 50, i8* [[P]]
; CHECK-NEXT: store i8 50, i8* [[P]], align 1
; CHECK-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
; CHECK-NEXT: call void @llvm.invariant.end.p0i8({}* [[I]], i64 1, i8* [[P]])
; CHECK-NEXT: store i8 60, i8* [[P]]
; CHECK-NEXT: store i8 60, i8* [[P]], align 1
; CHECK-NEXT: ret void
;
@ -369,7 +369,7 @@ define void @test_dse_before_load(i32* %p, i1 %cnd) {
; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; USE_ASSUME-NEXT: call void @clobber()
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
; USE_ASSUME-NEXT: ret void
;
call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
@ -392,7 +392,7 @@ define void @test_dse_after_load(i32* %p, i1 %cnd) {
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; USE_ASSUME-NEXT: call void @clobber()
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
; USE_ASSUME-NEXT: ret void
;
%v1 = load i32, i32* %p

View File

@ -318,7 +318,7 @@ entry:
%unknownValue = load i8, i8* @unknownPtr
; FIXME: Can assume that %unknownValue == 42
; CHECK: store i8 %unknownValue, i8* %ptr, !invariant.group !0
; CHECK: store i8 %unknownValue, i8* %ptr, align 1, !invariant.group !0
store i8 %unknownValue, i8* %ptr, !invariant.group !0
%newPtr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)

View File

@ -7,12 +7,12 @@ target triple = "x86_64-unknown-linux-gnu"
define void @f0(i1 %alwaysFalse, i64 %val, i64* %loc) {
; CHECK-LABEL: @f0(
; CHECK-NEXT: entry:
; CHECK-NEXT: store i64 [[VAL:%.*]], i64* [[LOC:%.*]]
; CHECK-NEXT: store i64 [[VAL:%.*]], i64* [[LOC:%.*]], align 8
; CHECK-NEXT: br i1 [[ALWAYSFALSE:%.*]], label [[NEVERTAKEN:%.*]], label [[ALWAYSTAKEN:%.*]]
; CHECK: neverTaken:
; CHECK-NEXT: [[LOC_BC:%.*]] = bitcast i64* [[LOC]] to i8 addrspace(4)**
; CHECK-NEXT: [[PTR:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)** [[LOC_BC]]
; CHECK-NEXT: store i8 5, i8 addrspace(4)* [[PTR]]
; CHECK-NEXT: [[PTR:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)** [[LOC_BC]], align 8
; CHECK-NEXT: store i8 5, i8 addrspace(4)* [[PTR]], align 1
; CHECK-NEXT: ret void
; CHECK: alwaysTaken:
; CHECK-NEXT: ret void
@ -34,11 +34,11 @@ define void @f0(i1 %alwaysFalse, i64 %val, i64* %loc) {
define i64 @f1(i1 %alwaysFalse, i8 addrspace(4)* %val, i8 addrspace(4)** %loc) {
; CHECK-LABEL: @f1(
; CHECK-NEXT: entry:
; CHECK-NEXT: store i8 addrspace(4)* [[VAL:%.*]], i8 addrspace(4)** [[LOC:%.*]]
; CHECK-NEXT: store i8 addrspace(4)* [[VAL:%.*]], i8 addrspace(4)** [[LOC:%.*]], align 8
; CHECK-NEXT: br i1 [[ALWAYSFALSE:%.*]], label [[NEVERTAKEN:%.*]], label [[ALWAYSTAKEN:%.*]]
; CHECK: neverTaken:
; CHECK-NEXT: [[LOC_BC:%.*]] = bitcast i8 addrspace(4)** [[LOC]] to i64*
; CHECK-NEXT: [[INT:%.*]] = load i64, i64* [[LOC_BC]]
; CHECK-NEXT: [[INT:%.*]] = load i64, i64* [[LOC_BC]], align 8
; CHECK-NEXT: ret i64 [[INT]]
; CHECK: alwaysTaken:
; CHECK-NEXT: ret i64 42
@ -67,7 +67,7 @@ define i8 addrspace(4)* @neg_forward_memset(i8 addrspace(4)* addrspace(4)* %loc)
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)*
; CHECK-NEXT: call void @llvm.memset.p4i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8 7, i64 8, i1 false)
; CHECK-NEXT: [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]]
; CHECK-NEXT: [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]], align 8
; CHECK-NEXT: ret i8 addrspace(4)* [[REF]]
;
entry:
@ -82,7 +82,7 @@ define <1 x i8 addrspace(4)*> @neg_forward_memset_vload(<1 x i8 addrspace(4)*> a
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LOC_BC:%.*]] = bitcast <1 x i8 addrspace(4)*> addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)*
; CHECK-NEXT: call void @llvm.memset.p4i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8 7, i64 8, i1 false)
; CHECK-NEXT: [[REF:%.*]] = load <1 x i8 addrspace(4)*>, <1 x i8 addrspace(4)*> addrspace(4)* [[LOC]]
; CHECK-NEXT: [[REF:%.*]] = load <1 x i8 addrspace(4)*>, <1 x i8 addrspace(4)*> addrspace(4)* [[LOC]], align 8
; CHECK-NEXT: ret <1 x i8 addrspace(4)*> [[REF]]
;
entry:
@ -113,8 +113,8 @@ define i8 addrspace(4)* @neg_forward_store(i8 addrspace(4)* addrspace(4)* %loc)
; CHECK-LABEL: @neg_forward_store(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to i64 addrspace(4)*
; CHECK-NEXT: store i64 5, i64 addrspace(4)* [[LOC_BC]]
; CHECK-NEXT: [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]]
; CHECK-NEXT: store i64 5, i64 addrspace(4)* [[LOC_BC]], align 8
; CHECK-NEXT: [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]], align 8
; CHECK-NEXT: ret i8 addrspace(4)* [[REF]]
;
entry:
@ -128,8 +128,8 @@ define <1 x i8 addrspace(4)*> @neg_forward_store_vload(<1 x i8 addrspace(4)*> ad
; CHECK-LABEL: @neg_forward_store_vload(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LOC_BC:%.*]] = bitcast <1 x i8 addrspace(4)*> addrspace(4)* [[LOC:%.*]] to i64 addrspace(4)*
; CHECK-NEXT: store i64 5, i64 addrspace(4)* [[LOC_BC]]
; CHECK-NEXT: [[REF:%.*]] = load <1 x i8 addrspace(4)*>, <1 x i8 addrspace(4)*> addrspace(4)* [[LOC]]
; CHECK-NEXT: store i64 5, i64 addrspace(4)* [[LOC_BC]], align 8
; CHECK-NEXT: [[REF:%.*]] = load <1 x i8 addrspace(4)*>, <1 x i8 addrspace(4)*> addrspace(4)* [[LOC]], align 8
; CHECK-NEXT: ret <1 x i8 addrspace(4)*> [[REF]]
;
entry:
@ -144,7 +144,7 @@ define i8 addrspace(4)* @forward_store_zero(i8 addrspace(4)* addrspace(4)* %loc)
; CHECK-LABEL: @forward_store_zero(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to i64 addrspace(4)*
; CHECK-NEXT: store i64 0, i64 addrspace(4)* [[LOC_BC]]
; CHECK-NEXT: store i64 0, i64 addrspace(4)* [[LOC_BC]], align 8
; CHECK-NEXT: ret i8 addrspace(4)* null
;
entry:
@ -159,7 +159,7 @@ define i8 addrspace(4)* @forward_store_zero2(i8 addrspace(4)* addrspace(4)* %loc
; CHECK-LABEL: @forward_store_zero2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to <2 x i32> addrspace(4)*
; CHECK-NEXT: store <2 x i32> zeroinitializer, <2 x i32> addrspace(4)* [[LOC_BC]]
; CHECK-NEXT: store <2 x i32> zeroinitializer, <2 x i32> addrspace(4)* [[LOC_BC]], align 8
; CHECK-NEXT: ret i8 addrspace(4)* null
;
entry:
@ -179,7 +179,7 @@ define i8 addrspace(4)* @neg_forward_memcopy(i8 addrspace(4)* addrspace(4)* %loc
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)*
; CHECK-NEXT: call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8* bitcast (<4 x i64>* @NonZeroConstant to i8*), i64 8, i1 false)
; CHECK-NEXT: [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]]
; CHECK-NEXT: [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]], align 8
; CHECK-NEXT: ret i8 addrspace(4)* [[REF]]
;
entry:
@ -195,7 +195,7 @@ define <1 x i8 addrspace(4)*> @neg_forward_memcpy_vload(<1 x i8 addrspace(4)*> a
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LOC_BC:%.*]] = bitcast <1 x i8 addrspace(4)*> addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)*
; CHECK-NEXT: call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8* bitcast (<4 x i64>* @NonZeroConstant to i8*), i64 8, i1 false)
; CHECK-NEXT: [[REF:%.*]] = load <1 x i8 addrspace(4)*>, <1 x i8 addrspace(4)*> addrspace(4)* [[LOC]]
; CHECK-NEXT: [[REF:%.*]] = load <1 x i8 addrspace(4)*>, <1 x i8 addrspace(4)*> addrspace(4)* [[LOC]], align 8
; CHECK-NEXT: ret <1 x i8 addrspace(4)*> [[REF]]
;
entry:
@ -214,7 +214,7 @@ define i8 addrspace(4)* @forward_memcpy_zero(i8 addrspace(4)* addrspace(4)* %loc
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)*
; CHECK-NEXT: call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8* bitcast (<4 x i64>* @ZeroConstant to i8*), i64 8, i1 false)
; CHECK-NEXT: [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]]
; CHECK-NEXT: [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]], align 8
; CHECK-NEXT: ret i8 addrspace(4)* [[REF]]
;
entry:
@ -234,9 +234,9 @@ define i8 addrspace(4)* @neg_store_clobber(i8 addrspace(4)* addrspace(4)* %loc)
; CHECK-LABEL: @neg_store_clobber(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to <2 x i64> addrspace(4)*
; CHECK-NEXT: store <2 x i64> <i64 4, i64 4>, <2 x i64> addrspace(4)* [[LOC_BC]]
; CHECK-NEXT: store <2 x i64> <i64 4, i64 4>, <2 x i64> addrspace(4)* [[LOC_BC]], align 16
; CHECK-NEXT: [[LOC_OFF:%.*]] = getelementptr i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]], i64 1
; CHECK-NEXT: [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC_OFF]]
; CHECK-NEXT: [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC_OFF]], align 8
; CHECK-NEXT: ret i8 addrspace(4)* [[REF]]
;
entry:
@ -255,10 +255,10 @@ define i8 addrspace(4)* @neg_load_clobber(i8 addrspace(4)* addrspace(4)* %loc) {
; CHECK-LABEL: @neg_load_clobber(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to <2 x i64> addrspace(4)*
; CHECK-NEXT: [[V:%.*]] = load <2 x i64>, <2 x i64> addrspace(4)* [[LOC_BC]]
; CHECK-NEXT: [[V:%.*]] = load <2 x i64>, <2 x i64> addrspace(4)* [[LOC_BC]], align 16
; CHECK-NEXT: call void @use(<2 x i64> [[V]])
; CHECK-NEXT: [[LOC_OFF:%.*]] = getelementptr i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]], i64 1
; CHECK-NEXT: [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC_OFF]]
; CHECK-NEXT: [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC_OFF]], align 8
; CHECK-NEXT: ret i8 addrspace(4)* [[REF]]
;
entry:
@ -274,7 +274,7 @@ define i8 addrspace(4)* @store_clobber_zero(i8 addrspace(4)* addrspace(4)* %loc)
; CHECK-LABEL: @store_clobber_zero(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to <2 x i64> addrspace(4)*
; CHECK-NEXT: store <2 x i64> zeroinitializer, <2 x i64> addrspace(4)* [[LOC_BC]]
; CHECK-NEXT: store <2 x i64> zeroinitializer, <2 x i64> addrspace(4)* [[LOC_BC]], align 16
; CHECK-NEXT: [[LOC_OFF:%.*]] = getelementptr i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]], i64 1
; CHECK-NEXT: ret i8 addrspace(4)* null
;

View File

@ -5,8 +5,8 @@
define <vscale x 4 x i32> @load_store_clobber_load(<vscale x 4 x i32> *%p) {
; CHECK-LABEL: @load_store_clobber_load(
; CHECK-NEXT: [[LOAD1:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[P:%.*]]
; CHECK-NEXT: store <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* undef
; CHECK-NEXT: [[LOAD1:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[P:%.*]], align 16
; CHECK-NEXT: store <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* undef, align 16
; CHECK-NEXT: [[ADD:%.*]] = add <vscale x 4 x i32> [[LOAD1]], [[LOAD1]]
; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
;
@ -19,9 +19,9 @@ define <vscale x 4 x i32> @load_store_clobber_load(<vscale x 4 x i32> *%p) {
define <vscale x 4 x i32> @load_store_clobber_load_mayalias(<vscale x 4 x i32>* %p, <vscale x 4 x i32>* %p2) {
; CHECK-LABEL: @load_store_clobber_load_mayalias(
; CHECK-NEXT: [[LOAD1:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[P:%.*]]
; CHECK-NEXT: store <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* [[P2:%.*]]
; CHECK-NEXT: [[LOAD2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[P]]
; CHECK-NEXT: [[LOAD1:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[P:%.*]], align 16
; CHECK-NEXT: store <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* [[P2:%.*]], align 16
; CHECK-NEXT: [[LOAD2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[P]], align 16
; CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 4 x i32> [[LOAD1]], [[LOAD2]]
; CHECK-NEXT: ret <vscale x 4 x i32> [[SUB]]
;
@ -34,8 +34,8 @@ define <vscale x 4 x i32> @load_store_clobber_load_mayalias(<vscale x 4 x i32>*
define <vscale x 4 x i32> @load_store_clobber_load_noalias(<vscale x 4 x i32>* noalias %p, <vscale x 4 x i32>* noalias %p2) {
; CHECK-LABEL: @load_store_clobber_load_noalias(
; CHECK-NEXT: [[LOAD1:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[P:%.*]]
; CHECK-NEXT: store <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* [[P2:%.*]]
; CHECK-NEXT: [[LOAD1:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[P:%.*]], align 16
; CHECK-NEXT: store <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* [[P2:%.*]], align 16
; CHECK-NEXT: [[ADD:%.*]] = add <vscale x 4 x i32> [[LOAD1]], [[LOAD1]]
; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
;
@ -50,10 +50,10 @@ define <vscale x 4 x i32> @load_store_clobber_load_noalias(<vscale x 4 x i32>* n
define i32 @load_clobber_load_gep1(<vscale x 4 x i32>* %p) {
; CHECK-LABEL: @load_clobber_load_gep1(
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* [[P:%.*]], i64 0, i64 1
; CHECK-NEXT: [[LOAD1:%.*]] = load i32, i32* [[GEP1]]
; CHECK-NEXT: [[LOAD1:%.*]] = load i32, i32* [[GEP1]], align 4
; CHECK-NEXT: [[P2:%.*]] = bitcast <vscale x 4 x i32>* [[P]] to i32*
; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i32, i32* [[P2]], i64 1
; CHECK-NEXT: [[LOAD2:%.*]] = load i32, i32* [[GEP2]]
; CHECK-NEXT: [[LOAD2:%.*]] = load i32, i32* [[GEP2]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[LOAD1]], [[LOAD2]]
; CHECK-NEXT: ret i32 [[ADD]]
;
@ -69,10 +69,10 @@ define i32 @load_clobber_load_gep1(<vscale x 4 x i32>* %p) {
define i32 @load_clobber_load_gep2(<vscale x 4 x i32>* %p) {
; CHECK-LABEL: @load_clobber_load_gep2(
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* [[P:%.*]], i64 1, i64 0
; CHECK-NEXT: [[LOAD1:%.*]] = load i32, i32* [[GEP1]]
; CHECK-NEXT: [[LOAD1:%.*]] = load i32, i32* [[GEP1]], align 4
; CHECK-NEXT: [[P2:%.*]] = bitcast <vscale x 4 x i32>* [[P]] to i32*
; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i32, i32* [[P2]], i64 4
; CHECK-NEXT: [[LOAD2:%.*]] = load i32, i32* [[GEP2]]
; CHECK-NEXT: [[LOAD2:%.*]] = load i32, i32* [[GEP2]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[LOAD1]], [[LOAD2]]
; CHECK-NEXT: ret i32 [[ADD]]
;
@ -89,10 +89,10 @@ define i32 @load_clobber_load_gep2(<vscale x 4 x i32>* %p) {
define i32 @load_clobber_load_gep3(<vscale x 4 x i32>* %p) {
; CHECK-LABEL: @load_clobber_load_gep3(
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* [[P:%.*]], i64 1, i64 0
; CHECK-NEXT: [[LOAD1:%.*]] = load i32, i32* [[GEP1]]
; CHECK-NEXT: [[LOAD1:%.*]] = load i32, i32* [[GEP1]], align 4
; CHECK-NEXT: [[P2:%.*]] = bitcast <vscale x 4 x i32>* [[P]] to <vscale x 4 x float>*
; CHECK-NEXT: [[GEP2:%.*]] = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* [[P2]], i64 1, i64 0
; CHECK-NEXT: [[LOAD2:%.*]] = load float, float* [[GEP2]]
; CHECK-NEXT: [[LOAD2:%.*]] = load float, float* [[GEP2]], align 4
; CHECK-NEXT: [[CAST:%.*]] = bitcast float [[LOAD2]] to i32
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[LOAD1]], [[CAST]]
; CHECK-NEXT: ret i32 [[ADD]]
@ -109,9 +109,9 @@ define i32 @load_clobber_load_gep3(<vscale x 4 x i32>* %p) {
define <vscale x 4 x i32> @load_clobber_load_fence(<vscale x 4 x i32>* %p) {
; CHECK-LABEL: @load_clobber_load_fence(
; CHECK-NEXT: [[LOAD1:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[P:%.*]]
; CHECK-NEXT: [[LOAD1:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[P:%.*]], align 16
; CHECK-NEXT: call void asm "", "~{memory}"()
; CHECK-NEXT: [[LOAD2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[P]]
; CHECK-NEXT: [[LOAD2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[P]], align 16
; CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 4 x i32> [[LOAD1]], [[LOAD2]]
; CHECK-NEXT: ret <vscale x 4 x i32> [[SUB]]
;
@ -124,9 +124,9 @@ define <vscale x 4 x i32> @load_clobber_load_fence(<vscale x 4 x i32>* %p) {
define <vscale x 4 x i32> @load_clobber_load_sideeffect(<vscale x 4 x i32>* %p) {
; CHECK-LABEL: @load_clobber_load_sideeffect(
; CHECK-NEXT: [[LOAD1:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[P:%.*]]
; CHECK-NEXT: [[LOAD1:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[P:%.*]], align 16
; CHECK-NEXT: call void asm sideeffect "", ""()
; CHECK-NEXT: [[LOAD2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[P]]
; CHECK-NEXT: [[LOAD2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[P]], align 16
; CHECK-NEXT: [[ADD:%.*]] = add <vscale x 4 x i32> [[LOAD1]], [[LOAD2]]
; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
;
@ -141,7 +141,7 @@ define <vscale x 4 x i32> @load_clobber_load_sideeffect(<vscale x 4 x i32>* %p)
define <vscale x 4 x i32> @store_forward_to_load(<vscale x 4 x i32>* %p) {
; CHECK-LABEL: @store_forward_to_load(
; CHECK-NEXT: store <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* [[P:%.*]]
; CHECK-NEXT: store <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* [[P:%.*]], align 16
; CHECK-NEXT: ret <vscale x 4 x i32> zeroinitializer
;
store <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* %p
@ -151,9 +151,9 @@ define <vscale x 4 x i32> @store_forward_to_load(<vscale x 4 x i32>* %p) {
define <vscale x 4 x i32> @store_forward_to_load_sideeffect(<vscale x 4 x i32>* %p) {
; CHECK-LABEL: @store_forward_to_load_sideeffect(
; CHECK-NEXT: store <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* [[P:%.*]]
; CHECK-NEXT: store <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* [[P:%.*]], align 16
; CHECK-NEXT: call void asm sideeffect "", ""()
; CHECK-NEXT: [[LOAD:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[P]]
; CHECK-NEXT: [[LOAD:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[P]], align 16
; CHECK-NEXT: ret <vscale x 4 x i32> [[LOAD]]
;
store <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* %p
@ -165,9 +165,9 @@ define <vscale x 4 x i32> @store_forward_to_load_sideeffect(<vscale x 4 x i32>*
define i32 @store_clobber_load() {
; CHECK-LABEL: @store_clobber_load(
; CHECK-NEXT: [[ALLOC:%.*]] = alloca <vscale x 4 x i32>
; CHECK-NEXT: store <vscale x 4 x i32> undef, <vscale x 4 x i32>* [[ALLOC]]
; CHECK-NEXT: store <vscale x 4 x i32> undef, <vscale x 4 x i32>* [[ALLOC]], align 16
; CHECK-NEXT: [[PTR:%.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* [[ALLOC]], i32 0, i32 1
; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[PTR]]
; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[PTR]], align 4
; CHECK-NEXT: ret i32 [[LOAD]]
;
%alloc = alloca <vscale x 4 x i32>
@ -199,7 +199,7 @@ define i32 @memset_clobber_load_vscaled_base(<vscale x 4 x i32> *%p) {
; CHECK-NEXT: [[CONV:%.*]] = bitcast <vscale x 4 x i32>* [[P:%.*]] to i8*
; CHECK-NEXT: tail call void @llvm.memset.p0i8.i64(i8* [[CONV]], i8 1, i64 200, i1 false)
; CHECK-NEXT: [[GEP:%.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* [[P]], i64 1, i64 1
; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[GEP]]
; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[GEP]], align 4
; CHECK-NEXT: ret i32 [[LOAD]]
;
%conv = bitcast <vscale x 4 x i32>* %p to i8*
@ -214,7 +214,7 @@ define i32 @memset_clobber_load_nonconst_index(<vscale x 4 x i32> *%p, i64 %idx1
; CHECK-NEXT: [[CONV:%.*]] = bitcast <vscale x 4 x i32>* [[P:%.*]] to i8*
; CHECK-NEXT: tail call void @llvm.memset.p0i8.i64(i8* [[CONV]], i8 1, i64 200, i1 false)
; CHECK-NEXT: [[GEP:%.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* [[P]], i64 [[IDX1:%.*]], i64 [[IDX2:%.*]]
; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[GEP]]
; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[GEP]], align 4
; CHECK-NEXT: ret i32 [[LOAD]]
;
%conv = bitcast <vscale x 4 x i32>* %p to i8*
@ -233,7 +233,7 @@ define <vscale x 4 x i32>* @load_from_alloc_replaced_with_undef() {
; CHECK-NEXT: [[A:%.*]] = alloca <vscale x 4 x i32>
; CHECK-NEXT: br i1 undef, label [[IF_END:%.*]], label [[IF_THEN:%.*]]
; CHECK: if.then:
; CHECK-NEXT: store <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* [[A]]
; CHECK-NEXT: store <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* [[A]], align 16
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
; CHECK-NEXT: ret <vscale x 4 x i32>* [[A]]
@ -257,7 +257,7 @@ define i32 @redundant_load_elimination_1(<vscale x 4 x i32>* %p) {
; CHECK-LABEL: @redundant_load_elimination_1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[GEP:%.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* [[P:%.*]], i64 1, i64 1
; CHECK-NEXT: [[LOAD1:%.*]] = load i32, i32* [[GEP]]
; CHECK-NEXT: [[LOAD1:%.*]] = load i32, i32* [[GEP]], align 4
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[LOAD1]], 0
; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; CHECK: if.then:
@ -286,13 +286,13 @@ define void @redundant_load_elimination_2(i1 %c, <vscale x 4 x i32>* %p, i32* %q
; CHECK-LABEL: @redundant_load_elimination_2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* [[P:%.*]], i64 1, i64 1
; CHECK-NEXT: store i32 0, i32* [[GEP1]]
; CHECK-NEXT: store i32 0, i32* [[GEP1]], align 4
; CHECK-NEXT: [[GEP2:%.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* [[P]], i64 1, i64 0
; CHECK-NEXT: store i32 1, i32* [[GEP2]]
; CHECK-NEXT: store i32 1, i32* [[GEP2]], align 4
; CHECK-NEXT: br i1 [[C:%.*]], label [[IF_ELSE:%.*]], label [[IF_THEN:%.*]]
; CHECK: if.then:
; CHECK-NEXT: [[T:%.*]] = load i32, i32* [[GEP1]]
; CHECK-NEXT: store i32 [[T]], i32* [[Q:%.*]]
; CHECK-NEXT: [[T:%.*]] = load i32, i32* [[GEP1]], align 4
; CHECK-NEXT: store i32 [[T]], i32* [[Q:%.*]], align 4
; CHECK-NEXT: ret void
; CHECK: if.else:
; CHECK-NEXT: ret void
@ -317,13 +317,13 @@ if.else:
define void @missing_load_elimination(i1 %c, <vscale x 4 x i32>* %p, <vscale x 4 x i32>* %q, <vscale x 4 x i32> %v) {
; CHECK-LABEL: @missing_load_elimination(
; CHECK-NEXT: entry:
; CHECK-NEXT: store <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* [[P:%.*]]
; CHECK-NEXT: store <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* [[P:%.*]], align 16
; CHECK-NEXT: [[P1:%.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* [[P]], i64 1
; CHECK-NEXT: store <vscale x 4 x i32> [[V:%.*]], <vscale x 4 x i32>* [[P1]]
; CHECK-NEXT: store <vscale x 4 x i32> [[V:%.*]], <vscale x 4 x i32>* [[P1]], align 16
; CHECK-NEXT: br i1 [[C:%.*]], label [[IF_ELSE:%.*]], label [[IF_THEN:%.*]]
; CHECK: if.then:
; CHECK-NEXT: [[T:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[P]]
; CHECK-NEXT: store <vscale x 4 x i32> [[T]], <vscale x 4 x i32>* [[Q:%.*]]
; CHECK-NEXT: [[T:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[P]], align 16
; CHECK-NEXT: store <vscale x 4 x i32> [[T]], <vscale x 4 x i32>* [[Q:%.*]], align 16
; CHECK-NEXT: ret void
; CHECK: if.else:
; CHECK-NEXT: ret void

View File

@ -58,7 +58,7 @@ define i32 @test3(i1 %C) {
; CHECK: Cond2:
; CHECK-NEXT: br label [[CONT]]
; CHECK: Cont:
; CHECK-NEXT: [[STOREMERGE:%.*]] = phi i32 [ -987654321, [[COND]] ], [ 47, [[COND2]] ]
; CHECK-NEXT: [[STOREMERGE:%.*]] = phi i32 [ 47, [[COND2]] ], [ -987654321, [[COND]] ]
; CHECK-NEXT: ret i32 [[STOREMERGE]]
;
%A = alloca i32

View File

@ -17,7 +17,7 @@ rhs:
br label %cleanup
cleanup:
; CHECK: %storemerge = phi i32 [ 1, %lhs ], [ 2, %rhs ], !dbg [[merge_loc:![0-9]+]]
; CHECK: %storemerge = phi i32 [ 2, %rhs ], [ 1, %lhs ], !dbg [[merge_loc:![0-9]+]]
%baz.val = load i32, i32* %baz
%ret.val = call i32 @escape(i32 %baz.val)
ret i32 %ret.val

View File

@ -589,17 +589,17 @@ define void @store_address_space(<4 x i32> addrspace(1)* %ptr, <2 x i32> %v0, <2
; CHECK-NEON-NEXT: [[TMP1:%.*]] = shufflevector <2 x i32> [[V0:%.*]], <2 x i32> [[V1:%.*]], <2 x i32> <i32 0, i32 1>
; CHECK-NEON-NEXT: [[TMP2:%.*]] = shufflevector <2 x i32> [[V0]], <2 x i32> [[V1]], <2 x i32> <i32 2, i32 3>
; CHECK-NEON-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> addrspace(1)* [[PTR:%.*]] to i8 addrspace(1)*
; CHECK-NEON-NEXT: call void @llvm.arm.neon.vst2.p1i8.v2i32(i8 addrspace(1)* [[TMP3]], <2 x i32> [[TMP1]], <2 x i32> [[TMP2]], i32 0)
; CHECK-NEON-NEXT: call void @llvm.arm.neon.vst2.p1i8.v2i32(i8 addrspace(1)* [[TMP3]], <2 x i32> [[TMP1]], <2 x i32> [[TMP2]], i32 8)
; CHECK-NEON-NEXT: ret void
;
; CHECK-MVE-LABEL: @store_address_space(
; CHECK-MVE-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <2 x i32> [[V0:%.*]], <2 x i32> [[V1:%.*]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
; CHECK-MVE-NEXT: store <4 x i32> [[INTERLEAVED_VEC]], <4 x i32> addrspace(1)* [[PTR:%.*]]
; CHECK-MVE-NEXT: store <4 x i32> [[INTERLEAVED_VEC]], <4 x i32> addrspace(1)* [[PTR:%.*]], align 8
; CHECK-MVE-NEXT: ret void
;
; CHECK-NONE-LABEL: @store_address_space(
; CHECK-NONE-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <2 x i32> [[V0:%.*]], <2 x i32> [[V1:%.*]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
; CHECK-NONE-NEXT: store <4 x i32> [[INTERLEAVED_VEC]], <4 x i32> addrspace(1)* [[PTR:%.*]]
; CHECK-NONE-NEXT: store <4 x i32> [[INTERLEAVED_VEC]], <4 x i32> addrspace(1)* [[PTR:%.*]], align 8
; CHECK-NONE-NEXT: ret void
;
%interleaved.vec = shufflevector <2 x i32> %v0, <2 x i32> %v1, <4 x i32> <i32 0, i32 2, i32 1, i32 3>

View File

@ -156,8 +156,8 @@ for.end: ; preds = %for.body, %entry
;; memcpy.atomic formation rejection (normal store w/ no align, atomic load)
define void @test3b(i64 %Size) nounwind ssp {
; CHECK-LABEL: @test3b(
; CHECK-NOT: call void @llvm.memcpy.element.unordered.atomic
; CHECK: store
; CHECK: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %Dest, i8* align 1 %Base, i64 %Size, i32 1)
; CHECK-NOT: store
; CHECK: ret void
bb.nph:
%Base = alloca i8, i32 10000

View File

@ -22,55 +22,55 @@ define i32 @foo(i32* %a) {
; ANALYZE-FULL: for.body:
; ANALYZE-FULL-NEXT: br i1 true, label [[DO_STORE:%.*]], label [[FOR_NEXT:%.*]]
; ANALYZE-FULL: do_store:
; ANALYZE-FULL-NEXT: store i32 0, i32* [[A:%.*]]
; ANALYZE-FULL-NEXT: store i32 0, i32* [[A:%.*]], align 4
; ANALYZE-FULL-NEXT: br label [[FOR_NEXT]]
; ANALYZE-FULL: for.next:
; ANALYZE-FULL-NEXT: br i1 true, label [[DO_STORE_1:%.*]], label [[FOR_NEXT_1:%.*]]
; ANALYZE-FULL: do_store.1:
; ANALYZE-FULL-NEXT: [[GEP_1:%.*]] = getelementptr i32, i32* [[A]], i32 1
; ANALYZE-FULL-NEXT: store i32 1, i32* [[GEP_1]]
; ANALYZE-FULL-NEXT: store i32 1, i32* [[GEP_1]], align 4
; ANALYZE-FULL-NEXT: br label [[FOR_NEXT_1]]
; ANALYZE-FULL: for.next.1:
; ANALYZE-FULL-NEXT: br i1 true, label [[DO_STORE_2:%.*]], label [[FOR_NEXT_2:%.*]]
; ANALYZE-FULL: do_store.2:
; ANALYZE-FULL-NEXT: [[GEP_2:%.*]] = getelementptr i32, i32* [[A]], i32 2
; ANALYZE-FULL-NEXT: store i32 2, i32* [[GEP_2]]
; ANALYZE-FULL-NEXT: store i32 2, i32* [[GEP_2]], align 4
; ANALYZE-FULL-NEXT: br label [[FOR_NEXT_2]]
; ANALYZE-FULL: for.next.2:
; ANALYZE-FULL-NEXT: br i1 true, label [[DO_STORE_3:%.*]], label [[FOR_NEXT_3:%.*]]
; ANALYZE-FULL: do_store.3:
; ANALYZE-FULL-NEXT: [[GEP_3:%.*]] = getelementptr i32, i32* [[A]], i32 3
; ANALYZE-FULL-NEXT: store i32 3, i32* [[GEP_3]]
; ANALYZE-FULL-NEXT: store i32 3, i32* [[GEP_3]], align 4
; ANALYZE-FULL-NEXT: br label [[FOR_NEXT_3]]
; ANALYZE-FULL: for.next.3:
; ANALYZE-FULL-NEXT: br i1 false, label [[DO_STORE_4:%.*]], label [[FOR_NEXT_4:%.*]]
; ANALYZE-FULL: do_store.4:
; ANALYZE-FULL-NEXT: [[GEP_4:%.*]] = getelementptr i32, i32* [[A]], i32 4
; ANALYZE-FULL-NEXT: store i32 4, i32* [[GEP_4]]
; ANALYZE-FULL-NEXT: store i32 4, i32* [[GEP_4]], align 4
; ANALYZE-FULL-NEXT: br label [[FOR_NEXT_4]]
; ANALYZE-FULL: for.next.4:
; ANALYZE-FULL-NEXT: br i1 false, label [[DO_STORE_5:%.*]], label [[FOR_NEXT_5:%.*]]
; ANALYZE-FULL: do_store.5:
; ANALYZE-FULL-NEXT: [[GEP_5:%.*]] = getelementptr i32, i32* [[A]], i32 5
; ANALYZE-FULL-NEXT: store i32 5, i32* [[GEP_5]]
; ANALYZE-FULL-NEXT: store i32 5, i32* [[GEP_5]], align 4
; ANALYZE-FULL-NEXT: br label [[FOR_NEXT_5]]
; ANALYZE-FULL: for.next.5:
; ANALYZE-FULL-NEXT: br i1 false, label [[DO_STORE_6:%.*]], label [[FOR_NEXT_6:%.*]]
; ANALYZE-FULL: do_store.6:
; ANALYZE-FULL-NEXT: [[GEP_6:%.*]] = getelementptr i32, i32* [[A]], i32 6
; ANALYZE-FULL-NEXT: store i32 6, i32* [[GEP_6]]
; ANALYZE-FULL-NEXT: store i32 6, i32* [[GEP_6]], align 4
; ANALYZE-FULL-NEXT: br label [[FOR_NEXT_6]]
; ANALYZE-FULL: for.next.6:
; ANALYZE-FULL-NEXT: br i1 false, label [[DO_STORE_7:%.*]], label [[FOR_NEXT_7:%.*]]
; ANALYZE-FULL: do_store.7:
; ANALYZE-FULL-NEXT: [[GEP_7:%.*]] = getelementptr i32, i32* [[A]], i32 7
; ANALYZE-FULL-NEXT: store i32 7, i32* [[GEP_7]]
; ANALYZE-FULL-NEXT: store i32 7, i32* [[GEP_7]], align 4
; ANALYZE-FULL-NEXT: br label [[FOR_NEXT_7]]
; ANALYZE-FULL: for.next.7:
; ANALYZE-FULL-NEXT: br i1 false, label [[DO_STORE_8:%.*]], label [[FOR_NEXT_8:%.*]]
; ANALYZE-FULL: do_store.8:
; ANALYZE-FULL-NEXT: [[GEP_8:%.*]] = getelementptr i32, i32* [[A]], i32 8
; ANALYZE-FULL-NEXT: store i32 8, i32* [[GEP_8]]
; ANALYZE-FULL-NEXT: store i32 8, i32* [[GEP_8]], align 4
; ANALYZE-FULL-NEXT: br label [[FOR_NEXT_8]]
; ANALYZE-FULL: for.next.8:
; ANALYZE-FULL-NEXT: ret i32 9
@ -87,7 +87,7 @@ define i32 @foo(i32* %a) {
; DONT-ANALYZE-FULL-NEXT: br i1 [[CMP2]], label [[DO_STORE:%.*]], label [[FOR_NEXT]]
; DONT-ANALYZE-FULL: do_store:
; DONT-ANALYZE-FULL-NEXT: [[GEP:%.*]] = getelementptr i32, i32* [[A:%.*]], i32 [[INDVAR]]
; DONT-ANALYZE-FULL-NEXT: store i32 [[INDVAR]], i32* [[GEP]]
; DONT-ANALYZE-FULL-NEXT: store i32 [[INDVAR]], i32* [[GEP]], align 4
; DONT-ANALYZE-FULL-NEXT: br label [[FOR_NEXT]]
; DONT-ANALYZE-FULL: for.next:
; DONT-ANALYZE-FULL-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INDVAR_NEXT]], 9

View File

@ -66,7 +66,7 @@ define void @foo(i8* %arg) {
; CHECK: bb2:
; CHECK-NEXT: br label [[BB1]]
; CHECK: bb3:
; CHECK-NEXT: store i8 0, i8* [[TMP]], !g !0
; CHECK-NEXT: store i8 0, i8* [[TMP]], align 1, !g !0
; CHECK-NEXT: br label [[BB4:%.*]]
; CHECK: bb4:
; CHECK-NEXT: br label [[BB6:%.*]]
@ -77,10 +77,10 @@ define void @foo(i8* %arg) {
; CHECK-NEXT: i8 6, label [[BB8:%.*]]
; CHECK-NEXT: ]
; CHECK: bb8:
; CHECK-NEXT: store i8 undef, i8* null
; CHECK-NEXT: store i8 undef, i8* null, align 1
; CHECK-NEXT: br label [[BB4]]
; CHECK: bb9:
; CHECK-NEXT: store i8 0, i8* [[ARG]], !g !0
; CHECK-NEXT: store i8 0, i8* [[ARG]], align 1, !g !0
; CHECK-NEXT: unreachable
;
bb:

View File

@ -74,28 +74,28 @@ declare void @c.d.p(i64, i8*)
define void @e(i32 %a0, i32 %a1, %struct.a** %p2) {
; CHECK-LABEL: @e(
; CHECK-NEXT: [[F:%.*]] = alloca i32
; CHECK-NEXT: store i32 [[A0:%.*]], i32* [[F]], !g !0
; CHECK-NEXT: store i32 [[A0:%.*]], i32* [[F]], align 4, !g !0
; CHECK-NEXT: br label [[H:%.*]]
; CHECK: h:
; CHECK-NEXT: call void @c.d.p(i64 8, i8* undef)
; CHECK-NEXT: [[I:%.*]] = load i32, i32* [[F]]
; CHECK-NEXT: [[J:%.*]] = load i32, i32* null
; CHECK-NEXT: [[I:%.*]] = load i32, i32* [[F]], align 4
; CHECK-NEXT: [[J:%.*]] = load i32, i32* null, align 4
; CHECK-NEXT: [[K:%.*]] = icmp eq i32 [[I]], [[J]]
; CHECK-NEXT: br i1 [[K]], label [[L:%.*]], label [[Q:%.*]]
; CHECK: l:
; CHECK-NEXT: br label [[R:%.*]]
; CHECK: q:
; CHECK-NEXT: [[M:%.*]] = load %struct.a*, %struct.a** null
; CHECK-NEXT: [[M:%.*]] = load %struct.a*, %struct.a** null, align 8
; CHECK-NEXT: br label [[R]]
; CHECK: r:
; CHECK-NEXT: switch i32 undef, label [[N:%.*]] [
; CHECK-NEXT: i32 0, label [[S:%.*]]
; CHECK-NEXT: ]
; CHECK: s:
; CHECK-NEXT: store i32 [[A1:%.*]], i32* [[F]], !g !0
; CHECK-NEXT: store i32 [[A1:%.*]], i32* [[F]], align 4, !g !0
; CHECK-NEXT: br label [[H]]
; CHECK: n:
; CHECK-NEXT: [[O:%.*]] = load %struct.a*, %struct.a** [[P2:%.*]]
; CHECK-NEXT: [[O:%.*]] = load %struct.a*, %struct.a** [[P2:%.*]], align 8
; CHECK-NEXT: ret void
;
%f = alloca i32

View File

@ -23,17 +23,17 @@ define void @hoge(i32 %arg) {
; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* @global, align 4, !h !0
; CHECK-NEXT: unreachable
; CHECK: bb6:
; CHECK-NEXT: store i32 [[TMP]], i32* @global.1, !h !0
; CHECK-NEXT: store i32 [[TMP]], i32* @global.1, align 4, !h !0
; CHECK-NEXT: br i1 undef, label [[BB7:%.*]], label [[BB1]]
; CHECK: bb7:
; CHECK-NEXT: br i1 undef, label [[BB10:%.*]], label [[BB8:%.*]]
; CHECK: bb8:
; CHECK-NEXT: br i1 false, label [[BB9:%.*]], label [[BB3:%.*]]
; CHECK: bb9:
; CHECK-NEXT: store i8 undef, i8* null
; CHECK-NEXT: store i8 undef, i8* null, align 1
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb10:
; CHECK-NEXT: store i32 0, i32* @global, !h !0
; CHECK-NEXT: store i32 0, i32* @global, align 4, !h !0
; CHECK-NEXT: br label [[BB7]]
;
bb:

View File

@ -12,23 +12,23 @@ define void @patatino() {
; CHECK: for.cond2thread-pre-split:
; CHECK-NEXT: br i1 false, label [[FOR_BODY:%.*]], label [[FOR_COND8_PREHEADER:%.*]]
; CHECK: for.cond8.preheader:
; CHECK-NEXT: br i1 undef, label [[L1:%.*]], label %for.cond11thread-pre-split.lr.ph
; CHECK-NEXT: br i1 undef, label [[L1:%.*]], label [[FOR_COND11THREAD_PRE_SPLIT_LR_PH:%.*]]
; CHECK: for.cond11thread-pre-split.lr.ph:
; CHECK-NEXT: br label [[L1]]
; CHECK: for.body:
; CHECK-NEXT: [[CMP3:%.*]] = icmp ne i64 [[K_2:%.*]], 3
; CHECK-NEXT: [[CONV4:%.*]] = zext i1 [[CMP3]] to i64
; CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* @f
; CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* @f, align 4
; CHECK-NEXT: [[OR:%.*]] = or i64 [[TMP0]], [[CONV4]]
; CHECK-NEXT: store i64 [[OR]], i64* @f
; CHECK-NEXT: store i64 [[OR]], i64* @f, align 4
; CHECK-NEXT: [[TOBOOL7:%.*]] = icmp ne i64 [[K_2]], 0
; CHECK-NEXT: br i1 [[TOBOOL7]], label %for.cond2thread-pre-split, label [[LOR_RHS:%.*]]
; CHECK-NEXT: br i1 [[TOBOOL7]], label [[FOR_COND2THREAD_PRE_SPLIT:%.*]], label [[LOR_RHS:%.*]]
; CHECK: lor.rhs:
; CHECK-NEXT: store i64 1, i64* @b, align 8
; CHECK-NEXT: br label %for.cond2thread-pre-split
; CHECK-NEXT: br label [[FOR_COND2THREAD_PRE_SPLIT]]
; CHECK: l1:
; CHECK-NEXT: [[K_2]] = phi i64 [ undef, [[L1_PREHEADER:%.*]] ], [ 15, [[FOR_COND8_PREHEADER]] ], [ 5, %for.cond11thread-pre-split.lr.ph ]
; CHECK-NEXT: store i64 7, i64* [[J_3:%.*]]
; CHECK-NEXT: [[K_2]] = phi i64 [ undef, [[L1_PREHEADER:%.*]] ], [ 15, [[FOR_COND8_PREHEADER]] ], [ 5, [[FOR_COND11THREAD_PRE_SPLIT_LR_PH]] ]
; CHECK-NEXT: store i64 7, i64* [[J_3:%.*]], align 4
; CHECK-NEXT: br label [[FOR_BODY]]
; CHECK: for.cond16:
; CHECK-NEXT: [[J_0:%.*]] = phi i64* [ @f, [[ENTRY:%.*]] ], [ undef, [[FOR_COND20:%.*]] ], [ @e, [[FOR_COND16]] ]

View File

@ -47,13 +47,13 @@ define void @test(float * %a, float * %b, float * %c, float * %d) {
; CHECK-NEXT: call void @unknown()
; CHECK-NEXT: call void @unknown()
; CHECK-NEXT: call void @unknown()
; CHECK-NEXT: store float [[L0]], float* [[B:%.*]]
; CHECK-NEXT: store float [[L0]], float* [[B:%.*]], align 4
; CHECK-NEXT: [[B1:%.*]] = getelementptr inbounds float, float* [[B]], i64 1
; CHECK-NEXT: store float [[L1]], float* [[B1]]
; CHECK-NEXT: store float [[L1]], float* [[B1]], align 4
; CHECK-NEXT: [[B2:%.*]] = getelementptr inbounds float, float* [[B]], i64 2
; CHECK-NEXT: store float [[L2]], float* [[B2]]
; CHECK-NEXT: store float [[L2]], float* [[B2]], align 4
; CHECK-NEXT: [[B3:%.*]] = getelementptr inbounds float, float* [[B]], i64 3
; CHECK-NEXT: store float [[L3]], float* [[B3]]
; CHECK-NEXT: store float [[L3]], float* [[B3]], align 4
; CHECK-NEXT: [[C1:%.*]] = getelementptr inbounds float, float* [[C:%.*]], i64 1
; CHECK-NEXT: [[C2:%.*]] = getelementptr inbounds float, float* [[C]], i64 2
; CHECK-NEXT: [[C3:%.*]] = getelementptr inbounds float, float* [[C]], i64 3

View File

@ -148,8 +148,8 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
; BASIC-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]), "align"(i32** [[TMP5]], i64 8) ]
; BASIC-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]) ]
; BASIC-NEXT: store i8* [[TMP2]], i8** [[TMP6]]
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]), "align"(i8** [[TMP6]], i64 8) ]
; BASIC-NEXT: store i8* [[TMP2]], i8** [[TMP6]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]), "align"(i32** [[TMP5]], i64 8) ]
; BASIC-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "nonnull"(i32* [[TMP8]]), "align"(i32* [[TMP8]], i64 4) ]
@ -196,8 +196,8 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
; ALL-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]), "align"(i32** [[TMP5]], i64 8) ]
; ALL-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]) ]
; ALL-NEXT: store i8* [[TMP2]], i8** [[TMP6]]
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]), "align"(i8** [[TMP6]], i64 8) ]
; ALL-NEXT: store i8* [[TMP2]], i8** [[TMP6]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]), "align"(i32** [[TMP5]], i64 8) ]
; ALL-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "nonnull"(i32* [[TMP8]]), "align"(i32* [[TMP8]], i64 4) ]
@ -244,13 +244,12 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
; WITH-AC-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]), "align"(i32** [[TMP5]], i64 8) ]
; WITH-AC-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]) ]
; WITH-AC-NEXT: store i8* [[TMP2]], i8** [[TMP6]]
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]), "align"(i8** [[TMP6]], i64 8) ]
; WITH-AC-NEXT: store i8* [[TMP2]], i8** [[TMP6]], align 8
; WITH-AC-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "nonnull"(i32* [[TMP8]]), "align"(i32* [[TMP8]], i64 4) ]
; WITH-AC-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
; WITH-AC-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** [[TMP6]], i64 8) ]
; WITH-AC-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]], align 8
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1), "nonnull"(i8* [[TMP11]]) ]
; WITH-AC-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
@ -287,13 +286,12 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
; CROSS-BLOCK-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]), "align"(i32** [[TMP5]], i64 8) ]
; CROSS-BLOCK-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]) ]
; CROSS-BLOCK-NEXT: store i8* [[TMP2]], i8** [[TMP6]]
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]), "align"(i8** [[TMP6]], i64 8) ]
; CROSS-BLOCK-NEXT: store i8* [[TMP2]], i8** [[TMP6]], align 8
; CROSS-BLOCK-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "nonnull"(i32* [[TMP8]]), "align"(i32* [[TMP8]], i64 4) ]
; CROSS-BLOCK-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
; CROSS-BLOCK-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** [[TMP6]], i64 8) ]
; CROSS-BLOCK-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]], align 8
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1), "nonnull"(i8* [[TMP11]]) ]
; CROSS-BLOCK-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
@ -328,7 +326,7 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
; FULL-SIMPLIFY-NEXT: [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
; FULL-SIMPLIFY-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP4]], align 8
; FULL-SIMPLIFY-NEXT: store i32* [[TMP1]], i32** [[TMP5]], align 8
; FULL-SIMPLIFY-NEXT: store i8* [[TMP2]], i8** [[TMP6]]
; FULL-SIMPLIFY-NEXT: store i8* [[TMP2]], i8** [[TMP6]], align 8
; FULL-SIMPLIFY-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
; FULL-SIMPLIFY-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
; FULL-SIMPLIFY-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
@ -349,7 +347,7 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
; FULL-SIMPLIFY-NEXT: [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
; FULL-SIMPLIFY-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
; FULL-SIMPLIFY-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]], align 8
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]), "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]), "dereferenceable"(i32* [[TMP8]], i64 4), "nonnull"(i32* [[TMP8]]), "align"(i32* [[TMP8]], i64 4), "align"(i8** [[TMP6]], i64 8), "dereferenceable"(i8* [[TMP11]], i64 1), "nonnull"(i8* [[TMP11]]), "dereferenceable"(i32* [[TMP17]], i64 4), "nonnull"(i32* [[TMP17]]), "align"(i32* [[TMP17]], i64 8), "dereferenceable"(i8* [[TMP20]], i64 1), "nonnull"(i8* [[TMP20]]), "align"(i8* [[TMP20]], i64 4), "dereferenceable"(i32** [[TMP25]], i64 8), "nonnull"(i32** [[TMP25]]), "align"(i32** [[TMP25]], i64 8), "dereferenceable"(i32* [[TMP26]], i64 4), "nonnull"(i32* [[TMP26]]), "align"(i32* [[TMP26]], i64 4) ]
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]), "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]), "align"(i8** [[TMP6]], i64 8), "dereferenceable"(i32* [[TMP8]], i64 4), "nonnull"(i32* [[TMP8]]), "align"(i32* [[TMP8]], i64 4), "dereferenceable"(i8* [[TMP11]], i64 1), "nonnull"(i8* [[TMP11]]), "dereferenceable"(i32* [[TMP17]], i64 4), "nonnull"(i32* [[TMP17]]), "align"(i32* [[TMP17]], i64 8), "dereferenceable"(i8* [[TMP20]], i64 1), "nonnull"(i8* [[TMP20]]), "align"(i8* [[TMP20]], i64 4), "dereferenceable"(i32** [[TMP25]], i64 8), "nonnull"(i32** [[TMP25]]), "align"(i32** [[TMP25]], i64 8), "dereferenceable"(i32* [[TMP26]], i64 4), "nonnull"(i32* [[TMP26]]), "align"(i32* [[TMP26]], i64 4) ]
; FULL-SIMPLIFY-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
; FULL-SIMPLIFY-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
; FULL-SIMPLIFY-NEXT: ret i32 [[TMP28]]

View File

@ -8,34 +8,34 @@ define void @f_0(i32* %ptr) {
; This part checks for the easy syntactic verifier rules.
; CHECK: Struct tag metadata must have either 3 or 4 operands
; CHECK-NEXT: store i32 0, i32* %ptr, !tbaa !{{[0-9]+}}
; CHECK-NEXT: store i32 0, i32* %ptr, align 4, !tbaa !{{[0-9]+}}
; CHECK: Immutability tag on struct tag metadata must be a constant
; CHECK-NEXT: store i32 1, i32* %ptr, !tbaa !{{[0-9]+}}
; CHECK-NEXT: store i32 1, i32* %ptr, align 4, !tbaa !{{[0-9]+}}
; CHECK: Immutability part of the struct tag metadata must be either 0 or 1
; CHECK-NEXT: store i32 2, i32* %ptr, !tbaa !{{[0-9]+}}
; CHECK-NEXT: store i32 2, i32* %ptr, align 4, !tbaa !{{[0-9]+}}
; CHECK: Offset must be constant integer
; CHECK-NEXT: store i32 3, i32* %ptr, !tbaa !{{[0-9]+}}
; CHECK-NEXT: store i32 3, i32* %ptr, align 4, !tbaa !{{[0-9]+}}
; CHECK: Malformed struct tag metadata: base and access-type should be non-null and point to Metadata nodes
; CHECK-NEXT: store i32 4, i32* %ptr, !tbaa !{{[0-9]+}}
; CHECK-NEXT: store i32 4, i32* %ptr, align 4, !tbaa !{{[0-9]+}}
; CHECK: Access type node must be a valid scalar type
; CHECK-NEXT: store i32 5, i32* %ptr, !tbaa !{{[0-9]+}}
; CHECK-NEXT: store i32 5, i32* %ptr, align 4, !tbaa !{{[0-9]+}}
; CHECK: Access bit-width not the same as description bit-width
; CHECK-NEXT: store i32 6, i32* %ptr, !tbaa !{{[0-9]+}}
; CHECK-NEXT: store i32 6, i32* %ptr, align 4, !tbaa !{{[0-9]+}}
; CHECK: Access type node must be a valid scalar type
; CHECK-NEXT: store i32 7, i32* %ptr, !tbaa !{{[0-9]+}}
; CHECK-NEXT: store i32 7, i32* %ptr, align 4, !tbaa !{{[0-9]+}}
; CHECK: Struct tag nodes have a string as their first operand
; CHECK-NEXT: !{{[0-9]+}} = !{!{{[0-9]+}}, !{{[0-9]+}}, i64 0}
; CHECK: Access type node must be a valid scalar type
; CHECK-NEXT: store i32 9, i32* %ptr, !tbaa !{{[0-9]+}}
; CHECK-NEXT: store i32 9, i32* %ptr, align 4, !tbaa !{{[0-9]+}}
store i32 0, i32* %ptr, !tbaa !{!3, !2, i64 40, i64 0, i64 1, i64 2}
store i32 1, i32* %ptr, !tbaa !{!3, !2, i64 40, !"immutable"}
@ -55,40 +55,40 @@ define void @f_1(i32* %ptr) {
; This part checks for more semantic verifier rules.
; CHECK: Cycle detected in struct path
; CHECK-NEXT: store i32 0, i32* %ptr, !tbaa !{{[0-9]+}}
; CHECK-NEXT: store i32 0, i32* %ptr, align 4, !tbaa !{{[0-9]+}}
; CHECK: Offset not zero at the point of scalar access
; CHECK-NEXT: store i32 1, i32* %ptr, !tbaa !{{[0-9]+}}
; CHECK-NEXT: store i32 1, i32* %ptr, align 4, !tbaa !{{[0-9]+}}
; CHECK: Offset not zero at the point of scalar access
; CHECK-NEXT: store i32 2, i32* %ptr, !tbaa !{{[0-9]+}}
; CHECK-NEXT: store i32 2, i32* %ptr, align 4, !tbaa !{{[0-9]+}}
; CHECK: Could not find TBAA parent in struct type node
; CHECK-NEXT: store i32 3, i32* %ptr, !tbaa !{{[0-9]+}}
; CHECK-NEXT: store i32 3, i32* %ptr, align 4, !tbaa !{{[0-9]+}}
; CHECK: Did not see access type in access path!
; CHECK-NEXT: store i32 3, i32* %ptr, !tbaa !{{[0-9]+}}
; CHECK-NEXT: store i32 3, i32* %ptr, align 4, !tbaa !{{[0-9]+}}
; CHECK: Access type node must be a valid scalar type
; CHECK-NEXT: store i32 4, i32* %ptr, !tbaa !{{[0-9]+}}
; CHECK-NEXT: store i32 4, i32* %ptr, align 4, !tbaa !{{[0-9]+}}
; CHECK: Access type node must be a valid scalar type
; CHECK-NEXT: store i32 5, i32* %ptr, !tbaa !{{[0-9]+}}
; CHECK-NEXT: store i32 5, i32* %ptr, align 4, !tbaa !{{[0-9]+}}
; CHECK: Access type node must be a valid scalar type
; CHECK-NEXT: store i32 6, i32* %ptr, !tbaa !{{[0-9]+}}
; CHECK-NEXT: store i32 6, i32* %ptr, align 4, !tbaa !{{[0-9]+}}
; CHECK: Struct tag nodes must have an odd number of operands!
; CHECK-NEXT:!{{[0-9]+}} = !{!"bad-struct-type-0", !{{[0-9]+}}, i64 40, !{{[0-9]+}}}
; CHECK: Incorrect field entry in struct type node!
; CHECK-NEXT: store i32 8, i32* %ptr, !tbaa !{{[0-9]+}}
; CHECK-NEXT: store i32 8, i32* %ptr, align 4, !tbaa !{{[0-9]+}}
; CHECK: Bitwidth between the offsets and struct type entries must match
; CHECK-NEXT: store i32 9, i32* %ptr, !tbaa !{{[0-9]+}}
; CHECK-NEXT: store i32 9, i32* %ptr, align 4, !tbaa !{{[0-9]+}}
; CHECK: Offsets must be increasing!
; CHECK-NEXT: store i32 10, i32* %ptr, !tbaa !{{[0-9]+}}
; CHECK-NEXT: store i32 10, i32* %ptr, align 4, !tbaa !{{[0-9]+}}
store i32 0, i32* %ptr, !tbaa !{!4, !2, i64 40}
store i32 1, i32* %ptr, !tbaa !{!3, !2, i64 45}