
[Alignment][NFC] Deprecate untyped CreateAlignedLoad

Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790
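
As a minimal call-site sketch of the pattern this series introduces (the helper name and the Builder/EltTy/Ptr placeholders are illustrative, not taken from this patch): callers stop passing a raw unsigned and hand the builder a typed alignment instead, relying on the implicit Align-to-MaybeAlign conversion.

#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Alignment.h"

// Sketch only; assumes LLVM headers from around this revision.
static llvm::Value *loadFourByteAligned(llvm::IRBuilder<> &Builder,
                                        llvm::Type *EltTy, llvm::Value *Ptr) {
  // Before: Builder.CreateAlignedLoad(EltTy, Ptr, 4);   // now deprecated
  // After:  pass a typed alignment; Align(4) converts to MaybeAlign.
  return Builder.CreateAlignedLoad(EltTy, Ptr, llvm::Align(4));
}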

Reviewers: courbet

Subscribers: arsenm, jvesely, nhaehnle, hiraditya, kerbowa, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D73260
Author: Guillaume Chatelet
Date: 2020-01-23 11:33:12 +01:00
Parent: 21172e27d1
Commit: 084ea94702
21 changed files with 98 additions and 89 deletions


@ -1759,8 +1759,10 @@ public:
/// parameter.
/// FIXME: Remove this function once the transition to Align is over.
/// Use the version that takes MaybeAlign instead of this one.
LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
const char *Name) {
LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
unsigned Align,
const char *Name),
"Use the version that takes NaybeAlign instead") {
return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), Name);
}
LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
@ -1771,8 +1773,10 @@ public:
}
/// FIXME: Remove this function once the transition to Align is over.
/// Use the version that takes MaybeAlign instead of this one.
LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
const Twine &Name = "") {
LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
unsigned Align,
const Twine &Name = ""),
"Use the version that takes MaybeAlign instead") {
return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), Name);
}
LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
@ -1783,8 +1787,11 @@ public:
}
/// FIXME: Remove this function once the transition to Align is over.
/// Use the version that takes MaybeAlign instead of this one.
LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
bool isVolatile, const Twine &Name = "") {
LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
unsigned Align,
bool isVolatile,
const Twine &Name = ""),
"Use the version that takes MaybeAlign instead") {
return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), isVolatile, Name);
}
LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
@ -1797,19 +1804,19 @@ public:
// Deprecated [opaque pointer types]
LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, const char *Name) {
return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
Align, Name);
MaybeAlign(Align), Name);
}
// Deprecated [opaque pointer types]
LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align,
const Twine &Name = "") {
return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
Align, Name);
MaybeAlign(Align), Name);
}
// Deprecated [opaque pointer types]
LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, bool isVolatile,
const Twine &Name = "") {
return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
Align, isVolatile, Name);
MaybeAlign(Align), isVolatile, Name);
}
// Deprecated [opaque pointer types]
LoadInst *CreateAlignedLoad(Value *Ptr, MaybeAlign Align, const char *Name) {
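
For context, a simplified, self-contained stand-in for what the deprecation above buys (the Demo* types are hypothetical, not LLVM's): the untyped overload keeps compiling during the transition but emits a warning that points callers at the typed one.

#include <cstdio>

struct DemoMaybeAlign { unsigned Value; }; // stand-in for llvm::MaybeAlign

struct DemoBuilder {
  [[deprecated("Use the version that takes MaybeAlign instead")]]
  void createAlignedLoad(unsigned Align) {       // legacy, untyped entry point
    createAlignedLoad(DemoMaybeAlign{Align});
  }
  void createAlignedLoad(DemoMaybeAlign Align) { // preferred overload
    std::printf("align = %u\n", Align.Value);
  }
};

int main() {
  DemoBuilder B;
  B.createAlignedLoad(DemoMaybeAlign{8}); // clean
  // B.createAlignedLoad(8u);             // still compiles, warns as deprecated
  return 0;
}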


@ -109,8 +109,12 @@ public:
/// Return the alignment of the memory that is being allocated by the
/// instruction.
MaybeAlign getAlign() const {
return decodeMaybeAlign(getSubclassDataFromInstruction() & 31);
}
// FIXME: Remove this once the transition to Align is over.
unsigned getAlignment() const {
if (const auto MA = decodeMaybeAlign(getSubclassDataFromInstruction() & 31))
if (const auto MA = getAlign())
return MA->value();
return 0;
}
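
A hedged reading of the accessor pair above: getAlign() is the typed accessor, while the legacy getAlignment() keeps the old unsigned contract (0 meaning "no explicit alignment") until the transition finishes. A hypothetical caller-side equivalent:

#include "llvm/Support/Alignment.h"
#include <cstdint>

// Illustrative helper, not part of the patch: what getAlignment() boils down
// to once getAlign() returns a MaybeAlign.
static uint64_t alignmentOrZero(llvm::MaybeAlign MA) {
  return MA ? MA->value() : 0;
}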


@ -1622,7 +1622,7 @@ bool AtomicExpand::expandAtomicOpToLibcall(
bool UseSizedLibcall = canUseSizedAtomicCall(Size, Align, DL);
Type *SizedIntTy = Type::getIntNTy(Ctx, Size * 8);
unsigned AllocaAlignment = DL.getPrefTypeAlignment(SizedIntTy);
const llvm::Align AllocaAlignment(DL.getPrefTypeAlignment(SizedIntTy));
// TODO: the "order" argument type is "int", not int32. So
// getInt32Ty may be wrong if the arch uses e.g. 16-bit ints.
@ -1712,7 +1712,7 @@ bool AtomicExpand::expandAtomicOpToLibcall(
// 'expected' argument, if present.
if (CASExpected) {
AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
AllocaCASExpected->setAlignment(MaybeAlign(AllocaAlignment));
AllocaCASExpected->setAlignment(AllocaAlignment);
unsigned AllocaAS = AllocaCASExpected->getType()->getPointerAddressSpace();
AllocaCASExpected_i8 =
@ -1731,7 +1731,7 @@ bool AtomicExpand::expandAtomicOpToLibcall(
Args.push_back(IntValue);
} else {
AllocaValue = AllocaBuilder.CreateAlloca(ValueOperand->getType());
AllocaValue->setAlignment(MaybeAlign(AllocaAlignment));
AllocaValue->setAlignment(AllocaAlignment);
AllocaValue_i8 =
Builder.CreateBitCast(AllocaValue, Type::getInt8PtrTy(Ctx));
Builder.CreateLifetimeStart(AllocaValue_i8, SizeVal64);
@ -1743,7 +1743,7 @@ bool AtomicExpand::expandAtomicOpToLibcall(
// 'ret' argument.
if (!CASExpected && HasResult && !UseSizedLibcall) {
AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
AllocaResult->setAlignment(MaybeAlign(AllocaAlignment));
AllocaResult->setAlignment(AllocaAlignment);
unsigned AllocaAS = AllocaResult->getType()->getPointerAddressSpace();
AllocaResult_i8 =
Builder.CreateBitCast(AllocaResult, Type::getInt8PtrTy(Ctx, AllocaAS));


@ -1220,7 +1220,7 @@ bool InterleavedLoadCombineImpl::combine(std::list<VectorInfo> &InterleavedLoad,
"interleaved.wide.ptrcast");
// Create the wide load and update the MemorySSA.
auto LI = Builder.CreateAlignedLoad(ILTy, CI, InsertionPoint->getAlignment(),
auto LI = Builder.CreateAlignedLoad(ILTy, CI, InsertionPoint->getAlign(),
"interleaved.wide.load");
auto MSSAU = MemorySSAUpdater(&MSSA);
MemoryUse *MSSALoad = cast<MemoryUse>(MSSAU.createMemoryAccessBefore(


@ -46,7 +46,7 @@ static bool lowerLoadRelative(Function &F) {
Value *OffsetPtr =
B.CreateGEP(Int8Ty, CI->getArgOperand(0), CI->getArgOperand(1));
Value *OffsetPtrI32 = B.CreateBitCast(OffsetPtr, Int32PtrTy);
Value *OffsetI32 = B.CreateAlignedLoad(Int32Ty, OffsetPtrI32, 4);
Value *OffsetI32 = B.CreateAlignedLoad(Int32Ty, OffsetPtrI32, Align(4));
Value *ResultPtr = B.CreateGEP(Int8Ty, CI->getArgOperand(0), OffsetI32);


@ -130,7 +130,7 @@ static void scalarizeMaskedLoad(CallInst *CI, bool &ModifiedDT) {
Value *Mask = CI->getArgOperand(2);
Value *Src0 = CI->getArgOperand(3);
unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
const Align AlignVal = cast<ConstantInt>(Alignment)->getAlignValue();
VectorType *VecType = cast<VectorType>(CI->getType());
Type *EltTy = VecType->getElementType();
@ -151,7 +151,8 @@ static void scalarizeMaskedLoad(CallInst *CI, bool &ModifiedDT) {
}
// Adjust alignment for the scalar instruction.
AlignVal = MinAlign(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
const Align AdjustedAlignVal =
commonAlignment(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
// Bitcast %addr from i8* to EltTy*
Type *NewPtrType =
EltTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());
@ -166,7 +167,7 @@ static void scalarizeMaskedLoad(CallInst *CI, bool &ModifiedDT) {
if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
continue;
Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AlignVal);
LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AdjustedAlignVal);
VResult = Builder.CreateInsertElement(VResult, Load, Idx);
}
CI->replaceAllUsesWith(VResult);
@ -210,7 +211,7 @@ static void scalarizeMaskedLoad(CallInst *CI, bool &ModifiedDT) {
Builder.SetInsertPoint(InsertPt);
Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AlignVal);
LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AdjustedAlignVal);
Value *NewVResult = Builder.CreateInsertElement(VResult, Load, Idx);
// Create "else" block, fill it in the next iteration
@ -414,8 +415,8 @@ static void scalarizeMaskedGather(CallInst *CI, bool &ModifiedDT) {
if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
continue;
Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
LoadInst *Load =
Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
LoadInst *Load = Builder.CreateAlignedLoad(
EltTy, Ptr, MaybeAlign(AlignVal), "Load" + Twine(Idx));
VResult =
Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));
}
@ -459,8 +460,8 @@ static void scalarizeMaskedGather(CallInst *CI, bool &ModifiedDT) {
Builder.SetInsertPoint(InsertPt);
Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
LoadInst *Load =
Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, MaybeAlign(AlignVal),
"Load" + Twine(Idx));
Value *NewVResult =
Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));
@ -624,8 +625,8 @@ static void scalarizeMaskedExpandLoad(CallInst *CI, bool &ModifiedDT) {
if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
continue;
Value *NewPtr = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, MemIndex);
LoadInst *Load =
Builder.CreateAlignedLoad(EltTy, NewPtr, 1, "Load" + Twine(Idx));
LoadInst *Load = Builder.CreateAlignedLoad(EltTy, NewPtr, Align(1),
"Load" + Twine(Idx));
VResult =
Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));
++MemIndex;
@ -670,7 +671,7 @@ static void scalarizeMaskedExpandLoad(CallInst *CI, bool &ModifiedDT) {
"cond.load");
Builder.SetInsertPoint(InsertPt);
LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, 1);
LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, Align(1));
Value *NewVResult = Builder.CreateInsertElement(VResult, Load, Idx);
// Move the pointer if there are more blocks to come.
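
The MinAlign-to-commonAlignment swap above is a pure type change; a small self-contained check of the typed helper, with values chosen purely for illustration:

#include "llvm/Support/Alignment.h"
#include <cassert>

int main() {
  using namespace llvm;
  // An element at offset 4 inside 16-byte aligned storage is 4-byte aligned.
  assert(commonAlignment(Align(16), 4).value() == 4);
  // Offsets that are multiples of the alignment preserve it.
  assert(commonAlignment(Align(16), 32).value() == 16);
  return 0;
}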


@ -2308,7 +2308,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Type *VT = VectorType::get(EltTy, NumSrcElts);
Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
PointerType::getUnqual(VT));
Value *Load = Builder.CreateAlignedLoad(VT, Op, 1);
Value *Load = Builder.CreateAlignedLoad(VT, Op, Align(1));
if (NumSrcElts == 2)
Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
{ 0, 1, 0, 1 });
@ -3054,7 +3054,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
// Convert the type of the pointer to a pointer to the stored type.
Value *BC =
Builder.CreateBitCast(Ptr, PointerType::getUnqual(VTy), "cast");
LoadInst *LI = Builder.CreateAlignedLoad(VTy, BC, VTy->getBitWidth() / 8);
LoadInst *LI =
Builder.CreateAlignedLoad(VTy, BC, Align(VTy->getBitWidth() / 8));
LI->setMetadata(M->getMDKindID("nontemporal"), Node);
Rep = LI;
} else if (IsX86 && (Name.startswith("fma.vfmadd.") ||


@ -160,7 +160,7 @@ bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
ArgPtr = Builder.CreateBitCast(ArgPtr, AdjustedArgTy->getPointerTo(AS),
ArgPtr->getName() + ".cast");
LoadInst *Load =
Builder.CreateAlignedLoad(AdjustedArgTy, ArgPtr, AdjustedAlign.value());
Builder.CreateAlignedLoad(AdjustedArgTy, ArgPtr, AdjustedAlign);
Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(Ctx, {}));
MDBuilder MDB(Ctx);


@ -251,10 +251,10 @@ AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
// 32-bit and extract sequence is already present, and it is probably easier
// to CSE this. The loads should be mergable later anyway.
Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, 4);
LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));
Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, 4);
LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));
MDNode *MD = MDNode::get(Mod->getContext(), None);
LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);


@ -772,8 +772,7 @@ LoadInst* ARMParallelDSP::CreateWideLoad(MemInstList &Loads,
const unsigned AddrSpace = DomLoad->getPointerAddressSpace();
Value *VecPtr = IRB.CreateBitCast(Base->getPointerOperand(),
LoadTy->getPointerTo(AddrSpace));
LoadInst *WideLoad = IRB.CreateAlignedLoad(LoadTy, VecPtr,
Base->getAlignment());
LoadInst *WideLoad = IRB.CreateAlignedLoad(LoadTy, VecPtr, Base->getAlign());
// Make sure everything is in the correct order in the basic block.
MoveBefore(Base->getPointerOperand(), VecPtr);


@ -27474,7 +27474,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
// Finally we can emit the atomic load.
LoadInst *Loaded =
Builder.CreateAlignedLoad(AI->getType(), AI->getPointerOperand(),
AI->getType()->getPrimitiveSizeInBits());
Align(AI->getType()->getPrimitiveSizeInBits()));
Loaded->setAtomic(Order, SSID);
AI->replaceAllUsesWith(Loaded);
AI->eraseFromParent();


@ -216,7 +216,7 @@ void X86InterleavedAccessGroup::decompose(
Value *NewBasePtr =
Builder.CreateGEP(VecBaseTy, VecBasePtr, Builder.getInt32(i));
Instruction *NewLoad =
Builder.CreateAlignedLoad(VecBaseTy, NewBasePtr, LI->getAlignment());
Builder.CreateAlignedLoad(VecBaseTy, NewBasePtr, LI->getAlign());
DecomposedVectors.push_back(NewLoad);
}
}


@ -1056,7 +1056,8 @@ static Value *simplifyX86vpermv(const IntrinsicInst &II,
// * Narrow width by halfs excluding zero/undef lanes
Value *InstCombiner::simplifyMaskedLoad(IntrinsicInst &II) {
Value *LoadPtr = II.getArgOperand(0);
unsigned Alignment = cast<ConstantInt>(II.getArgOperand(1))->getZExtValue();
const Align Alignment =
cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
// If the mask is all ones or undefs, this is a plain vector load of the 1st
// argument.
@ -1066,9 +1067,9 @@ Value *InstCombiner::simplifyMaskedLoad(IntrinsicInst &II) {
// If we can unconditionally load from this address, replace with a
// load/select idiom. TODO: use DT for context sensitive query
if (isDereferenceableAndAlignedPointer(
LoadPtr, II.getType(), MaybeAlign(Alignment),
II.getModule()->getDataLayout(), &II, nullptr)) {
if (isDereferenceableAndAlignedPointer(LoadPtr, II.getType(), Alignment,
II.getModule()->getDataLayout(), &II,
nullptr)) {
Value *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
"unmaskedload");
return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3));
@ -1459,7 +1460,7 @@ static Value *simplifyNeonVld1(const IntrinsicInst &II,
auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
PointerType::get(II.getType(), 0));
return Builder.CreateAlignedLoad(II.getType(), BCastInst, Alignment);
return Builder.CreateAlignedLoad(II.getType(), BCastInst, Align(Alignment));
}
// Returns true iff the 2 intrinsics have the same operands, limiting the


@ -462,12 +462,11 @@ LoadInst *InstCombiner::combineLoadToNewType(LoadInst &LI, Type *NewTy,
NewPtr->getType()->getPointerAddressSpace() == AS))
NewPtr = Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS));
unsigned Align = LI.getAlignment();
if (!Align)
// If old load did not have an explicit alignment specified,
// manually preserve the implied (ABI) alignment of the load.
// Else we may inadvertently incorrectly over-promise alignment.
Align = getDataLayout().getABITypeAlignment(LI.getType());
const auto Align =
getDataLayout().getValueOrABITypeAlignment(LI.getAlign(), LI.getType());
LoadInst *NewLoad = Builder.CreateAlignedLoad(
NewTy, NewPtr, Align, LI.isVolatile(), LI.getName() + Suffix);
@ -674,9 +673,7 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
if (SL->hasPadding())
return nullptr;
auto Align = LI.getAlignment();
if (!Align)
Align = DL.getABITypeAlignment(ST);
const auto Align = DL.getValueOrABITypeAlignment(LI.getAlign(), ST);
auto *Addr = LI.getPointerOperand();
auto *IdxType = Type::getInt32Ty(T->getContext());
@ -690,9 +687,9 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
};
auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
Name + ".elt");
auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
auto *L = IC.Builder.CreateAlignedLoad(ST->getElementType(i), Ptr,
EltAlign, Name + ".unpack");
auto *L = IC.Builder.CreateAlignedLoad(
ST->getElementType(i), Ptr,
commonAlignment(Align, SL->getElementOffset(i)), Name + ".unpack");
// Propagate AA metadata. It'll still be valid on the narrowed load.
AAMDNodes AAMD;
LI.getAAMetadata(AAMD);
@ -725,9 +722,7 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
const DataLayout &DL = IC.getDataLayout();
auto EltSize = DL.getTypeAllocSize(ET);
auto Align = LI.getAlignment();
if (!Align)
Align = DL.getABITypeAlignment(T);
const auto Align = DL.getValueOrABITypeAlignment(LI.getAlign(), T);
auto *Addr = LI.getPointerOperand();
auto *IdxType = Type::getInt64Ty(T->getContext());
@ -742,8 +737,9 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
};
auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
Name + ".elt");
auto *L = IC.Builder.CreateAlignedLoad(
AT->getElementType(), Ptr, MinAlign(Align, Offset), Name + ".unpack");
auto *L = IC.Builder.CreateAlignedLoad(AT->getElementType(), Ptr,
commonAlignment(Align, Offset),
Name + ".unpack");
AAMDNodes AAMD;
LI.getAAMetadata(AAMD);
L->setAAMetadata(AAMD);
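
The repeated "if the load carries no explicit alignment, fall back to the ABI alignment of the type" dance removed above collapses into one DataLayout helper; a minimal sketch assuming the headers from this revision (the wrapper name is illustrative):

#include "llvm/IR/DataLayout.h"
#include "llvm/Support/Alignment.h"

// Illustrative wrapper, not from the patch: explicit alignment if present,
// otherwise the ABI alignment of the loaded type.
static llvm::Align effectiveLoadAlign(const llvm::DataLayout &DL,
                                      llvm::MaybeAlign Specified,
                                      llvm::Type *Ty) {
  return DL.getValueOrABITypeAlignment(Specified, Ty);
}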


@ -1194,7 +1194,7 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
}
}
uint64_t ShadowAlign = Align * DFS.ShadowWidth / 8;
const MaybeAlign ShadowAlign(Align * DFS.ShadowWidth / 8);
SmallVector<const Value *, 2> Objs;
GetUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout());
bool AllConstants = true;
@ -1216,7 +1216,7 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
return DFS.ZeroShadow;
case 1: {
LoadInst *LI = new LoadInst(DFS.ShadowTy, ShadowAddr, "", Pos);
LI->setAlignment(MaybeAlign(ShadowAlign));
LI->setAlignment(ShadowAlign);
return LI;
}
case 2: {
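
Worth noting for the MaybeAlign constructor used above: MaybeAlign(0) means "no alignment specified", while any power of two wraps a real Align. A tiny hedged check:

#include "llvm/Support/Alignment.h"
#include <cassert>

int main() {
  using namespace llvm;
  assert(!MaybeAlign(0));                                // 0 means "unspecified"
  assert(MaybeAlign(8) && MaybeAlign(8)->value() == 8);  // otherwise a real Align
  return 0;
}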


@ -1643,8 +1643,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// ParamTLS overflow.
*ShadowPtr = getCleanShadow(V);
} else {
*ShadowPtr = EntryIRB.CreateAlignedLoad(
getShadowTy(&FArg), Base, kShadowTLSAlignment.value());
*ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,
kShadowTLSAlignment);
}
}
LLVM_DEBUG(dbgs()
@ -1783,8 +1783,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (PropagateShadow) {
std::tie(ShadowPtr, OriginPtr) =
getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
setShadow(&I, IRB.CreateAlignedLoad(ShadowTy, ShadowPtr,
Alignment.value(), "_msld"));
setShadow(&I,
IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
} else {
setShadow(&I, getCleanShadow(&I));
}
@ -1798,8 +1798,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (MS.TrackOrigins) {
if (PropagateShadow) {
const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
setOrigin(&I, IRB.CreateAlignedLoad(MS.OriginTy, OriginPtr,
OriginAlignment.value()));
setOrigin(
&I, IRB.CreateAlignedLoad(MS.OriginTy, OriginPtr, OriginAlignment));
} else {
setOrigin(&I, getCleanOrigin());
}
@ -2481,8 +2481,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
const Align Alignment = Align::None();
std::tie(ShadowPtr, OriginPtr) =
getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
setShadow(&I, IRB.CreateAlignedLoad(ShadowTy, ShadowPtr,
Alignment.value(), "_msld"));
setShadow(&I,
IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
} else {
setShadow(&I, getCleanShadow(&I));
}
@ -2893,8 +2893,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (ClCheckAccessAddress)
insertShadowCheck(Addr, &I);
Value *Shadow =
IRB.CreateAlignedLoad(Ty, ShadowPtr, Alignment.value(), "_ldmxcsr");
Value *Shadow = IRB.CreateAlignedLoad(Ty, ShadowPtr, Alignment, "_ldmxcsr");
Value *Origin = MS.TrackOrigins ? IRB.CreateLoad(MS.OriginTy, OriginPtr)
: getCleanOrigin();
insertShadowCheck(Shadow, Origin, &I);
@ -3381,7 +3380,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRBuilder<> IRBAfter(&*NextInsn);
Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
getShadowTy(&I), getShadowPtrForRetval(&I, IRBAfter),
kShadowTLSAlignment.value(), "_msret");
kShadowTLSAlignment, "_msret");
setShadow(&I, RetvalShadow);
if (MS.TrackOrigins)
setOrigin(&I, IRBAfter.CreateLoad(MS.OriginTy,


@ -2460,7 +2460,7 @@ private:
assert(EndIndex > BeginIndex && "Empty vector!");
Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "load");
NewAI.getAlign(), "load");
return extractVector(IRB, V, BeginIndex, EndIndex, "vec");
}
@ -2468,7 +2468,7 @@ private:
assert(IntTy && "We cannot insert an integer to the alloca");
assert(!LI.isVolatile());
Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "load");
NewAI.getAlign(), "load");
V = convertValue(DL, IRB, V, IntTy);
assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
@ -2513,8 +2513,8 @@ private:
(IsLoadPastEnd && NewAllocaTy->isIntegerTy() &&
TargetTy->isIntegerTy()))) {
LoadInst *NewLI = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(),
LI.isVolatile(), LI.getName());
NewAI.getAlign(), LI.isVolatile(),
LI.getName());
if (AATags)
NewLI->setAAMetadata(AATags);
if (LI.isVolatile())
@ -2609,7 +2609,7 @@ private:
// Mix in the existing elements.
Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "load");
NewAI.getAlign(), "load");
V = insertVector(IRB, Old, V, BeginIndex, "vec");
}
StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
@ -2626,7 +2626,7 @@ private:
assert(!SI.isVolatile());
if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "oldload");
NewAI.getAlign(), "oldload");
Old = convertValue(DL, IRB, Old, IntTy);
assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
@ -2829,7 +2829,7 @@ private:
Splat = getVectorSplat(Splat, NumElements);
Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "oldload");
NewAI.getAlign(), "oldload");
V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
} else if (IntTy) {
// If this is a memset on an alloca where we can widen stores, insert the
@ -2842,7 +2842,7 @@ private:
if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
EndOffset != NewAllocaBeginOffset)) {
Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "oldload");
NewAI.getAlign(), "oldload");
Old = convertValue(DL, IRB, Old, IntTy);
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
V = insertInteger(DL, IRB, Old, V, Offset, "insert");
@ -3028,11 +3028,11 @@ private:
Value *Src;
if (VecTy && !IsWholeAlloca && !IsDest) {
Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "load");
NewAI.getAlign(), "load");
Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec");
} else if (IntTy && !IsWholeAlloca && !IsDest) {
Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "load");
NewAI.getAlign(), "load");
Src = convertValue(DL, IRB, Src, IntTy);
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
@ -3046,11 +3046,11 @@ private:
if (VecTy && !IsWholeAlloca && IsDest) {
Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "oldload");
NewAI.getAlign(), "oldload");
Src = insertVector(IRB, Old, Src, BeginIndex, "vec");
} else if (IntTy && !IsWholeAlloca && IsDest) {
Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "oldload");
NewAI.getAlign(), "oldload");
Old = convertValue(DL, IRB, Old, IntTy);
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
Src = insertInteger(DL, IRB, Old, Src, Offset, "insert");
@ -3357,7 +3357,7 @@ private:
Value *GEP =
IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep");
LoadInst *Load =
IRB.CreateAlignedLoad(Ty, GEP, Alignment.value(), Name + ".load");
IRB.CreateAlignedLoad(Ty, GEP, Alignment, Name + ".load");
if (AATags)
Load->setAAMetadata(AATags);
Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
@ -3860,7 +3860,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
getAdjustedPtr(IRB, DL, BasePtr,
APInt(DL.getIndexSizeInBits(AS), PartOffset),
PartPtrTy, BasePtr->getName() + "."),
getAdjustedAlignment(LI, PartOffset, DL).value(),
getAdjustedAlignment(LI, PartOffset, DL),
/*IsVolatile*/ false, LI->getName());
PLoad->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access,
LLVMContext::MD_access_group});
@ -4003,7 +4003,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
getAdjustedPtr(IRB, DL, LoadBasePtr,
APInt(DL.getIndexSizeInBits(AS), PartOffset),
LoadPartPtrTy, LoadBasePtr->getName() + "."),
getAdjustedAlignment(LI, PartOffset, DL).value(),
getAdjustedAlignment(LI, PartOffset, DL),
/*IsVolatile*/ false, LI->getName());
}


@ -802,7 +802,7 @@ bool ScalarizerVisitor::visitLoadInst(LoadInst &LI) {
for (unsigned I = 0; I < NumElems; ++I)
Res[I] = Builder.CreateAlignedLoad(Layout.VecTy->getElementType(), Ptr[I],
Layout.getElemAlign(I),
Align(Layout.getElemAlign(I)),
LI.getName() + ".i" + Twine(I));
gather(&LI, Res);
return true;


@ -1194,7 +1194,8 @@ bool Vectorizer::vectorizeLoadChain(
Value *Bitcast =
Builder.CreateBitCast(L0->getPointerOperand(), VecTy->getPointerTo(AS));
LoadInst *LI = Builder.CreateAlignedLoad(VecTy, Bitcast, Alignment);
LoadInst *LI =
Builder.CreateAlignedLoad(VecTy, Bitcast, MaybeAlign(Alignment));
propagateMetadata(LI, Chain);
if (VecLoadTy) {


@ -2268,7 +2268,7 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr,
}
else
NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
Group->getAlignment(), "wide.vec");
Group->getAlign(), "wide.vec");
Group->addMetadata(NewLoad);
NewLoads.push_back(NewLoad);
}
@ -2478,8 +2478,8 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
VecPtr, Alignment, BlockInMaskParts[Part], UndefValue::get(DataTy),
"wide.masked.load");
else
NewLI = Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment.value(),
"wide.load");
NewLI =
Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
// Add metadata to the load, but setVectorValue to the reverse shuffle.
addMetadata(NewLI, LI);


@ -4150,7 +4150,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
Builder.SetInsertPoint(LI);
PointerType *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace());
Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlignment());
LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign());
Value *NewV = propagateMetadata(V, E->Scalars);
if (!E->ReorderIndices.empty()) {
OrdersType Mask;