
[NFC] Modernize misc. uses of Align/MaybeAlign APIs.

Use the current getAlign() APIs where it makes sense, and use Align
instead of MaybeAlign when we know the value is non-zero.
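
For reference, the recurring before/after pattern is sketched below. This is a minimal illustration rather than code from the patch: the helper name applyNewAlignmentAPIs is invented, and the repeated setAlignment calls exist only to show the three substitutions side by side.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

// Illustrative only: applies each of the three replacement patterns in turn.
static void applyNewAlignmentAPIs(const DataLayout &DL, LoadInst *OldLI,
                                  LoadInst *NewLI) {
  // Before: NewLI->setAlignment(MaybeAlign(OldLI->getAlignment()));
  // After:  forward the existing alignment through getAlign().
  NewLI->setAlignment(OldLI->getAlign());

  // Before: NewLI->setAlignment(MaybeAlign(DL.getABITypeAlignment(Ty)));
  // After:  getABITypeAlign() already returns an Align, so no wrapping is needed.
  NewLI->setAlignment(DL.getABITypeAlign(OldLI->getType()));

  // Before: NewLI->setAlignment(MaybeAlign(1));
  // After:  Align(1), since the constant is statically known to be non-zero.
  NewLI->setAlignment(Align(1));
}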
Eli Friedman 2020-04-06 17:29:25 -07:00
parent 0a0f565e2a
commit 5b785f35a6
15 changed files with 28 additions and 33 deletions

View File

@@ -383,7 +383,7 @@ LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
Value *NewAddr = Builder.CreateBitCast(Addr, PT);
auto *NewLI = Builder.CreateLoad(NewTy, NewAddr);
-NewLI->setAlignment(MaybeAlign(LI->getAlignment()));
+NewLI->setAlignment(LI->getAlign());
NewLI->setVolatile(LI->isVolatile());
NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
LLVM_DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");
@@ -470,7 +470,7 @@ StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
Value *NewAddr = Builder.CreateBitCast(Addr, PT);
StoreInst *NewSI = Builder.CreateStore(NewVal, NewAddr);
-NewSI->setAlignment(MaybeAlign(SI->getAlignment()));
+NewSI->setAlignment(SI->getAlign());
NewSI->setVolatile(SI->isVolatile());
NewSI->setAtomic(SI->getOrdering(), SI->getSyncScopeID());
LLVM_DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
@@ -1377,7 +1377,7 @@ Value *AtomicExpand::insertRMWCmpXchgLoop(
Builder.SetInsertPoint(BB);
LoadInst *InitLoaded = Builder.CreateLoad(ResultTy, Addr);
// Atomics require at least natural alignment.
-InitLoaded->setAlignment(MaybeAlign(ResultTy->getPrimitiveSizeInBits() / 8));
+InitLoaded->setAlignment(Align(ResultTy->getPrimitiveSizeInBits() / 8));
Builder.CreateBr(LoopBB);
// Start the main loop block now that we've taken care of the preliminaries.

View File

@@ -248,8 +248,7 @@ Align IRTranslator::getMemOpAlign(const Instruction &I) {
return SI->getAlign().getValueOr(DL->getABITypeAlign(ValTy));
}
if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
-Type *ValTy = LI->getType();
-return LI->getAlign().getValueOr(DL->getABITypeAlign(ValTy));
+return DL->getValueOrABITypeAlignment(LI->getAlign(), LI->getType());
}
if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
// TODO(PR27168): This instruction has no alignment attribute, but unlike

View File

@@ -305,7 +305,7 @@ doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote,
// of the previous load.
LoadInst *newLoad =
IRB.CreateLoad(OrigLoad->getType(), V, V->getName() + ".val");
-newLoad->setAlignment(MaybeAlign(OrigLoad->getAlignment()));
+newLoad->setAlignment(OrigLoad->getAlign());
// Transfer the AA info too.
AAMDNodes AAInfo;
OrigLoad->getAAMetadata(AAInfo);

View File

@@ -5653,7 +5653,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
constructPointer(PointeeTy->getPointerTo(), Base,
PrivStructLayout->getElementOffset(u), IRB, DL);
LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
-L->setAlignment(MaybeAlign(1));
+L->setAlignment(Align(1));
ReplacementValues.push_back(L);
}
} else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
@@ -5664,12 +5664,12 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
Value *Ptr =
constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL);
LoadInst *L = new LoadInst(PointeePtrTy, Ptr, "", IP);
-L->setAlignment(MaybeAlign(1));
+L->setAlignment(Align(1));
ReplacementValues.push_back(L);
}
} else {
LoadInst *L = new LoadInst(PrivType, Base, "", IP);
-L->setAlignment(MaybeAlign(1));
+L->setAlignment(Align(1));
ReplacementValues.push_back(L);
}
}

View File

@@ -124,7 +124,7 @@ Instruction *InstCombiner::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
auto *SI = new StoreInst(RMWI.getValOperand(),
RMWI.getPointerOperand(), &RMWI);
SI->setAtomic(Ordering, RMWI.getSyncScopeID());
-SI->setAlignment(MaybeAlign(DL.getABITypeAlignment(RMWI.getType())));
+SI->setAlignment(DL.getABITypeAlign(RMWI.getType()));
return eraseInstFromFunction(RMWI);
}
@@ -150,8 +150,8 @@ Instruction *InstCombiner::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
Ordering != AtomicOrdering::Monotonic)
return nullptr;
-LoadInst *Load = new LoadInst(RMWI.getType(), RMWI.getPointerOperand());
-Load->setAtomic(Ordering, RMWI.getSyncScopeID());
-Load->setAlignment(MaybeAlign(DL.getABITypeAlignment(RMWI.getType())));
+LoadInst *Load = new LoadInst(RMWI.getType(), RMWI.getPointerOperand(), "",
+false, DL.getABITypeAlign(RMWI.getType()),
+Ordering, RMWI.getSyncScopeID());
return Load;
}

View File

@@ -1236,7 +1236,7 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
}
}
-const MaybeAlign ShadowAlign(Align * DFS.ShadowWidthBytes);
+const llvm::Align ShadowAlign(Align * DFS.ShadowWidthBytes);
SmallVector<const Value *, 2> Objs;
GetUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout());
bool AllConstants = true;

View File

@@ -1266,7 +1266,7 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
auto *NewLoad = new LoadInst(
LI->getType(), LoadPtr, LI->getName() + ".pre", LI->isVolatile(),
-MaybeAlign(LI->getAlignment()), LI->getOrdering(), LI->getSyncScopeID(),
+LI->getAlign(), LI->getOrdering(), LI->getSyncScopeID(),
UnavailablePred->getTerminator());
NewLoad->setDebugLoc(LI->getDebugLoc());

View File

@@ -1449,7 +1449,7 @@ bool JumpThreadingPass::SimplifyPartiallyRedundantLoad(LoadInst *LoadI) {
"Can't handle critical edge here!");
LoadInst *NewVal = new LoadInst(
LoadI->getType(), LoadedPtr->DoPHITranslation(LoadBB, UnavailablePred),
-LoadI->getName() + ".pr", false, MaybeAlign(LoadI->getAlignment()),
+LoadI->getName() + ".pr", false, LoadI->getAlign(),
LoadI->getOrdering(), LoadI->getSyncScopeID(),
UnavailablePred->getTerminator());
NewVal->setDebugLoc(LoadI->getDebugLoc());

View File

@@ -1765,7 +1765,7 @@ public:
StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
if (UnorderedAtomic)
NewSI->setOrdering(AtomicOrdering::Unordered);
-NewSI->setAlignment(MaybeAlign(Alignment));
+NewSI->setAlignment(Align(Alignment));
NewSI->setDebugLoc(DL);
if (AATags)
NewSI->setAAMetadata(AATags);
@@ -1998,8 +1998,7 @@ bool llvm::promoteLoopAccessesToScalars(
if (!DereferenceableInPH) {
DereferenceableInPH = isDereferenceableAndAlignedPointer(
Store->getPointerOperand(), Store->getValueOperand()->getType(),
-MaybeAlign(Store->getAlignment()), MDL,
-Preheader->getTerminator(), DT);
+Store->getAlign(), MDL, Preheader->getTerminator(), DT);
}
} else
return false; // Not a load or store.
@@ -2084,7 +2083,7 @@ bool llvm::promoteLoopAccessesToScalars(
SomePtr->getName() + ".promoted", Preheader->getTerminator());
if (SawUnorderedAtomic)
PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
-PreheaderLoad->setAlignment(MaybeAlign(Alignment));
+PreheaderLoad->setAlignment(Align(Alignment));
PreheaderLoad->setDebugLoc(DL);
if (AATags)
PreheaderLoad->setAAMetadata(AATags);

View File

@@ -436,8 +436,7 @@ public:
PH->getTerminator());
Value *Initial = new LoadInst(
Cand.Load->getType(), InitialPtr, "load_initial",
-/* isVolatile */ false, MaybeAlign(Cand.Load->getAlignment()),
-PH->getTerminator());
+/* isVolatile */ false, Cand.Load->getAlign(), PH->getTerminator());
PHINode *PHI = PHINode::Create(Initial->getType(), 2, "store_forwarded",
&L->getHeader()->front());

View File

@@ -1118,7 +1118,7 @@ public:
Builder.SetInsertPoint(Copy, Copy->begin());
AllocaInst *NewLd =
Builder.CreateAlloca(Load->getType(), Load->getPointerAddressSpace());
-Builder.CreateMemCpy(NewLd, MaybeAlign(NewLd->getAlignment()),
+Builder.CreateMemCpy(NewLd, NewLd->getAlign(),
Load->getPointerOperand(), Load->getAlign(),
LoadLoc.Size.getValue());
Builder.SetInsertPoint(Fusion, Fusion->begin());

View File

@@ -1373,8 +1373,8 @@ static void speculateSelectInstLoads(SelectInst &SI) {
NumLoadsSpeculated += 2;
// Transfer alignment and AA info if present.
-TL->setAlignment(MaybeAlign(LI->getAlignment()));
-FL->setAlignment(MaybeAlign(LI->getAlignment()));
+TL->setAlignment(LI->getAlign());
+FL->setAlignment(LI->getAlign());
AAMDNodes Tags;
LI->getAAMetadata(Tags);

View File

@@ -1178,7 +1178,7 @@ static unsigned enforceKnownAlignment(Value *V, unsigned Alignment,
// then don't round up. This avoids dynamic stack realignment.
if (DL.exceedsNaturalStackAlignment(Align(PrefAlign)))
return Alignment;
-AI->setAlignment(MaybeAlign(PrefAlign));
+AI->setAlignment(Align(PrefAlign));
return PrefAlign;
}
@@ -1195,7 +1195,7 @@ static unsigned enforceKnownAlignment(Value *V, unsigned Alignment,
if (!GO->canIncreaseAlignment())
return Alignment;
-GO->setAlignment(MaybeAlign(PrefAlign));
+GO->setAlignment(Align(PrefAlign));
return PrefAlign;
}

View File

@@ -513,7 +513,7 @@ Value *getLoadValueForLoad(LoadInst *SrcVal, unsigned Offset, Type *LoadTy,
PtrVal = Builder.CreateBitCast(PtrVal, DestPTy);
LoadInst *NewLoad = Builder.CreateLoad(DestTy, PtrVal);
NewLoad->takeName(SrcVal);
-NewLoad->setAlignment(MaybeAlign(SrcVal->getAlignment()));
+NewLoad->setAlignment(SrcVal->getAlign());
LLVM_DEBUG(dbgs() << "GVN WIDENED LOAD: " << *SrcVal << "\n");
LLVM_DEBUG(dbgs() << "TO: " << *NewLoad << "\n");

View File

@@ -4371,11 +4371,9 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
if (getTreeEntry(PO))
ExternalUses.push_back(ExternalUser(PO, cast<User>(VecPtr), 0));
-MaybeAlign Alignment = MaybeAlign(LI->getAlignment());
-LI = Builder.CreateLoad(VecTy, VecPtr);
-if (!Alignment)
-Alignment = MaybeAlign(DL->getABITypeAlignment(ScalarLoadTy));
-LI->setAlignment(Alignment);
+Align Alignment = DL->getValueOrABITypeAlignment(LI->getAlign(),
+ScalarLoadTy);
+LI = Builder.CreateAlignedLoad(VecTy, VecPtr, Alignment);
Value *V = propagateMetadata(LI, E->Scalars);
if (IsReorder) {
OrdersType Mask;