Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2025-01-31 20:51:52 +01:00)
Revert "[Local] Update getOrEnforceKnownAlignment/getKnownAlignment to use Align/MaybeAlign."
This is breaking the clang build. This reverts commit 897409fb56f4525639b0e47e88960f24cd91c924.
This commit is contained in:
parent 065c040876
commit d3c8547d05
@@ -267,18 +267,18 @@ AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = nullptr);
 /// so if alignment is important, a more reliable approach is to simply align
 /// all global variables and allocation instructions to their preferred
 /// alignment from the beginning.
-Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
-                                 const DataLayout &DL,
-                                 const Instruction *CxtI = nullptr,
-                                 AssumptionCache *AC = nullptr,
-                                 const DominatorTree *DT = nullptr);
+unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
+                                    const DataLayout &DL,
+                                    const Instruction *CxtI = nullptr,
+                                    AssumptionCache *AC = nullptr,
+                                    const DominatorTree *DT = nullptr);

 /// Try to infer an alignment for the specified pointer.
-inline Align getKnownAlignment(Value *V, const DataLayout &DL,
-                               const Instruction *CxtI = nullptr,
-                               AssumptionCache *AC = nullptr,
-                               const DominatorTree *DT = nullptr) {
-  return getOrEnforceKnownAlignment(V, MaybeAlign(), DL, CxtI, AC, DT);
+inline unsigned getKnownAlignment(Value *V, const DataLayout &DL,
+                                  const Instruction *CxtI = nullptr,
+                                  AssumptionCache *AC = nullptr,
+                                  const DominatorTree *DT = nullptr) {
+  return getOrEnforceKnownAlignment(V, 0, DL, CxtI, AC, DT);
 }

 /// Create a call that matches the invoke \p II in terms of arguments,
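For readers skimming the revert, here is a minimal, hypothetical usage sketch of the unsigned-based interface restored by the hunk above; the helper name improveLoadAlignment is invented for illustration and is not part of this commit, but the calls mirror the InstCombine load/store hunks further down.

// Illustrative only: exercising the restored unsigned-based helpers.
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

// Hypothetical helper: raise a load's alignment to whatever can be proven
// (or enforced) for its pointer operand.
static void improveLoadAlignment(LoadInst &LI, const DataLayout &DL,
                                 AssumptionCache *AC, const DominatorTree *DT) {
  // A non-zero PrefAlign asks the helper to also bump the alignment of the
  // underlying alloca or global when that is safe; passing 0 (as the restored
  // getKnownAlignment wrapper does) only infers.
  unsigned Enforced = getOrEnforceKnownAlignment(
      LI.getPointerOperand(), DL.getPrefTypeAlignment(LI.getType()), DL, &LI,
      AC, DT);
  if (Enforced > LI.getAlignment())
    LI.setAlignment(MaybeAlign(Enforced));
}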
@@ -1939,14 +1939,12 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
   // If this is a memcpy (or similar) then we may be able to improve the
   // alignment
   if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
-    Align DestAlign = getKnownAlignment(MI->getDest(), *DL);
-    MaybeAlign MIDestAlign = MI->getDestAlign();
-    if (!MIDestAlign || DestAlign > *MIDestAlign)
+    unsigned DestAlign = getKnownAlignment(MI->getDest(), *DL);
+    if (DestAlign > MI->getDestAlignment())
       MI->setDestAlignment(DestAlign);
     if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
-      MaybeAlign MTISrcAlign = MTI->getSourceAlign();
-      Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL);
-      if (!MTISrcAlign || SrcAlign > *MTISrcAlign)
+      unsigned SrcAlign = getKnownAlignment(MTI->getSource(), *DL);
+      if (SrcAlign > MTI->getSourceAlignment())
         MTI->setSourceAlignment(SrcAlign);
     }
   }
@@ -117,16 +117,16 @@ static Constant *getNegativeIsTrueBoolVec(ConstantDataVector *V) {
 }

 Instruction *InstCombiner::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
-  Align DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
-  MaybeAlign CopyDstAlign = MI->getDestAlign();
-  if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
+  unsigned DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
+  unsigned CopyDstAlign = MI->getDestAlignment();
+  if (CopyDstAlign < DstAlign){
     MI->setDestAlignment(DstAlign);
     return MI;
   }

-  Align SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
-  MaybeAlign CopySrcAlign = MI->getSourceAlign();
-  if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
+  unsigned SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
+  unsigned CopySrcAlign = MI->getSourceAlignment();
+  if (CopySrcAlign < SrcAlign) {
     MI->setSourceAlignment(SrcAlign);
     return MI;
   }
@@ -234,10 +234,9 @@ Instruction *InstCombiner::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
 }

 Instruction *InstCombiner::SimplifyAnyMemSet(AnyMemSetInst *MI) {
-  const Align KnownAlignment =
+  const unsigned KnownAlignment =
       getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
-  MaybeAlign MemSetAlign = MI->getDestAlign();
-  if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
+  if (MI->getDestAlignment() < KnownAlignment) {
     MI->setDestAlignment(KnownAlignment);
     return MI;
   }
@@ -2457,7 +2456,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   case Intrinsic::ppc_altivec_lvx:
   case Intrinsic::ppc_altivec_lvxl:
     // Turn PPC lvx -> load if the pointer is known aligned.
-    if (getOrEnforceKnownAlignment(II->getArgOperand(0), Align(16), DL, II, &AC,
+    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
                                    &DT) >= 16) {
       Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
@@ -2474,7 +2473,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   case Intrinsic::ppc_altivec_stvx:
   case Intrinsic::ppc_altivec_stvxl:
     // Turn stvx -> store if the pointer is known aligned.
-    if (getOrEnforceKnownAlignment(II->getArgOperand(1), Align(16), DL, II, &AC,
+    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
                                    &DT) >= 16) {
       Type *OpPtrTy =
           PointerType::getUnqual(II->getArgOperand(0)->getType());
@@ -2491,7 +2490,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   }
   case Intrinsic::ppc_qpx_qvlfs:
     // Turn PPC QPX qvlfs -> load if the pointer is known aligned.
-    if (getOrEnforceKnownAlignment(II->getArgOperand(0), Align(16), DL, II, &AC,
+    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
                                    &DT) >= 16) {
       Type *VTy =
           VectorType::get(Builder.getFloatTy(),
@@ -2504,7 +2503,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     break;
   case Intrinsic::ppc_qpx_qvlfd:
     // Turn PPC QPX qvlfd -> load if the pointer is known aligned.
-    if (getOrEnforceKnownAlignment(II->getArgOperand(0), Align(32), DL, II, &AC,
+    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, &AC,
                                    &DT) >= 32) {
       Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
@@ -2513,7 +2512,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     break;
   case Intrinsic::ppc_qpx_qvstfs:
     // Turn PPC QPX qvstfs -> store if the pointer is known aligned.
-    if (getOrEnforceKnownAlignment(II->getArgOperand(1), Align(16), DL, II, &AC,
+    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
                                    &DT) >= 16) {
       Type *VTy = VectorType::get(
           Builder.getFloatTy(),
@@ -2526,7 +2525,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     break;
   case Intrinsic::ppc_qpx_qvstfd:
     // Turn PPC QPX qvstfd -> store if the pointer is known aligned.
-    if (getOrEnforceKnownAlignment(II->getArgOperand(1), Align(32), DL, II, &AC,
+    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, &AC,
                                    &DT) >= 32) {
       Type *OpPtrTy =
           PointerType::getUnqual(II->getArgOperand(0)->getType());
@@ -3350,8 +3349,9 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     break;

   case Intrinsic::arm_neon_vld1: {
-    Align MemAlign = getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT);
-    if (Value *V = simplifyNeonVld1(*II, MemAlign.value(), Builder))
+    unsigned MemAlign = getKnownAlignment(II->getArgOperand(0),
+                                          DL, II, &AC, &DT);
+    if (Value *V = simplifyNeonVld1(*II, MemAlign, Builder))
       return replaceInstUsesWith(*II, V);
     break;
   }
@@ -3369,13 +3369,14 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   case Intrinsic::arm_neon_vst2lane:
   case Intrinsic::arm_neon_vst3lane:
   case Intrinsic::arm_neon_vst4lane: {
-    Align MemAlign = getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT);
+    unsigned MemAlign =
+        getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT);
     unsigned AlignArg = II->getNumArgOperands() - 1;
     ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
-    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign.value())
+    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign)
       return replaceOperand(*II, AlignArg,
                             ConstantInt::get(Type::getInt32Ty(II->getContext()),
-                                             MemAlign.value(), false));
+                                             MemAlign, false));
     break;
   }

@@ -398,10 +398,9 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
   // is only subsequently read.
   SmallVector<Instruction *, 4> ToDelete;
   if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
-    MaybeAlign AllocaAlign = AI.getAlign();
-    Align SourceAlign = getOrEnforceKnownAlignment(
-        Copy->getSource(), AllocaAlign, DL, &AI, &AC, &DT);
-    if ((!AllocaAlign || *AllocaAlign <= SourceAlign) &&
+    unsigned SourceAlign = getOrEnforceKnownAlignment(
+        Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
+    if (AI.getAlignment() <= SourceAlign &&
         isDereferenceableForAllocaSize(Copy->getSource(), &AI, DL)) {
       LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
       LLVM_DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
@@ -957,16 +956,16 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
     return Res;

   // Attempt to improve the alignment.
-  Align KnownAlign = getOrEnforceKnownAlignment(
-      Op, DL.getPrefTypeAlign(LI.getType()), DL, &LI, &AC, &DT);
-  MaybeAlign LoadAlign = LI.getAlign();
-  Align EffectiveLoadAlign =
-      LoadAlign ? *LoadAlign : DL.getABITypeAlign(LI.getType());
+  unsigned KnownAlign = getOrEnforceKnownAlignment(
+      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
+  unsigned LoadAlign = LI.getAlignment();
+  unsigned EffectiveLoadAlign =
+      LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

   if (KnownAlign > EffectiveLoadAlign)
-    LI.setAlignment(KnownAlign);
+    LI.setAlignment(MaybeAlign(KnownAlign));
   else if (LoadAlign == 0)
-    LI.setAlignment(EffectiveLoadAlign);
+    LI.setAlignment(MaybeAlign(EffectiveLoadAlign));

   // Replace GEP indices if possible.
   if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
@@ -1362,11 +1361,11 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
     return eraseInstFromFunction(SI);

   // Attempt to improve the alignment.
-  const Align KnownAlign = getOrEnforceKnownAlignment(
-      Ptr, DL.getPrefTypeAlign(Val->getType()), DL, &SI, &AC, &DT);
-  const MaybeAlign StoreAlign = SI.getAlign();
+  const Align KnownAlign = Align(getOrEnforceKnownAlignment(
+      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT));
+  const MaybeAlign StoreAlign = MaybeAlign(SI.getAlignment());
   const Align EffectiveStoreAlign =
-      StoreAlign ? *StoreAlign : DL.getABITypeAlign(Val->getType());
+      StoreAlign ? *StoreAlign : Align(DL.getABITypeAlignment(Val->getType()));

   if (KnownAlign > EffectiveStoreAlign)
     SI.setAlignment(KnownAlign);
@@ -1277,8 +1277,8 @@ bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) {
   AssumptionCache &AC = LookupAssumptionCache();
   DominatorTree &DT = LookupDomTree();
   if (MDep->getSourceAlign() < ByValAlign &&
-      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL, &CB, &AC,
-                                 &DT) < ByValAlign)
+      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign->value(), DL,
+                                 &CB, &AC, &DT) < ByValAlign->value())
     return false;

   // The address space of the memcpy source must match the byval argument
@@ -1374,8 +1374,8 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,

   // If the pointer is already known to be sufficiently aligned, or if we can
   // round it up to a larger alignment, then we don't need a temporary.
-  if (getOrEnforceKnownAlignment(Arg, Align(ByValAlignment), DL, TheCall,
-                                 AC) >= ByValAlignment)
+  if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall, AC) >=
+      ByValAlignment)
     return Arg;

   // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
@@ -1156,8 +1156,9 @@ bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
 /// often possible though. If alignment is important, a more reliable approach
 /// is to simply align all global variables and allocation instructions to
 /// their preferred alignment from the beginning.
-static Align enforceKnownAlignment(Value *V, Align Alignment, Align PrefAlign,
-                                   const DataLayout &DL) {
+static unsigned enforceKnownAlignment(Value *V, unsigned Alignment,
+                                      unsigned PrefAlign,
+                                      const DataLayout &DL) {
   assert(PrefAlign > Alignment);

   V = V->stripPointerCasts();
@@ -1169,21 +1170,21 @@ static Align enforceKnownAlignment(Value *V, Align Alignment, Align PrefAlign,
     // stripPointerCasts recurses through infinite layers of bitcasts,
     // while computeKnownBits is not allowed to traverse more than 6
     // levels.
-    Alignment = max(AI->getAlign(), Alignment);
+    Alignment = std::max(AI->getAlignment(), Alignment);
     if (PrefAlign <= Alignment)
       return Alignment;

     // If the preferred alignment is greater than the natural stack alignment
     // then don't round up. This avoids dynamic stack realignment.
-    if (DL.exceedsNaturalStackAlignment(PrefAlign))
+    if (DL.exceedsNaturalStackAlignment(Align(PrefAlign)))
       return Alignment;
-    AI->setAlignment(PrefAlign);
+    AI->setAlignment(Align(PrefAlign));
     return PrefAlign;
   }

   if (auto *GO = dyn_cast<GlobalObject>(V)) {
     // TODO: as above, this shouldn't be necessary.
-    Alignment = max(GO->getAlign(), Alignment);
+    Alignment = std::max(GO->getAlignment(), Alignment);
     if (PrefAlign <= Alignment)
       return Alignment;

@@ -1194,18 +1195,18 @@ static Align enforceKnownAlignment(Value *V, Align Alignment, Align PrefAlign,
     if (!GO->canIncreaseAlignment())
       return Alignment;

-    GO->setAlignment(PrefAlign);
+    GO->setAlignment(Align(PrefAlign));
     return PrefAlign;
   }

   return Alignment;
 }

-Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
-                                       const DataLayout &DL,
-                                       const Instruction *CxtI,
-                                       AssumptionCache *AC,
-                                       const DominatorTree *DT) {
+unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
+                                          const DataLayout &DL,
+                                          const Instruction *CxtI,
+                                          AssumptionCache *AC,
+                                          const DominatorTree *DT) {
   assert(V->getType()->isPointerTy() &&
          "getOrEnforceKnownAlignment expects a pointer!");

@@ -1217,13 +1218,13 @@ Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
   // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
   TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent);

-  Align Alignment = Align(1u << std::min(Known.getBitWidth() - 1, TrailZ));
+  unsigned Align = 1u << std::min(Known.getBitWidth() - 1, TrailZ);

-  if (PrefAlign && *PrefAlign > Alignment)
-    Alignment = enforceKnownAlignment(V, Alignment, *PrefAlign, DL);
+  if (PrefAlign > Align)
+    Align = enforceKnownAlignment(V, Align, PrefAlign, DL);

   // We don't need to make any adjustment.
-  return Alignment;
+  return Align;
 }

 ///===---------------------------------------------------------------------===//
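For context, and not part of the commit: the restored line `unsigned Align = 1u << std::min(Known.getBitWidth() - 1, TrailZ);` turns the number of trailing zero bits that computeKnownBits proves for the pointer into an alignment. A self-contained sketch of that arithmetic, using an illustrative cap in place of the clamp the real code takes from Value::MaxAlignmentExponent:

#include <algorithm>
#include <cassert>

// Illustrative stand-in for llvm::Value::MaxAlignmentExponent; the exact
// value is not important for the example.
static const unsigned MaxAlignmentExponentForExample = 29;

// A pointer whose low TrailZ bits are known to be zero is aligned to at
// least 1 << TrailZ bytes.
static unsigned alignmentFromTrailingZeros(unsigned BitWidth, unsigned TrailZ) {
  TrailZ = std::min(TrailZ, MaxAlignmentExponentForExample);
  return 1u << std::min(BitWidth - 1, TrailZ);
}

int main() {
  assert(alignmentFromTrailingZeros(64, 4) == 16); // low 4 bits zero -> 16-byte aligned
  assert(alignmentFromTrailingZeros(64, 0) == 1);  // nothing known -> 1-byte aligned
  return 0;
}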
@@ -1015,11 +1015,11 @@ bool Vectorizer::vectorizeStoreChain(
       vectorizeStoreChain(Chains.second, InstructionsProcessed);
     }

-    Align NewAlign = getOrEnforceKnownAlignment(S0->getPointerOperand(),
-                                                Align(StackAdjustedAlignment),
-                                                DL, S0, nullptr, &DT);
-    if (NewAlign >= Alignment)
-      Alignment = NewAlign;
+    unsigned NewAlign = getOrEnforceKnownAlignment(S0->getPointerOperand(),
+                                                   StackAdjustedAlignment,
+                                                   DL, S0, nullptr, &DT);
+    if (NewAlign >= Alignment.value())
+      Alignment = Align(NewAlign);
     else
       return false;
   }
@@ -1160,11 +1160,10 @@ bool Vectorizer::vectorizeLoadChain(
       vectorizeLoadChain(Chains.second, InstructionsProcessed);
     }

-    Align NewAlign = getOrEnforceKnownAlignment(L0->getPointerOperand(),
-                                                Align(StackAdjustedAlignment),
-                                                DL, L0, nullptr, &DT);
-    if (NewAlign >= Alignment)
-      Alignment = NewAlign;
+    unsigned NewAlign = getOrEnforceKnownAlignment(
+        L0->getPointerOperand(), StackAdjustedAlignment, DL, L0, nullptr, &DT);
+    if (NewAlign >= Alignment.value())
+      Alignment = Align(NewAlign);
     else
       return false;
   }