Revert "[TTI] Unify FavorPostInc and FavorBackedgeIndex into getPreferredAddressingMode"
This reverts commit cd6de0e8de4a5fd558580be4b1a07116914fc8ed.
commit dda4f352fa
parent c1c4d25c71
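
For context: the change being reverted had replaced the two boolean TTI hooks shouldFavorPostInc() and shouldFavorBackedgeIndex(const Loop *) with a single getPreferredAddressingMode(const Loop *, ScalarEvolution *) hook returning an AddressingModeKind (AMK_PreIndexed, AMK_PostIndexed or AMK_None). The hunks below undo that across the TargetTransformInfo interface, the ARM and Hexagon TTI implementations, and LoopStrengthReduce. The following is a minimal standalone sketch, not the LLVM headers: the struct names, the favorsPostInc() helpers, and the stubbed Loop/ScalarEvolution types are illustrative only, and only show how a caller such as LSR phrases the same query against the two interface shapes.

#include <cassert>

// Stand-ins for llvm::Loop and llvm::ScalarEvolution; their details do not
// matter for the shape of the hook.
struct Loop {};
struct ScalarEvolution {};

// Interface shape restored by this revert: two independent boolean hooks.
struct TTIWithBooleans {
  virtual ~TTIWithBooleans() = default;
  virtual bool shouldFavorPostInc() const { return false; }
  virtual bool shouldFavorBackedgeIndex(const Loop *) const { return false; }
};

// Interface shape introduced by cd6de0e8 and removed again by this revert:
// one hook returning an enum.
struct TTIWithEnum {
  enum AddressingModeKind { AMK_PreIndexed, AMK_PostIndexed, AMK_None };
  virtual ~TTIWithEnum() = default;
  virtual AddressingModeKind
  getPreferredAddressingMode(const Loop *, ScalarEvolution *) const {
    return AMK_None;
  }
};

// The same "does the target want post-increment addressing?" question,
// phrased against each shape, roughly as LoopStrengthReduce does below.
static bool favorsPostInc(const TTIWithBooleans &TTI) {
  return TTI.shouldFavorPostInc();
}
static bool favorsPostInc(const TTIWithEnum &TTI, const Loop *L,
                          ScalarEvolution *SE) {
  return TTI.getPreferredAddressingMode(L, SE) == TTIWithEnum::AMK_PostIndexed;
}

int main() {
  Loop L;
  ScalarEvolution SE;
  TTIWithBooleans OldStyle;
  TTIWithEnum NewStyle;
  assert(!favorsPostInc(OldStyle));          // default: no preference
  assert(!favorsPostInc(NewStyle, &L, &SE)); // default: AMK_None
  return 0;
}

The practical difference visible in the LoopStrengthReduce hunks is that the enum hook answers one combined question per loop, while the restored booleans are queried independently (shouldFavorBackedgeIndex in the LSRInstance constructor, shouldFavorPostInc in RateRegister and the search-space narrowing).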
@@ -638,15 +638,13 @@ public:
                   DominatorTree *DT, AssumptionCache *AC,
                   TargetLibraryInfo *LibInfo) const;
 
-  enum AddressingModeKind {
-    AMK_PreIndexed,
-    AMK_PostIndexed,
-    AMK_None
-  };
+  /// \return True is LSR should make efforts to create/preserve post-inc
+  /// addressing mode expressions.
+  bool shouldFavorPostInc() const;
 
-  /// Return the preferred addressing mode LSR should make efforts to generate.
-  AddressingModeKind getPreferredAddressingMode(const Loop *L,
-                                                ScalarEvolution *SE) const;
+  /// Return true if LSR should make efforts to generate indexed addressing
+  /// modes that operate across loop iterations.
+  bool shouldFavorBackedgeIndex(const Loop *L) const;
 
   /// Return true if the target supports masked store.
   bool isLegalMaskedStore(Type *DataType, Align Alignment) const;
@@ -1456,8 +1454,8 @@ public:
   virtual bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
                           LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC,
                           TargetLibraryInfo *LibInfo) = 0;
-  virtual AddressingModeKind
-  getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const = 0;
+  virtual bool shouldFavorPostInc() const = 0;
+  virtual bool shouldFavorBackedgeIndex(const Loop *L) const = 0;
   virtual bool isLegalMaskedStore(Type *DataType, Align Alignment) = 0;
   virtual bool isLegalMaskedLoad(Type *DataType, Align Alignment) = 0;
   virtual bool isLegalNTStore(Type *DataType, Align Alignment) = 0;
@@ -1798,10 +1796,9 @@ public:
                   TargetLibraryInfo *LibInfo) override {
     return Impl.canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
   }
-  AddressingModeKind
-  getPreferredAddressingMode(const Loop *L,
-                             ScalarEvolution *SE) const override {
-    return Impl.getPreferredAddressingMode(L, SE);
+  bool shouldFavorPostInc() const override { return Impl.shouldFavorPostInc(); }
+  bool shouldFavorBackedgeIndex(const Loop *L) const override {
+    return Impl.shouldFavorBackedgeIndex(L);
   }
   bool isLegalMaskedStore(Type *DataType, Align Alignment) override {
     return Impl.isLegalMaskedStore(DataType, Alignment);
@@ -209,10 +209,9 @@ public:
     return false;
   }
 
-  TTI::AddressingModeKind
-  getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const {
-    return TTI::AMK_None;
-  }
+  bool shouldFavorPostInc() const { return false; }
+
+  bool shouldFavorBackedgeIndex(const Loop *L) const { return false; }
 
   bool isLegalMaskedStore(Type *DataType, Align Alignment) const {
     return false;
@@ -409,10 +409,12 @@ bool TargetTransformInfo::canSaveCmp(Loop *L, BranchInst **BI,
   return TTIImpl->canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
 }
 
-TTI::AddressingModeKind
-TargetTransformInfo::getPreferredAddressingMode(const Loop *L,
-                                                ScalarEvolution *SE) const {
-  return TTIImpl->getPreferredAddressingMode(L, SE);
+bool TargetTransformInfo::shouldFavorPostInc() const {
+  return TTIImpl->shouldFavorPostInc();
+}
+
+bool TargetTransformInfo::shouldFavorBackedgeIndex(const Loop *L) const {
+  return TTIImpl->shouldFavorBackedgeIndex(L);
 }
 
 bool TargetTransformInfo::isLegalMaskedStore(Type *DataType,
@@ -100,20 +100,18 @@ bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
   return MatchExact && MatchSubset;
 }
 
-TTI::AddressingModeKind
-ARMTTIImpl::getPreferredAddressingMode(const Loop *L,
-                                       ScalarEvolution *SE) const {
-  if (ST->hasMVEIntegerOps())
-    return TTI::AMK_PostIndexed;
-
-  if (L->getHeader()->getParent()->hasOptSize())
-    return TTI::AMK_None;
-
-  if (ST->isMClass() && ST->isThumb2() &&
-      L->getNumBlocks() == 1)
-    return TTI::AMK_PreIndexed;
-
-  return TTI::AMK_None;
-}
+bool ARMTTIImpl::shouldFavorBackedgeIndex(const Loop *L) const {
+  if (L->getHeader()->getParent()->hasOptSize())
+    return false;
+  if (ST->hasMVEIntegerOps())
+    return false;
+  return ST->isMClass() && ST->isThumb2() && L->getNumBlocks() == 1;
+}
+
+bool ARMTTIImpl::shouldFavorPostInc() const {
+  if (ST->hasMVEIntegerOps())
+    return true;
+  return false;
+}
 
 Optional<Instruction *>
@@ -103,8 +103,8 @@ public:
 
   bool enableInterleavedAccessVectorization() { return true; }
 
-  TTI::AddressingModeKind
-  getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const;
+  bool shouldFavorBackedgeIndex(const Loop *L) const;
+  bool shouldFavorPostInc() const;
 
   /// Floating-point computation using ARMv8 AArch32 Advanced
   /// SIMD instructions remains unchanged from ARMv7. Only AArch64 SIMD
@@ -80,9 +80,8 @@ void HexagonTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
   }
 }
 
-TTI::AddressingModeKind
-HexagonTTIImpl::getPreferredAddressingMode(const Loop *L,
-                                           ScalarEvolution *SE) const {
-  return TTI::AMK_PostIndexed;
+bool HexagonTTIImpl::shouldFavorPostInc() const {
+  return true;
 }
 
 /// --- Vector TTI begin ---
@@ -67,8 +67,7 @@ public:
                              TTI::PeelingPreferences &PP);
 
   /// Bias LSR towards creating post-increment opportunities.
-  TTI::AddressingModeKind getPreferredAddressingMode(const Loop *L,
-                                                     ScalarEvolution *SE) const;
+  bool shouldFavorPostInc() const;
 
   // L1 cache prefetch.
   unsigned getPrefetchDistance() const override;
@@ -1227,15 +1227,13 @@ static unsigned getSetupCost(const SCEV *Reg, unsigned Depth) {
 /// Tally up interesting quantities from the given register.
 void Cost::RateRegister(const Formula &F, const SCEV *Reg,
                         SmallPtrSetImpl<const SCEV *> &Regs) {
-  TTI::AddressingModeKind AMK = TTI->getPreferredAddressingMode(L, SE);
-
   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
     // If this is an addrec for another loop, it should be an invariant
     // with respect to L since L is the innermost loop (at least
     // for now LSR only handles innermost loops).
     if (AR->getLoop() != L) {
       // If the AddRec exists, consider it's register free and leave it alone.
-      if (isExistingPhi(AR, *SE) && AMK != TTI::AMK_PostIndexed)
+      if (isExistingPhi(AR, *SE) && !TTI->shouldFavorPostInc())
         return;
 
       // It is bad to allow LSR for current loop to add induction variables
@@ -1256,11 +1254,13 @@ void Cost::RateRegister(const Formula &F, const SCEV *Reg,
 
     // If the step size matches the base offset, we could use pre-indexed
     // addressing.
-    if (AMK == TTI::AMK_PreIndexed) {
+    if (TTI->shouldFavorBackedgeIndex(L)) {
       if (auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE)))
         if (Step->getAPInt() == F.BaseOffset)
           LoopCost = 0;
-    } else if (AMK == TTI::AMK_PostIndexed) {
+    }
+
+    if (TTI->shouldFavorPostInc()) {
       const SCEV *LoopStep = AR->getStepRecurrence(*SE);
       if (isa<SCEVConstant>(LoopStep)) {
         const SCEV *LoopStart = AR->getStart();
@@ -3575,8 +3575,7 @@ void LSRInstance::GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx,
   // may generate a post-increment operator. The reason is that the
   // reassociations cause extra base+register formula to be created,
   // and possibly chosen, but the post-increment is more efficient.
-  TTI::AddressingModeKind AMK = TTI.getPreferredAddressingMode(L, &SE);
-  if (AMK == TTI::AMK_PostIndexed && mayUsePostIncMode(TTI, LU, BaseReg, L, SE))
+  if (TTI.shouldFavorPostInc() && mayUsePostIncMode(TTI, LU, BaseReg, L, SE))
     return;
   SmallVector<const SCEV *, 8> AddOps;
   const SCEV *Remainder = CollectSubexprs(BaseReg, nullptr, AddOps, L, SE);
@@ -4240,8 +4239,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
           NewF.BaseOffset = (uint64_t)NewF.BaseOffset + Imm;
           if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset,
                           LU.Kind, LU.AccessTy, NewF)) {
-            if (TTI.getPreferredAddressingMode(this->L, &SE) ==
-                    TTI::AMK_PostIndexed &&
+            if (TTI.shouldFavorPostInc() &&
                 mayUsePostIncMode(TTI, LU, OrigReg, this->L, SE))
               continue;
             if (!TTI.isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm))
@@ -4681,7 +4679,7 @@ void LSRInstance::NarrowSearchSpaceByFilterFormulaWithSameScaledReg() {
 /// If we are over the complexity limit, filter out any post-inc prefering
 /// variables to only post-inc values.
 void LSRInstance::NarrowSearchSpaceByFilterPostInc() {
-  if (TTI.getPreferredAddressingMode(L, &SE) != TTI::AMK_PostIndexed)
+  if (!TTI.shouldFavorPostInc())
     return;
   if (EstimateSearchSpaceComplexity() < ComplexityLimit)
     return;
@@ -4980,8 +4978,7 @@ void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
     // This can sometimes (notably when trying to favour postinc) lead to
     // sub-optimial decisions. There it is best left to the cost modelling to
     // get correct.
-    if (TTI.getPreferredAddressingMode(L, &SE) != TTI::AMK_PostIndexed ||
-        LU.Kind != LSRUse::Address) {
+    if (!TTI.shouldFavorPostInc() || LU.Kind != LSRUse::Address) {
       int NumReqRegsToFind = std::min(F.getNumRegs(), ReqRegs.size());
       for (const SCEV *Reg : ReqRegs) {
         if ((F.ScaledReg && F.ScaledReg == Reg) ||
@@ -5563,8 +5560,7 @@ LSRInstance::LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE,
                          TargetLibraryInfo &TLI, MemorySSAUpdater *MSSAU)
     : IU(IU), SE(SE), DT(DT), LI(LI), AC(AC), TLI(TLI), TTI(TTI), L(L),
       MSSAU(MSSAU), FavorBackedgeIndex(EnableBackedgeIndexing &&
-                                       TTI.getPreferredAddressingMode(L, &SE) ==
-                                           TTI::AMK_PreIndexed) {
+                                       TTI.shouldFavorBackedgeIndex(L)) {
   // If LoopSimplify form is not available, stay out of trouble.
   if (!L->isLoopSimplifyForm())
     return;