From dda4f352fa921b646c1bb5c0e2490d36d07a40d8 Mon Sep 17 00:00:00 2001
From: Sjoerd Meijer
Date: Mon, 15 Feb 2021 11:01:23 +0000
Subject: [PATCH] Revert "[TTI] Unify FavorPostInc and FavorBackedgeIndex into
 getPreferredAddressingMode"

This reverts commit cd6de0e8de4a5fd558580be4b1a07116914fc8ed.
---
 include/llvm/Analysis/TargetTransformInfo.h    | 25 ++++++++-----------
 .../llvm/Analysis/TargetTransformInfoImpl.h    |  7 +++---
 lib/Analysis/TargetTransformInfo.cpp           | 10 +++++---
 lib/Target/ARM/ARMTargetTransformInfo.cpp      | 22 ++++++++--------
 lib/Target/ARM/ARMTargetTransformInfo.h        |  4 +--
 .../Hexagon/HexagonTargetTransformInfo.cpp     |  5 ++--
 .../Hexagon/HexagonTargetTransformInfo.h       |  3 +--
 lib/Transforms/Scalar/LoopStrengthReduce.cpp   | 24 ++++++++----------
 8 files changed, 45 insertions(+), 55 deletions(-)

diff --git a/include/llvm/Analysis/TargetTransformInfo.h b/include/llvm/Analysis/TargetTransformInfo.h
index 79303dab92a..c3d7d2cc80a 100644
--- a/include/llvm/Analysis/TargetTransformInfo.h
+++ b/include/llvm/Analysis/TargetTransformInfo.h
@@ -638,15 +638,13 @@ public:
                   DominatorTree *DT, AssumptionCache *AC,
                   TargetLibraryInfo *LibInfo) const;
 
-  enum AddressingModeKind {
-    AMK_PreIndexed,
-    AMK_PostIndexed,
-    AMK_None
-  };
+  /// \return True if LSR should make efforts to create/preserve post-inc
+  /// addressing mode expressions.
+  bool shouldFavorPostInc() const;
 
-  /// Return the preferred addressing mode LSR should make efforts to generate.
-  AddressingModeKind getPreferredAddressingMode(const Loop *L,
-                                                ScalarEvolution *SE) const;
+  /// Return true if LSR should make efforts to generate indexed addressing
+  /// modes that operate across loop iterations.
+  bool shouldFavorBackedgeIndex(const Loop *L) const;
 
   /// Return true if the target supports masked store.
   bool isLegalMaskedStore(Type *DataType, Align Alignment) const;
@@ -1456,8 +1454,8 @@ public:
   virtual bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
                           LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC,
                           TargetLibraryInfo *LibInfo) = 0;
-  virtual AddressingModeKind
-  getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const = 0;
+  virtual bool shouldFavorPostInc() const = 0;
+  virtual bool shouldFavorBackedgeIndex(const Loop *L) const = 0;
   virtual bool isLegalMaskedStore(Type *DataType, Align Alignment) = 0;
   virtual bool isLegalMaskedLoad(Type *DataType, Align Alignment) = 0;
   virtual bool isLegalNTStore(Type *DataType, Align Alignment) = 0;
@@ -1798,10 +1796,9 @@ public:
                   TargetLibraryInfo *LibInfo) override {
     return Impl.canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
   }
-  AddressingModeKind
-  getPreferredAddressingMode(const Loop *L,
-                             ScalarEvolution *SE) const override {
-    return Impl.getPreferredAddressingMode(L, SE);
+  bool shouldFavorPostInc() const override { return Impl.shouldFavorPostInc(); }
+  bool shouldFavorBackedgeIndex(const Loop *L) const override {
+    return Impl.shouldFavorBackedgeIndex(L);
   }
   bool isLegalMaskedStore(Type *DataType, Align Alignment) override {
     return Impl.isLegalMaskedStore(DataType, Alignment);
diff --git a/include/llvm/Analysis/TargetTransformInfoImpl.h b/include/llvm/Analysis/TargetTransformInfoImpl.h
index a9c9d3cb9f4..84de5038df4 100644
--- a/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -209,10 +209,9 @@ public:
     return false;
   }
 
-  TTI::AddressingModeKind
-  getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const {
-    return TTI::AMK_None;
-  }
+  bool shouldFavorPostInc() const { return false; }
+
+  bool shouldFavorBackedgeIndex(const Loop *L) const { return false; }
 
   bool isLegalMaskedStore(Type *DataType, Align Alignment) const {
     return false;
diff --git a/lib/Analysis/TargetTransformInfo.cpp b/lib/Analysis/TargetTransformInfo.cpp
index 3db4b0b0d55..16992d099e0 100644
--- a/lib/Analysis/TargetTransformInfo.cpp
+++ b/lib/Analysis/TargetTransformInfo.cpp
@@ -409,10 +409,12 @@ bool TargetTransformInfo::canSaveCmp(Loop *L, BranchInst **BI,
   return TTIImpl->canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
 }
 
-TTI::AddressingModeKind
-TargetTransformInfo::getPreferredAddressingMode(const Loop *L,
-                                                ScalarEvolution *SE) const {
-  return TTIImpl->getPreferredAddressingMode(L, SE);
+bool TargetTransformInfo::shouldFavorPostInc() const {
+  return TTIImpl->shouldFavorPostInc();
+}
+
+bool TargetTransformInfo::shouldFavorBackedgeIndex(const Loop *L) const {
+  return TTIImpl->shouldFavorBackedgeIndex(L);
 }
 
 bool TargetTransformInfo::isLegalMaskedStore(Type *DataType,
diff --git a/lib/Target/ARM/ARMTargetTransformInfo.cpp b/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 8c2a79efc67..80f1f2a2a8f 100644
--- a/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -100,20 +100,18 @@ bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
   return MatchExact && MatchSubset;
 }
 
-TTI::AddressingModeKind
-ARMTTIImpl::getPreferredAddressingMode(const Loop *L,
-                                       ScalarEvolution *SE) const {
-  if (ST->hasMVEIntegerOps())
-    return TTI::AMK_PostIndexed;
-
+bool ARMTTIImpl::shouldFavorBackedgeIndex(const Loop *L) const {
   if (L->getHeader()->getParent()->hasOptSize())
-    return TTI::AMK_None;
+    return false;
+  if (ST->hasMVEIntegerOps())
+    return false;
+  return ST->isMClass() && ST->isThumb2() && L->getNumBlocks() == 1;
+}
-
-  if (ST->isMClass() && ST->isThumb2() &&
-      L->getNumBlocks() == 1)
-    return TTI::AMK_PreIndexed;
-
-  return TTI::AMK_None;
+
+bool ARMTTIImpl::shouldFavorPostInc() const {
+  if (ST->hasMVEIntegerOps())
+    return true;
+  return false;
 }
 
 Optional<Instruction *>
diff --git a/lib/Target/ARM/ARMTargetTransformInfo.h b/lib/Target/ARM/ARMTargetTransformInfo.h
index 80812892900..b8de27101a6 100644
--- a/lib/Target/ARM/ARMTargetTransformInfo.h
+++ b/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -103,8 +103,8 @@ public:
 
   bool enableInterleavedAccessVectorization() { return true; }
 
-  TTI::AddressingModeKind
-  getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const;
+  bool shouldFavorBackedgeIndex(const Loop *L) const;
+  bool shouldFavorPostInc() const;
 
   /// Floating-point computation using ARMv8 AArch32 Advanced
   /// SIMD instructions remains unchanged from ARMv7. Only AArch64 SIMD
diff --git a/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp b/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
index 89e7df0aa27..af7bc468224 100644
--- a/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
+++ b/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
@@ -80,9 +80,8 @@ void HexagonTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
   }
 }
 
-TTI::AddressingModeKind HexagonTTIImpl::getPreferredAddressingMode(
-    const Loop *L, ScalarEvolution *SE) const {
-  return TTI::AMK_PostIndexed;
+bool HexagonTTIImpl::shouldFavorPostInc() const {
+  return true;
 }
 
 /// --- Vector TTI begin ---
diff --git a/lib/Target/Hexagon/HexagonTargetTransformInfo.h b/lib/Target/Hexagon/HexagonTargetTransformInfo.h
index ebaa619837f..dc075d6147b 100644
--- a/lib/Target/Hexagon/HexagonTargetTransformInfo.h
+++ b/lib/Target/Hexagon/HexagonTargetTransformInfo.h
@@ -67,8 +67,7 @@ public:
                              TTI::PeelingPreferences &PP);
 
   /// Bias LSR towards creating post-increment opportunities.
-  TTI::AddressingModeKind getPreferredAddressingMode(const Loop *L,
-                                                     ScalarEvolution *SE) const;
+  bool shouldFavorPostInc() const;
 
   // L1 cache prefetch.
   unsigned getPrefetchDistance() const override;
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 2f90df70a3c..5dec9b54207 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -1227,15 +1227,13 @@ static unsigned getSetupCost(const SCEV *Reg, unsigned Depth) {
 /// Tally up interesting quantities from the given register.
 void Cost::RateRegister(const Formula &F, const SCEV *Reg,
                         SmallPtrSetImpl<const SCEV *> &Regs) {
-  TTI::AddressingModeKind AMK = TTI->getPreferredAddressingMode(L, SE);
-
   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
     // If this is an addrec for another loop, it should be an invariant
     // with respect to L since L is the innermost loop (at least
     // for now LSR only handles innermost loops).
     if (AR->getLoop() != L) {
       // If the AddRec exists, consider its register free and leave it alone.
-      if (isExistingPhi(AR, *SE) && AMK != TTI::AMK_PostIndexed)
+      if (isExistingPhi(AR, *SE) && !TTI->shouldFavorPostInc())
         return;
 
       // It is bad to allow LSR for current loop to add induction variables
@@ -1256,11 +1254,13 @@ void Cost::RateRegister(const Formula &F, const SCEV *Reg,
 
       // If the step size matches the base offset, we could use pre-indexed
      // addressing.
-      if (AMK == TTI::AMK_PreIndexed) {
+      if (TTI->shouldFavorBackedgeIndex(L)) {
         if (auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE)))
           if (Step->getAPInt() == F.BaseOffset)
             LoopCost = 0;
-      } else if (AMK == TTI::AMK_PostIndexed) {
+      }
+
+      if (TTI->shouldFavorPostInc()) {
         const SCEV *LoopStep = AR->getStepRecurrence(*SE);
         if (isa<SCEVConstant>(LoopStep)) {
           const SCEV *LoopStart = AR->getStart();
@@ -3575,8 +3575,7 @@ void LSRInstance::GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx,
   // may generate a post-increment operator. The reason is that the
   // reassociations cause extra base+register formula to be created,
   // and possibly chosen, but the post-increment is more efficient.
-  TTI::AddressingModeKind AMK = TTI.getPreferredAddressingMode(L, &SE);
-  if (AMK == TTI::AMK_PostIndexed && mayUsePostIncMode(TTI, LU, BaseReg, L, SE))
+  if (TTI.shouldFavorPostInc() && mayUsePostIncMode(TTI, LU, BaseReg, L, SE))
     return;
   SmallVector<const SCEV *, 8> AddOps;
   const SCEV *Remainder = CollectSubexprs(BaseReg, nullptr, AddOps, L, SE);
@@ -4240,8 +4239,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
         NewF.BaseOffset = (uint64_t)NewF.BaseOffset + Imm;
         if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
                         NewF)) {
-          if (TTI.getPreferredAddressingMode(this->L, &SE) ==
-                  TTI::AMK_PostIndexed &&
+          if (TTI.shouldFavorPostInc() &&
               mayUsePostIncMode(TTI, LU, OrigReg, this->L, SE))
             continue;
           if (!TTI.isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm))
@@ -4681,7 +4679,7 @@ void LSRInstance::NarrowSearchSpaceByFilterFormulaWithSameScaledReg() {
 /// If we are over the complexity limit, filter out any post-inc preferring
 /// variables to only post-inc values.
 void LSRInstance::NarrowSearchSpaceByFilterPostInc() {
-  if (TTI.getPreferredAddressingMode(L, &SE) != TTI::AMK_PostIndexed)
+  if (!TTI.shouldFavorPostInc())
     return;
   if (EstimateSearchSpaceComplexity() < ComplexityLimit)
     return;
@@ -4980,8 +4978,7 @@ void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
     // This can sometimes (notably when trying to favour postinc) lead to
     // sub-optimal decisions. There it is best left to the cost modelling to
     // get correct.
-    if (TTI.getPreferredAddressingMode(L, &SE) != TTI::AMK_PostIndexed ||
-        LU.Kind != LSRUse::Address) {
+    if (!TTI.shouldFavorPostInc() || LU.Kind != LSRUse::Address) {
       int NumReqRegsToFind = std::min(F.getNumRegs(), ReqRegs.size());
       for (const SCEV *Reg : ReqRegs) {
         if ((F.ScaledReg && F.ScaledReg == Reg) ||
@@ -5563,8 +5560,7 @@ LSRInstance::LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE,
                          TargetLibraryInfo &TLI, MemorySSAUpdater *MSSAU)
     : IU(IU), SE(SE), DT(DT), LI(LI), AC(AC), TLI(TLI), TTI(TTI), L(L),
       MSSAU(MSSAU), FavorBackedgeIndex(EnableBackedgeIndexing &&
-                                       TTI.getPreferredAddressingMode(L, &SE) ==
-                                           TTI::AMK_PreIndexed) {
+                                       TTI.shouldFavorBackedgeIndex(L)) {
   // If LoopSimplify form is not available, stay out of trouble.
   if (!L->isLoopSimplifyForm())
     return;