Mirror of https://github.com/RPCS3/llvm-mirror.git
[LSR / TTI / SystemZ] Eliminate TargetTransformInfo::isFoldableMemAccess()
isLegalAddressingMode() has recently gained the extra optional Instruction* parameter, and therefore it can now do the job that previously only isFoldableMemAccess() could do.

The SystemZ implementation of isLegalAddressingMode() has gained the functionality of checking for offsets, which used to be done with isFoldableMemAccess().

The isFoldableMemAccess() hook has been removed everywhere.

Review: Quentin Colombet, Ulrich Weigand
https://reviews.llvm.org/D35933

llvm-svn: 310463
parent b5dbc00a27
commit 54a000e514
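For orientation, here is a small standalone C++ sketch of the decision the patched SystemZ isLegalAddressingMode() makes once the optional Instruction* is available. It is an illustration only, not code from the patch: AddrMode, AddressingMode and the isIntN/isUIntN helpers below are simplified stand-ins for the corresponding LLVM types and utilities.

#include <cstdint>
#include <iostream>

// Simplified stand-in for LLVM's TargetLowering::AddrMode (illustrative only).
struct AddrMode {
  int64_t BaseOffs = 0;   // displacement added to the base register
  int64_t Scale = 0;      // 0 = no index register, 1 = plain index register
  bool HasBaseGV = false; // base is a global value
};

// What a particular memory instruction can encode; in the patch this is
// computed per instruction by supportedAddressingMode(I, HasVector).
struct AddressingMode {
  bool LongDisplacement; // 20-bit signed displacement available
  bool IndexReg;         // index register available
};

static bool isIntN(unsigned N, int64_t x) {
  return x >= -(int64_t(1) << (N - 1)) && x < (int64_t(1) << (N - 1));
}
static bool isUIntN(unsigned N, int64_t x) {
  return x >= 0 && x < (int64_t(1) << N);
}

// Mirrors the shape of the post-patch SystemZ logic: the per-instruction
// offset check that used to live in isFoldableMemAccessOffset() is now part
// of the legality query itself.
bool isLegalAddressingMode(const AddrMode &AM, const AddressingMode *InstAM) {
  if (AM.HasBaseGV)
    return false;                 // punt on globals
  if (!isIntN(20, AM.BaseOffs))
    return false;                 // at most a 20-bit signed displacement

  AddressingMode Supported{true, true};
  if (InstAM)                     // instruction known: tighten the check
    Supported = *InstAM;

  if (!Supported.LongDisplacement && !isUIntN(12, AM.BaseOffs))
    return false;                 // only the short-displacement form fits
  if (!Supported.IndexReg)
    return AM.Scale == 0;         // no indexing allowed
  return AM.Scale == 0 || AM.Scale == 1;
}

int main() {
  AddressingMode ShortNoIndex{false, false}; // e.g. a vector-type access
  AddrMode AM;
  AM.BaseOffs = 5000; // fits in 20 bits, not in 12 bits
  std::cout << isLegalAddressingMode(AM, nullptr) << '\n';       // 1
  std::cout << isLegalAddressingMode(AM, &ShortNoIndex) << '\n'; // 0
  return 0;
}

With the Instruction* in hand, LSR can ask this single hook per fixup instead of going through a separate isFoldableMemAccessOffset() query.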
@@ -461,12 +461,6 @@ public:
   /// immediate offset and no index register.
   bool LSRWithInstrQueries() const;
 
-  /// \brief Return true if target supports the load / store
-  /// instruction with the given Offset on the form reg + Offset. It
-  /// may be that Offset is too big for a certain type (register
-  /// class).
-  bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) const;
-
   /// \brief Return true if it's free to truncate a value of type Ty1 to type
   /// Ty2. e.g. On x86 it's free to truncate a i32 value in register EAX to i16
   /// by referencing its sub-register AX.
@@ -904,7 +898,6 @@ public:
                                       int64_t BaseOffset, bool HasBaseReg,
                                       int64_t Scale, unsigned AddrSpace) = 0;
   virtual bool LSRWithInstrQueries() = 0;
-  virtual bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) = 0;
   virtual bool isTruncateFree(Type *Ty1, Type *Ty2) = 0;
   virtual bool isProfitableToHoist(Instruction *I) = 0;
   virtual bool isTypeLegal(Type *Ty) = 0;
@@ -1129,9 +1122,6 @@ public:
   bool LSRWithInstrQueries() override {
     return Impl.LSRWithInstrQueries();
   }
-  bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) override {
-    return Impl.isFoldableMemAccessOffset(I, Offset);
-  }
   bool isTruncateFree(Type *Ty1, Type *Ty2) override {
     return Impl.isTruncateFree(Ty1, Ty2);
   }
@@ -264,8 +264,6 @@ public:
 
   bool LSRWithInstrQueries() { return false; }
 
-  bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) { return true; }
-
   bool isTruncateFree(Type *Ty1, Type *Ty2) { return false; }
 
   bool isProfitableToHoist(Instruction *I) { return true; }
@@ -133,10 +133,6 @@ public:
     return getTLI()->getScalingFactorCost(DL, AM, Ty, AddrSpace);
   }
 
-  bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) {
-    return getTLI()->isFoldableMemAccessOffset(I, Offset);
-  }
-
   bool isTruncateFree(Type *Ty1, Type *Ty2) {
     return getTLI()->isTruncateFree(Ty1, Ty2);
   }
@@ -1904,10 +1904,6 @@ public:
     return -1;
   }
 
-  virtual bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) const {
-    return true;
-  }
-
   /// Return true if the specified immediate is legal icmp immediate, that is
   /// the target has icmp instructions which can compare a register against the
   /// immediate without having to materialize the immediate into a register.
@@ -189,11 +189,6 @@ bool TargetTransformInfo::LSRWithInstrQueries() const {
   return TTIImpl->LSRWithInstrQueries();
 }
 
-bool TargetTransformInfo::isFoldableMemAccessOffset(Instruction *I,
-                                                    int64_t Offset) const {
-  return TTIImpl->isFoldableMemAccessOffset(I, Offset);
-}
-
 bool TargetTransformInfo::isTruncateFree(Type *Ty1, Type *Ty2) const {
   return TTIImpl->isTruncateFree(Ty1, Ty2);
 }
@@ -688,11 +688,8 @@ supportedAddressingMode(Instruction *I, bool HasVector) {
   return AddressingMode(true/*LongDispl*/, true/*IdxReg*/);
 }
 
-// TODO: This method should also check for the displacement when *I is
-// passed. It may also be possible to merge with isFoldableMemAccessOffset()
-// now that both methods get the *I.
 bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
        const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I) const {
   // Punt on globals for now, although they can be used in limited
   // RELATIVE LONG cases.
   if (AM.BaseGV)
@@ -702,8 +699,14 @@ bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
   if (!isInt<20>(AM.BaseOffs))
     return false;
 
-  if (I != nullptr &&
-      !supportedAddressingMode(I, Subtarget.hasVector()).IndexReg)
+  AddressingMode SupportedAM(true, true);
+  if (I != nullptr)
+    SupportedAM = supportedAddressingMode(I, Subtarget.hasVector());
+
+  if (!SupportedAM.LongDisplacement && !isUInt<12>(AM.BaseOffs))
+    return false;
+
+  if (!SupportedAM.IndexReg)
     // No indexing allowed.
     return AM.Scale == 0;
   else
@@ -711,15 +714,6 @@ bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
     return AM.Scale == 0 || AM.Scale == 1;
 }
 
-// TODO: Should we check for isInt<20> also?
-bool SystemZTargetLowering::isFoldableMemAccessOffset(Instruction *I,
-                                                      int64_t Offset) const {
-  if (!supportedAddressingMode(I, Subtarget.hasVector()).LongDisplacement)
-    return (isUInt<12>(Offset));
-
-  return true;
-}
-
 bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
   if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
     return false;
@@ -398,7 +398,6 @@ public:
   bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                              unsigned AS,
                              Instruction *I = nullptr) const override;
-  bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) const override;
   bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS,
                                       unsigned Align,
                                       bool *Fast) const override;
@@ -1160,6 +1160,12 @@ public:
 
 } // end anonymous namespace
 
+static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
+                                 LSRUse::KindType Kind, MemAccessTy AccessTy,
+                                 GlobalValue *BaseGV, int64_t BaseOffset,
+                                 bool HasBaseReg, int64_t Scale,
+                                 Instruction *Fixup = nullptr);
+
 /// Tally up interesting quantities from the given register.
 void Cost::RateRegister(const SCEV *Reg,
                         SmallPtrSetImpl<const SCEV *> &Regs,
@@ -1288,7 +1294,8 @@ void Cost::RateFormula(const TargetTransformInfo &TTI,
     // Check with target if this offset with this instruction is
     // specifically not supported.
     if (LU.Kind == LSRUse::Address && Offset != 0 &&
-        !TTI.isFoldableMemAccessOffset(Fixup.UserInst, Offset))
+        !isAMCompletelyFolded(TTI, LSRUse::Address, LU.AccessTy, F.BaseGV,
+                              Offset, F.HasBaseReg, F.Scale, Fixup.UserInst))
       C.NumBaseAdds++;
   }
 
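As the hunk above shows, RateFormula's "specifically not supported" offset check now goes through isAMCompletelyFolded(), which asks isLegalAddressingMode() with the fixup's user instruction. A compact, self-contained model of that costing path follows; the User type, the names and the 12-bit cutoff are illustrative stand-ins, not the real LSR types.

#include <cstdint>
#include <vector>
#include <iostream>

// Hypothetical stand-in for TTI::isLegalAddressingMode(..., Instruction *I):
// users flagged as "short-form only" accept just 12-bit unsigned offsets.
struct User { const char *Name; bool ShortFormOnly; };

static bool isLegalAddressingMode(int64_t Offset, const User *I) {
  if (I && I->ShortFormOnly)
    return Offset >= 0 && Offset < (1 << 12);
  return Offset > -(1 << 19) && Offset < (1 << 19);
}

// Mirrors the Address case of isAMCompletelyFolded(): forward the fixup.
static bool isAMCompletelyFolded(int64_t Offset, const User *Fixup = nullptr) {
  return isLegalAddressingMode(Offset, Fixup);
}

int main() {
  // RateFormula-style costing: an offset the instruction cannot fold
  // costs an extra base add.
  std::vector<User> Fixups = {{"scalar.load", false}, {"vector.load", true}};
  int64_t Offset = 4096; // needs the long-displacement form
  int NumBaseAdds = 0;
  for (const User &U : Fixups)
    if (Offset != 0 && !isAMCompletelyFolded(Offset, &U))
      ++NumBaseAdds;
  std::cout << NumBaseAdds << '\n'; // 1: only the short-form user pays
}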
@@ -1543,7 +1550,7 @@ static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                  LSRUse::KindType Kind, MemAccessTy AccessTy,
                                  GlobalValue *BaseGV, int64_t BaseOffset,
                                  bool HasBaseReg, int64_t Scale,
-                                 Instruction *Fixup = nullptr) {
+                                 Instruction *Fixup/*= nullptr*/) {
   switch (Kind) {
   case LSRUse::Address:
     return TTI.isLegalAddressingMode(AccessTy.MemTy, BaseGV, BaseOffset,
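A side note on the /*= nullptr*/ change just above: C++ allows a default argument to appear on only one declaration of a function in a given scope, and it now lives on the new forward declaration, so the definition can only mention it in a comment. A minimal illustration with hypothetical names:

#include <iostream>

// The forward declaration carries the default argument.
static int rate(int Base, int *Fixup = nullptr);

// Repeating "= nullptr" here would be ill-formed (a default argument may not
// be redefined), so the definition documents it in a comment instead.
static int rate(int Base, int *Fixup /*= nullptr*/) {
  return Fixup ? Base + *Fixup : Base;
}

int main() {
  int Extra = 2;
  std::cout << rate(1) << ' ' << rate(1, &Extra) << '\n'; // prints "1 3"
}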
@@ -4,7 +4,7 @@
 ; of computing a new LC0 value.
 
 ; CHECK-LABEL: @test
-; CHECK: loop0(.LBB0_1,#998)
+; CHECK: loop0(.LBB0_1,#999)
 
 define i32 @test(i32* %A, i32* %B, i32 %count) {
 entry: