
[TTI] NFC: Change getScalarizationOverhead and getOperandsScalarizationOverhead to return InstructionCost.

This patch migrates the TTI cost interfaces to return an InstructionCost.

See this patch for the introduction of the type: https://reviews.llvm.org/D91174
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2020-November/146408.html

Reviewed By: sdesmalen

Differential Revision: https://reviews.llvm.org/D101283
Commit: 584872cffa (parent 1892e228c0)
Author: dfukalov, 2021-04-22 12:41:01 +03:00 (committed by Daniil Fukalov)
10 changed files with 59 additions and 53 deletions
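
For context on the new return type: InstructionCost (introduced in D91174, linked above) pairs an integer cost with a validity state, and invalidity propagates through cost arithmetic, so a target can report that an operation cannot be costed at all instead of returning a sentinel value. A minimal sketch of that idea follows; CostSketch and its members are illustrative stand-ins, not the actual class in llvm/Support/InstructionCost.h.

#include <cassert>
#include <cstdint>

// CostSketch is a hypothetical, simplified stand-in for llvm::InstructionCost.
class CostSketch {
  int64_t Value = 0;
  bool Valid = true;

public:
  CostSketch(int64_t V = 0) : Value(V) {}

  static CostSketch getInvalid() {
    CostSketch C;
    C.Valid = false;
    return C;
  }

  bool isValid() const { return Valid; }

  // Invalidity is sticky: adding an invalid cost poisons the result.
  CostSketch &operator+=(const CostSketch &RHS) {
    Valid &= RHS.Valid;
    Value += RHS.Value;
    return *this;
  }

  // The real class exposes the value behind an optional-style accessor;
  // asserting here keeps the sketch short.
  int64_t getValue() const {
    assert(Valid && "querying the value of an invalid cost");
    return Value;
  }
};

Because the state travels with the value, interfaces such as getScalarizationOverhead can simply return the accumulated cost, which is exactly the change this commit makes file by file below.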

include/llvm/Analysis/TargetTransformInfo.h

@@ -728,13 +728,14 @@ public:
   /// Estimate the overhead of scalarizing an instruction. Insert and Extract
   /// are set if the demanded result elements need to be inserted and/or
   /// extracted from vectors.
-  unsigned getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts,
-                                    bool Insert, bool Extract) const;
+  InstructionCost getScalarizationOverhead(VectorType *Ty,
+                                           const APInt &DemandedElts,
+                                           bool Insert, bool Extract) const;
 
   /// Estimate the overhead of scalarizing an instructions unique
   /// non-constant operands. The (potentially vector) types to use for each of
   /// argument are passes via Tys.
-  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
-                                            ArrayRef<Type *> Tys) const;
+  InstructionCost getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
+                                                   ArrayRef<Type *> Tys) const;
 
   /// If target has efficient vector element load/store instructions, it can
@@ -1499,10 +1500,11 @@ public:
   virtual bool shouldBuildLookupTablesForConstant(Constant *C) = 0;
   virtual bool shouldBuildRelLookupTables() = 0;
   virtual bool useColdCCForColdCall(Function &F) = 0;
-  virtual unsigned getScalarizationOverhead(VectorType *Ty,
-                                            const APInt &DemandedElts,
-                                            bool Insert, bool Extract) = 0;
-  virtual unsigned
+  virtual InstructionCost getScalarizationOverhead(VectorType *Ty,
+                                                   const APInt &DemandedElts,
+                                                   bool Insert,
+                                                   bool Extract) = 0;
+  virtual InstructionCost
   getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                    ArrayRef<Type *> Tys) = 0;
   virtual bool supportsEfficientVectorElementLoadStore() = 0;
@@ -1900,11 +1902,13 @@ public:
     return Impl.useColdCCForColdCall(F);
   }
-  unsigned getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts,
-                                    bool Insert, bool Extract) override {
+  InstructionCost getScalarizationOverhead(VectorType *Ty,
+                                           const APInt &DemandedElts,
+                                           bool Insert, bool Extract) override {
     return Impl.getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
   }
-  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
-                                            ArrayRef<Type *> Tys) override {
+  InstructionCost
+  getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
+                                   ArrayRef<Type *> Tys) override {
     return Impl.getOperandsScalarizationOverhead(Args, Tys);
   }

include/llvm/Analysis/TargetTransformInfoImpl.h

@@ -301,12 +301,13 @@ public:
   bool useColdCCForColdCall(Function &F) const { return false; }
 
-  unsigned getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts,
-                                    bool Insert, bool Extract) const {
+  InstructionCost getScalarizationOverhead(VectorType *Ty,
+                                           const APInt &DemandedElts,
+                                           bool Insert, bool Extract) const {
     return 0;
   }
 
-  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
-                                            ArrayRef<Type *> Tys) const {
+  InstructionCost getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
+                                                   ArrayRef<Type *> Tys) const {
     return 0;
   }

include/llvm/CodeGen/BasicTTIImpl.h

@@ -213,8 +213,8 @@ private:
         getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind));
 
     // Next, compute the cost of packing the result in a vector.
-    int PackingCost = getScalarizationOverhead(VT, Opcode != Instruction::Store,
-                                               Opcode == Instruction::Store);
+    InstructionCost PackingCost = getScalarizationOverhead(
+        VT, Opcode != Instruction::Store, Opcode == Instruction::Store);
 
     InstructionCost ConditionalCost = 0;
     if (VariableMask) {
@@ -650,7 +650,8 @@ public:
   /// Estimate the overhead of scalarizing an instruction. Insert and Extract
   /// are set if the demanded result elements need to be inserted and/or
   /// extracted from vectors.
-  unsigned getScalarizationOverhead(VectorType *InTy, const APInt &DemandedElts,
-                                    bool Insert, bool Extract) {
+  InstructionCost getScalarizationOverhead(VectorType *InTy,
+                                           const APInt &DemandedElts,
+                                           bool Insert, bool Extract) {
     /// FIXME: a bitfield is not a reasonable abstraction for talking about
     /// which elements are needed from a scalable vector
@@ -670,11 +671,11 @@ public:
       Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
     }
 
-    return *Cost.getValue();
+    return Cost;
   }
 
   /// Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
-  unsigned getScalarizationOverhead(VectorType *InTy, bool Insert,
-                                    bool Extract) {
+  InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert,
+                                           bool Extract) {
     auto *Ty = cast<FixedVectorType>(InTy);
@@ -685,11 +686,11 @@ public:
   /// Estimate the overhead of scalarizing an instructions unique
   /// non-constant operands. The (potentially vector) types to use for each of
   /// argument are passes via Tys.
-  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
-                                            ArrayRef<Type *> Tys) {
+  InstructionCost getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
+                                                   ArrayRef<Type *> Tys) {
     assert(Args.size() == Tys.size() && "Expected matching Args and Tys");
 
-    unsigned Cost = 0;
+    InstructionCost Cost = 0;
     SmallPtrSet<const Value*, 4> UniqueOperands;
     for (int I = 0, E = Args.size(); I != E; I++) {
       // Disregard things like metadata arguments.
@@ -712,12 +713,10 @@ public:
   /// instruction, with return type RetTy and arguments Args of type Tys. If
   /// Args are unknown (empty), then the cost associated with one argument is
  /// added as a heuristic.
-  unsigned getScalarizationOverhead(VectorType *RetTy,
-                                    ArrayRef<const Value *> Args,
-                                    ArrayRef<Type *> Tys) {
-    unsigned Cost = 0;
-    Cost += getScalarizationOverhead(RetTy, true, false);
-
+  InstructionCost getScalarizationOverhead(VectorType *RetTy,
+                                           ArrayRef<const Value *> Args,
+                                           ArrayRef<Type *> Tys) {
+    InstructionCost Cost = getScalarizationOverhead(RetTy, true, false);
     if (!Args.empty())
       Cost += getOperandsScalarizationOverhead(Args, Tys);
     else
@@ -756,7 +755,7 @@ public:
     bool IsFloat = Ty->isFPOrFPVectorTy();
     // Assume that floating point arithmetic operations cost twice as much as
     // integer operations.
-    unsigned OpCost = (IsFloat ? 2 : 1);
+    InstructionCost OpCost = (IsFloat ? 2 : 1);
 
     if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
       // The operation is legal. Assume it costs 1.
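
The BasicTTIImpl.h hunks above show the shape of the whole migration: the local accumulator becomes an InstructionCost and the final *Cost.getValue() unwrap disappears, so validity now flows out to callers. Below is a self-contained sketch of the demanded-elements loop, assuming a unit cost per insert/extract in place of thisT()->getVectorInstrCost(...) and a plain 64-bit mask in place of APInt; the names and costs are hypothetical.

#include <cstdint>

struct CostSketch { // hypothetical stand-in for llvm::InstructionCost
  int64_t Value = 0;
  bool Valid = true;
  CostSketch &operator+=(const CostSketch &RHS) {
    Valid &= RHS.Valid; // an invalid operand poisons the sum
    Value += RHS.Value;
    return *this;
  }
};

CostSketch getScalarizationOverheadSketch(uint64_t DemandedElts,
                                          unsigned NumElts, bool Insert,
                                          bool Extract) {
  CostSketch Cost;
  for (unsigned I = 0; I < NumElts; ++I) {
    if (!(DemandedElts & (UINT64_C(1) << I)))
      continue; // element not demanded: nothing to insert or extract
    if (Insert)
      Cost += CostSketch{1, true}; // assumed cost of one insertelement
    if (Extract)
      Cost += CostSketch{1, true}; // assumed cost of one extractelement
  }
  return Cost; // returned whole; no *Cost.getValue() unwrap needed
}

For example, getScalarizationOverheadSketch(0b1011, 4, true, false) charges three inserts (elements 0, 1, and 3) and stays valid.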

lib/Analysis/TargetTransformInfo.cpp

@@ -470,14 +470,14 @@ bool TargetTransformInfo::useColdCCForColdCall(Function &F) const {
   return TTIImpl->useColdCCForColdCall(F);
 }
 
-unsigned
+InstructionCost
 TargetTransformInfo::getScalarizationOverhead(VectorType *Ty,
                                               const APInt &DemandedElts,
                                               bool Insert, bool Extract) const {
   return TTIImpl->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
 }
 
-unsigned TargetTransformInfo::getOperandsScalarizationOverhead(
+InstructionCost TargetTransformInfo::getOperandsScalarizationOverhead(
     ArrayRef<const Value *> Args, ArrayRef<Type *> Tys) const {
   return TTIImpl->getOperandsScalarizationOverhead(Args, Tys);
 }

lib/Target/ARM/ARMTargetTransformInfo.cpp

@@ -1514,8 +1514,8 @@ InstructionCost ARMTTIImpl::getGatherScatterOpCost(
       NumElems * LT.first * ST->getMVEVectorCostFactor(CostKind);
 
   // The scalarization cost should be a lot higher. We use the number of vector
   // elements plus the scalarization overhead.
-  unsigned ScalarCost = NumElems * LT.first +
-                        BaseT::getScalarizationOverhead(VTy, true, false) +
-                        BaseT::getScalarizationOverhead(VTy, false, true);
+  InstructionCost ScalarCost =
+      NumElems * LT.first + BaseT::getScalarizationOverhead(VTy, true, false) +
+      BaseT::getScalarizationOverhead(VTy, false, true);
 
   if (EltSize < 8 || Alignment < EltSize / 8)

lib/Target/Hexagon/HexagonTargetTransformInfo.cpp

@@ -122,13 +122,12 @@ ElementCount HexagonTTIImpl::getMinimumVF(unsigned ElemWidth,
   return ElementCount::getFixed((8 * ST.getVectorLength()) / ElemWidth);
 }
 
-unsigned HexagonTTIImpl::getScalarizationOverhead(VectorType *Ty,
-                                                  const APInt &DemandedElts,
-                                                  bool Insert, bool Extract) {
+InstructionCost HexagonTTIImpl::getScalarizationOverhead(
+    VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract) {
   return BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
 }
 
-unsigned
+InstructionCost
 HexagonTTIImpl::getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                                  ArrayRef<Type *> Tys) {
   return BaseT::getOperandsScalarizationOverhead(Args, Tys);

lib/Target/Hexagon/HexagonTargetTransformInfo.h

@@ -104,9 +104,10 @@ public:
     return true;
   }
 
-  unsigned getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts,
-                                    bool Insert, bool Extract);
+  InstructionCost getScalarizationOverhead(VectorType *Ty,
+                                           const APInt &DemandedElts,
+                                           bool Insert, bool Extract);
-  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
-                                            ArrayRef<Type *> Tys);
+  InstructionCost getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
+                                                   ArrayRef<Type *> Tys);
   InstructionCost getCallInstrCost(Function *F, Type *RetTy,
                                    ArrayRef<Type *> Tys,

lib/Target/SystemZ/SystemZTargetTransformInfo.cpp

@@ -541,7 +541,7 @@ InstructionCost SystemZTTIImpl::getArithmeticInstrCost(
     // There is no native support for FRem.
     if (Opcode == Instruction::FRem) {
      SmallVector<Type *> Tys(Args.size(), Ty);
-      unsigned Cost =
+      InstructionCost Cost =
          (VF * LIBCALL_COST) + getScalarizationOverhead(VTy, Args, Tys);
      // FIXME: VF 2 for float is currently just as expensive as for VF 4.
      if (VF == 2 && ScalarBits == 32)

lib/Target/X86/X86TargetTransformInfo.cpp

@@ -3121,10 +3121,11 @@ InstructionCost X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
   return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
 }
 
-unsigned X86TTIImpl::getScalarizationOverhead(VectorType *Ty,
-                                              const APInt &DemandedElts,
-                                              bool Insert, bool Extract) {
-  unsigned Cost = 0;
+InstructionCost X86TTIImpl::getScalarizationOverhead(VectorType *Ty,
+                                                     const APInt &DemandedElts,
+                                                     bool Insert,
+                                                     bool Extract) {
+  InstructionCost Cost = 0;
 
   // For insertions, a ISD::BUILD_VECTOR style vector initialization can be much
   // cheaper than an accumulation of ISD::INSERT_VECTOR_ELT.

lib/Target/X86/X86TargetTransformInfo.h

@@ -140,7 +140,8 @@ public:
                   const Instruction *I = nullptr);
   InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                      unsigned Index);
-  unsigned getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts,
-                                    bool Insert, bool Extract);
+  InstructionCost getScalarizationOverhead(VectorType *Ty,
+                                           const APInt &DemandedElts,
+                                           bool Insert, bool Extract);
   InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src,
                                   MaybeAlign Alignment, unsigned AddressSpace,