
[NFCI][CostModel] Add const to Value*.

Summary:
Get back the `const` qualifiers partially lost in one of the recent changes.
Additionally, specify explicit `const` qualifiers in a few places.

Reviewers: samparker

Reviewed By: samparker

Subscribers: hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D82383
dfukalov committed on 2020-06-23 16:07:44 +03:00
commit e968fde7cb (parent 9db79a6028)
12 changed files with 74 additions and 67 deletions
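
For context, a minimal sketch (hypothetical, not part of this commit) of the kind of caller this const-propagation unblocks: analysis code that only holds a `const Value *` can now query `TTI::getOperandInfo` without a `const_cast`. The helper name below is invented for illustration.

// Hypothetical caller against the post-patch API: classify a store's value
// operand for cost analysis while holding only const pointers.
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static TargetTransformInfo::OperandValueKind
classifyStoredValue(const StoreInst &SI) {
  // getValueOperand() on a const StoreInst yields a const Value *; before
  // this patch, getOperandInfo(Value *, ...) would have rejected it.
  const Value *V = SI.getValueOperand();
  TargetTransformInfo::OperandValueProperties Props;
  return TargetTransformInfo::getOperandInfo(V, Props);
}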


@@ -112,7 +112,7 @@ class IntrinsicCostAttributes {
   Type *RetTy = nullptr;
   Intrinsic::ID IID;
   SmallVector<Type *, 4> ParamTys;
-  SmallVector<Value *, 4> Arguments;
+  SmallVector<const Value *, 4> Arguments;
   FastMathFlags FMF;
   unsigned VF = 1;
   // If ScalarizationCost is UINT_MAX, the cost of scalarizing the
@@ -146,7 +146,7 @@ public:
                           ArrayRef<Type *> Tys);
   IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
-                          ArrayRef<Value *> Args);
+                          ArrayRef<const Value *> Args);

   Intrinsic::ID getID() const { return IID; }
   const IntrinsicInst *getInst() const { return II; }
@@ -154,7 +154,7 @@ public:
   unsigned getVectorFactor() const { return VF; }
   FastMathFlags getFlags() const { return FMF; }
   unsigned getScalarizationCost() const { return ScalarizationCost; }
-  const SmallVectorImpl<Value *> &getArgs() const { return Arguments; }
+  const SmallVectorImpl<const Value *> &getArgs() const { return Arguments; }
   const SmallVectorImpl<Type *> &getArgTypes() const { return ParamTys; }

   bool isTypeBasedOnly() const {
@@ -951,7 +951,7 @@ public:
   unsigned getMaxInterleaveFactor(unsigned VF) const;

   /// Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
-  static OperandValueKind getOperandInfo(Value *V,
+  static OperandValueKind getOperandInfo(const Value *V,
                                          OperandValueProperties &OpProps);

   /// This is an approximation of reciprocal throughput of a math/logic op.
@@ -1037,9 +1037,10 @@ public:
   /// \p I - the optional original context instruction, if one exists, e.g. the
   /// load/store to transform or the call to the gather/scatter intrinsic
   int getGatherScatterOpCost(
-      unsigned Opcode, Type *DataTy, Value *Ptr, bool VariableMask,
-      unsigned Alignment, TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
-      const Instruction *I = nullptr) const;
+      unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
+      unsigned Alignment,
+      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
+      const Instruction *I = nullptr) const;

   /// \return The cost of the interleaved memory operation.
   /// \p Opcode is the memory operation code
@@ -1429,10 +1430,11 @@ public:
                                      unsigned Alignment,
                                      unsigned AddressSpace,
                                      TTI::TargetCostKind CostKind) = 0;
-  virtual int getGatherScatterOpCost(
-      unsigned Opcode, Type *DataTy, Value *Ptr, bool VariableMask,
-      unsigned Alignment, TTI::TargetCostKind CostKind,
-      const Instruction *I = nullptr) = 0;
+  virtual int getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
+                                     const Value *Ptr, bool VariableMask,
+                                     unsigned Alignment,
+                                     TTI::TargetCostKind CostKind,
+                                     const Instruction *I = nullptr) = 0;
   virtual int
   getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
@@ -1848,10 +1850,10 @@ public:
     return Impl.getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                       CostKind);
   }
-  int getGatherScatterOpCost(
-      unsigned Opcode, Type *DataTy, Value *Ptr, bool VariableMask,
-      unsigned Alignment, TTI::TargetCostKind CostKind,
-      const Instruction *I = nullptr) override {
+  int getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
+                             bool VariableMask, unsigned Alignment,
+                             TTI::TargetCostKind CostKind,
+                             const Instruction *I = nullptr) override {
     return Impl.getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                        Alignment, CostKind, I);
   }
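
Aside (not part of the diff): a hedged sketch of how the const-qualified IntrinsicCostAttributes constructor above might be used; the function and its setting are invented for illustration, and the cost-kind argument assumes the getIntrinsicInstrCost signature of this era.

// Hypothetical usage: price an fshl call whose operands are reachable only
// through const pointers, via the new ArrayRef<const Value *> constructor.
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
using namespace llvm;

static int costOfFunnelShift(const CallInst &CI,
                             const TargetTransformInfo &TTI) {
  // Collecting operands as const Value * now satisfies the constructor.
  SmallVector<const Value *, 4> Args(CI.arg_begin(), CI.arg_end());
  IntrinsicCostAttributes ICA(Intrinsic::fshl, CI.getType(), Args);
  return TTI.getIntrinsicInstrCost(ICA,
                                   TargetTransformInfo::TCK_RecipThroughput);
}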


@@ -470,10 +470,11 @@ public:
     return 1;
   }

-  unsigned getGatherScatterOpCost(
-      unsigned Opcode, Type *DataTy, Value *Ptr, bool VariableMask,
-      unsigned Alignment, TTI::TargetCostKind CostKind,
-      const Instruction *I = nullptr) {
+  unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
+                                  const Value *Ptr, bool VariableMask,
+                                  unsigned Alignment,
+                                  TTI::TargetCostKind CostKind,
+                                  const Instruction *I = nullptr) {
     return 1;
   }


@@ -1139,14 +1139,14 @@ public:
                       : 1);
     assert((RetVF == 1 || VF == 1) && "VF > 1 and RetVF is a vector type");
     const IntrinsicInst *I = ICA.getInst();
-    const SmallVectorImpl<Value *> &Args = ICA.getArgs();
+    const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
     FastMathFlags FMF = ICA.getFlags();

     switch (IID) {
     default: {
       // Assume that we need to scalarize this intrinsic.
       SmallVector<Type *, 4> Types;
-      for (Value *Op : Args) {
+      for (const Value *Op : Args) {
         Type *OpTy = Op->getType();
         assert(VF == 1 || !OpTy->isVectorTy());
         Types.push_back(VF == 1 ? OpTy : FixedVectorType::get(OpTy, VF));
@@ -1173,7 +1173,7 @@
     }
     case Intrinsic::masked_scatter: {
       assert(VF == 1 && "Can't vectorize types here.");
-      Value *Mask = Args[3];
+      const Value *Mask = Args[3];
       bool VarMask = !isa<Constant>(Mask);
       unsigned Alignment = cast<ConstantInt>(Args[2])->getZExtValue();
       return ConcreteTTI->getGatherScatterOpCost(Instruction::Store,
@@ -1183,7 +1183,7 @@
     }
     case Intrinsic::masked_gather: {
       assert(VF == 1 && "Can't vectorize types here.");
-      Value *Mask = Args[2];
+      const Value *Mask = Args[2];
       bool VarMask = !isa<Constant>(Mask);
       unsigned Alignment = cast<ConstantInt>(Args[1])->getZExtValue();
       return ConcreteTTI->getGatherScatterOpCost(
@@ -1207,9 +1207,9 @@
     }
     case Intrinsic::fshl:
     case Intrinsic::fshr: {
-      Value *X = Args[0];
-      Value *Y = Args[1];
-      Value *Z = Args[2];
+      const Value *X = Args[0];
+      const Value *Y = Args[1];
+      const Value *Z = Args[2];
       TTI::OperandValueProperties OpPropsX, OpPropsY, OpPropsZ, OpPropsBW;
       TTI::OperandValueKind OpKindX = TTI::getOperandInfo(X, OpPropsX);
       TTI::OperandValueKind OpKindY = TTI::getOperandInfo(Y, OpPropsY);


@@ -68,7 +68,7 @@ IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id,
                                                  const CallBase &CI) :
     II(dyn_cast<IntrinsicInst>(&CI)), RetTy(CI.getType()), IID(Id) {

-  if (auto *FPMO = dyn_cast<FPMathOperator>(&CI))
+  if (const auto *FPMO = dyn_cast<FPMathOperator>(&CI))
     FMF = FPMO->getFastMathFlags();

   FunctionType *FTy =
@@ -96,7 +96,7 @@ IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id,
                                                  unsigned ScalarCost) :
     RetTy(CI.getType()), IID(Id), VF(Factor), ScalarizationCost(ScalarCost) {

-  if (auto *FPMO = dyn_cast<FPMathOperator>(&CI))
+  if (const auto *FPMO = dyn_cast<FPMathOperator>(&CI))
     FMF = FPMO->getFastMathFlags();

   Arguments.insert(Arguments.begin(), CI.arg_begin(), CI.arg_end());
@@ -136,8 +136,8 @@ IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
 }

 IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *Ty,
-                                                 ArrayRef<Value *> Args) :
-    RetTy(Ty), IID(Id) {
+                                                 ArrayRef<const Value *> Args)
+    : RetTy(Ty), IID(Id) {

   Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
   ParamTys.reserve(Arguments.size());
@@ -633,11 +633,12 @@ unsigned TargetTransformInfo::getMaxInterleaveFactor(unsigned VF) const {
 }

 TargetTransformInfo::OperandValueKind
-TargetTransformInfo::getOperandInfo(Value *V, OperandValueProperties &OpProps) {
+TargetTransformInfo::getOperandInfo(const Value *V,
+                                    OperandValueProperties &OpProps) {
   OperandValueKind OpInfo = OK_AnyValue;
   OpProps = OP_None;

-  if (auto *CI = dyn_cast<ConstantInt>(V)) {
+  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
     if (CI->getValue().isPowerOf2())
       OpProps = OP_PowerOf2;
     return OK_UniformConstantValue;
@@ -646,7 +647,7 @@ TargetTransformInfo::getOperandInfo(Value *V, OperandValueProperties &OpProps) {
   // A broadcast shuffle creates a uniform value.
   // TODO: Add support for non-zero index broadcasts.
   // TODO: Add support for different source vector width.
-  if (auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V))
+  if (const auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V))
     if (ShuffleInst->isZeroEltSplat())
       OpInfo = OK_UniformValue;

@@ -661,7 +662,7 @@ TargetTransformInfo::getOperandInfo(Value *V, OperandValueProperties &OpProps) {
     if (auto *CI = dyn_cast<ConstantInt>(Splat))
       if (CI->getValue().isPowerOf2())
         OpProps = OP_PowerOf2;
-  } else if (auto *CDS = dyn_cast<ConstantDataSequential>(V)) {
+  } else if (const auto *CDS = dyn_cast<ConstantDataSequential>(V)) {
     OpProps = OP_PowerOf2;
     for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I) {
       if (auto *CI = dyn_cast<ConstantInt>(CDS->getElementAsConstant(I)))
@@ -767,10 +768,12 @@ int TargetTransformInfo::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
   return Cost;
 }

-int TargetTransformInfo::getGatherScatterOpCost(
-    unsigned Opcode, Type *DataTy, Value *Ptr, bool VariableMask,
-    unsigned Alignment, TTI::TargetCostKind CostKind,
-    const Instruction *I) const {
+int TargetTransformInfo::getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
+                                                const Value *Ptr,
+                                                bool VariableMask,
+                                                unsigned Alignment,
+                                                TTI::TargetCostKind CostKind,
+                                                const Instruction *I) const {
   int Cost = TTIImpl->getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                              Alignment, CostKind, I);
   assert(Cost >= 0 && "TTI should not produce negative costs!");


@@ -953,7 +953,7 @@ int ARMTTIImpl::getInterleavedMemoryOpCost(
 }

 unsigned ARMTTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
-                                            Value *Ptr, bool VariableMask,
+                                            const Value *Ptr, bool VariableMask,
                                             unsigned Alignment,
                                             TTI::TargetCostKind CostKind,
                                             const Instruction *I) {
@@ -1032,9 +1032,9 @@ unsigned ARMTTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
   if (ExtSize != 8 && ExtSize != 16)
     return ScalarCost;

-  if (auto BC = dyn_cast<BitCastInst>(Ptr))
+  if (const auto *BC = dyn_cast<BitCastInst>(Ptr))
     Ptr = BC->getOperand(0);
-  if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
+  if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
     if (GEP->getNumOperands() != 2)
       return ScalarCost;
     unsigned Scale = DL.getTypeAllocSize(GEP->getResultElementType());
@@ -1042,7 +1042,7 @@ unsigned ARMTTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
     if (Scale != 1 && Scale * 8 != ExtSize)
       return ScalarCost;
     // And we need to zext (not sext) the indexes from a small enough type.
-    if (auto ZExt = dyn_cast<ZExtInst>(GEP->getOperand(1))) {
+    if (const auto *ZExt = dyn_cast<ZExtInst>(GEP->getOperand(1))) {
       if (ZExt->getOperand(0)->getType()->getScalarSizeInBits() <= ExtSize)
         return VectorCost;
     }


@@ -231,10 +231,11 @@ public:
                                    bool UseMaskForCond = false,
                                    bool UseMaskForGaps = false);

-  unsigned getGatherScatterOpCost(
-      unsigned Opcode, Type *DataTy, Value *Ptr, bool VariableMask,
-      unsigned Alignment, TTI::TargetCostKind CostKind,
-      const Instruction *I = nullptr);
+  unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
+                                  const Value *Ptr, bool VariableMask,
+                                  unsigned Alignment,
+                                  TTI::TargetCostKind CostKind,
+                                  const Instruction *I = nullptr);

   bool isLoweredToCall(const Function *F);
   bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,


@@ -213,9 +213,8 @@ unsigned HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
 }

 unsigned HexagonTTIImpl::getGatherScatterOpCost(
-    unsigned Opcode, Type *DataTy, Value *Ptr, bool VariableMask,
-    unsigned Alignment, TTI::TargetCostKind CostKind,
-    const Instruction *I) {
+    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
+    unsigned Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
   return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                        Alignment, CostKind, I);
 }


@@ -120,8 +120,9 @@ public:
           TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency);
   unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
           Type *SubTp);
-  unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr,
-                                  bool VariableMask, unsigned Alignment,
+  unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
+                                  const Value *Ptr, bool VariableMask,
+                                  unsigned Alignment,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I);
   unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,


@@ -2771,7 +2771,7 @@ int X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
   Intrinsic::ID IID = ICA.getID();
   Type *RetTy = ICA.getReturnType();
-  const SmallVectorImpl<Value *> &Args = ICA.getArgs();
+  const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
   unsigned ISD = ISD::DELETED_NODE;
   switch (IID) {
   default:
@@ -3849,7 +3849,7 @@ X86TTIImpl::getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
 }

 // Return an average cost of Gather / Scatter instruction, maybe improved later
-int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
+int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, const Value *Ptr,
                                 unsigned Alignment, unsigned AddressSpace) {

   assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
@@ -3860,14 +3860,14 @@ int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
   // operation will use 16 x 64 indices which do not fit in a zmm and needs
   // to split. Also check that the base pointer is the same for all lanes,
   // and that there's at most one variable index.
-  auto getIndexSizeInBits = [](Value *Ptr, const DataLayout& DL) {
+  auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) {
     unsigned IndexSize = DL.getPointerSizeInBits();
-    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
+    const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
     if (IndexSize < 64 || !GEP)
       return IndexSize;

     unsigned NumOfVarIndices = 0;
-    Value *Ptrs = GEP->getPointerOperand();
+    const Value *Ptrs = GEP->getPointerOperand();
     if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
       return IndexSize;
     for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
@@ -3884,7 +3884,6 @@ int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
     return (unsigned)32;
   };
-
   // Trying to reduce IndexSize to 32 bits for vector 16.
   // By default the IndexSize is equal to pointer size.
   unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
@@ -3963,10 +3962,11 @@ int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
 }

 /// Calculate the cost of Gather / Scatter operation
-int X86TTIImpl::getGatherScatterOpCost(
-    unsigned Opcode, Type *SrcVTy, Value *Ptr, bool VariableMask,
-    unsigned Alignment, TTI::TargetCostKind CostKind,
-    const Instruction *I = nullptr) {
+int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy,
+                                       const Value *Ptr, bool VariableMask,
+                                       unsigned Alignment,
+                                       TTI::TargetCostKind CostKind,
+                                       const Instruction *I = nullptr) {
   if (CostKind != TTI::TCK_RecipThroughput)
     return 1;


@@ -144,7 +144,7 @@ public:
   int getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                             unsigned AddressSpace,
                             TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency);
-  int getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr,
+  int getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
                              bool VariableMask, unsigned Alignment,
                              TTI::TargetCostKind CostKind,
                              const Instruction *I);
@@ -230,7 +230,7 @@ public:
 private:
   int getGSScalarCost(unsigned Opcode, Type *DataTy, bool VariableMask,
                       unsigned Alignment, unsigned AddressSpace);
-  int getGSVectorCost(unsigned Opcode, Type *DataTy, Value *Ptr,
+  int getGSVectorCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
                       unsigned Alignment, unsigned AddressSpace);

   /// @}


@@ -1553,9 +1553,9 @@ bool LoopIdiomRecognize::recognizeAndInsertFFS() {
   //  %inc = add nsw %i.0, 1
   //  br i1 %tobool

-  Value *Args[] =
-      {InitX, ZeroCheck ? ConstantInt::getTrue(InitX->getContext())
-                        : ConstantInt::getFalse(InitX->getContext())};
+  const Value *Args[] = {
+      InitX, ZeroCheck ? ConstantInt::getTrue(InitX->getContext())
+                       : ConstantInt::getFalse(InitX->getContext())};

   // @llvm.dbg doesn't count as they have no semantic effect.
   auto InstWithoutDebugIt = CurLoop->getHeader()->instructionsWithoutDebug();


@@ -5932,7 +5932,7 @@ unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
   Type *ValTy = getMemInstValueType(I);
   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
   const Align Alignment = getLoadStoreAlignment(I);
-  Value *Ptr = getLoadStorePointerOperand(I);
+  const Value *Ptr = getLoadStorePointerOperand(I);

   return TTI.getAddressComputationCost(VectorTy) +
          TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,