
[TTI] Make the cost APIs in TargetTransformInfo consistently use 'int' rather than 'unsigned' for their costs.

For something like costs in particular there is a natural "negative"
value, that of savings or saved cost. As a consequence, there is a lot
of code that subtracts or creates negative values based on cost, all of
which is prone to awkwardness or bugs when dealing with an unsigned
type. Similarly, we *never* want these values to wrap, as that would
cause Very Bad code generation (likely perceived as an infinite loop as
we try to emit over 2^32 instructions or some such insanity).
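
A minimal standalone sketch of the wrap hazard described above (illustrative
only, not code from this patch):

#include <cassert>
#include <cstdio>

int main() {
  // With an unsigned cost, subtracting a larger "savings" silently wraps
  // around to a huge value instead of going negative.
  unsigned UnsignedCost = 1;
  UnsignedCost -= 2; // wraps to 4294967295 and reads as an enormous cost
  std::printf("unsigned: %u\n", UnsignedCost);

  // With int, the same arithmetic yields -1: representable, and trivially
  // testable against zero.
  int SignedCost = 1;
  SignedCost -= 2;
  std::printf("int: %d\n", SignedCost);
  assert(SignedCost < 0 && "a net savings is now a detectable state");
  return 0;
}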

All around 'int' seems a much better fit for these basic metrics. I've
added asserts to ensure that at least the TTI interface never returns
negative numbers here. If we ever have a use case for negative numbers,
we can remove this, but this way a bug where someone used '-1' to
produce a 'very large' cost will be caught by the assert.
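
In isolation, the guard pattern added below looks like this (a sketch only;
computeCost is a hypothetical stand-in for the underlying TTIImpl hook):

#include <cassert>

// Hypothetical target cost hook. A buggy target returning '-1' in the hope
// of a "very large" cost would trip the assert in the wrapper below.
static int computeCost() { return 3; }

// Mirrors the wrapper shape used for the TTI cost queries in this patch.
static int getSomeCost() {
  int Cost = computeCost();
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int main() { return getSomeCost() == 3 ? 0 : 1; }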

This passes all tests, and is also UBSan clean.

No functional change intended.

Differential Revision: http://reviews.llvm.org/D11741

llvm-svn: 244080
Chandler Carruth 2015-08-05 18:08:10 +00:00
parent 9d06f58b97
commit 98500f2974
16 changed files with 448 additions and 443 deletions


@ -97,11 +97,14 @@ public:
///
/// Many APIs in this interface return a cost. This enum defines the
/// fundamental values that should be used to interpret (and produce) those
/// costs. The costs are returned as an unsigned rather than a member of this
/// costs. The costs are returned as an int rather than a member of this
/// enumeration because it is expected that the cost of one IR instruction
/// may have a multiplicative factor to it or otherwise won't fit directly
/// into the enum. Moreover, it is common to sum or average costs which works
/// better as simple integral values. Thus this enum only provides constants.
/// Also note that the returned costs are signed integers to make it natural
/// to add, subtract, and test with zero (a common boundary condition). It is
/// not expected that 2^32 is a realistic cost to be modeling at any point.
///
/// Note that these costs should usually reflect the intersection of code-size
/// cost and execution cost. A free instruction is typically one that folds
@ -128,16 +131,15 @@ public:
///
/// The returned cost is defined in terms of \c TargetCostConstants, see its
/// comments for a detailed explanation of the cost values.
unsigned getOperationCost(unsigned Opcode, Type *Ty,
Type *OpTy = nullptr) const;
int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy = nullptr) const;
/// \brief Estimate the cost of a GEP operation when lowered.
///
/// The contract for this function is the same as \c getOperationCost except
/// that it supports an interface that provides extra information specific to
/// the GEP operation.
unsigned getGEPCost(Type *PointeeType, const Value *Ptr,
ArrayRef<const Value *> Operands) const;
int getGEPCost(Type *PointeeType, const Value *Ptr,
ArrayRef<const Value *> Operands) const;
/// \brief Estimate the cost of a function call when lowered.
///
@ -148,32 +150,31 @@ public:
/// This is the most basic query for estimating call cost: it only knows the
/// function type and (potentially) the number of arguments at the call site.
/// The latter is only interesting for varargs function types.
unsigned getCallCost(FunctionType *FTy, int NumArgs = -1) const;
int getCallCost(FunctionType *FTy, int NumArgs = -1) const;
/// \brief Estimate the cost of calling a specific function when lowered.
///
/// This overload adds the ability to reason about the particular function
/// being called in the event it is a library call with special lowering.
unsigned getCallCost(const Function *F, int NumArgs = -1) const;
int getCallCost(const Function *F, int NumArgs = -1) const;
/// \brief Estimate the cost of calling a specific function when lowered.
///
/// This overload allows specifying a set of candidate argument values.
unsigned getCallCost(const Function *F,
ArrayRef<const Value *> Arguments) const;
int getCallCost(const Function *F, ArrayRef<const Value *> Arguments) const;
/// \brief Estimate the cost of an intrinsic when lowered.
///
/// Mirrors the \c getCallCost method but uses an intrinsic identifier.
int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<Type *> ParamTys) const;
/// \brief Estimate the cost of an intrinsic when lowered.
///
/// Mirrors the \c getCallCost method but uses an intrinsic identifier.
int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<const Value *> Arguments) const;
/// \brief Estimate the cost of an intrinsic when lowered.
///
/// Mirrors the \c getCallCost method but uses an intrinsic identifier.
unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<Type *> ParamTys) const;
/// \brief Estimate the cost of an intrinsic when lowered.
///
/// Mirrors the \c getCallCost method but uses an intrinsic identifier.
unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<const Value *> Arguments) const;
/// \brief Estimate the cost of a given IR user when lowered.
///
/// This can estimate the cost of either a ConstantExpr or Instruction when
@ -189,7 +190,7 @@ public:
///
/// The returned cost is defined in terms of \c TargetCostConstants, see its
/// comments for a detailed explanation of the cost values.
unsigned getUserCost(const User *U) const;
int getUserCost(const User *U) const;
/// \brief Return true if branch divergence exists.
///
@ -364,19 +365,19 @@ public:
/// \brief Return the expected cost of supporting the floating point operation
/// of the specified type.
unsigned getFPOpCost(Type *Ty) const;
int getFPOpCost(Type *Ty) const;
/// \brief Return the expected cost of materializing for the given integer
/// immediate of the specified type.
unsigned getIntImmCost(const APInt &Imm, Type *Ty) const;
int getIntImmCost(const APInt &Imm, Type *Ty) const;
/// \brief Return the expected cost of materialization for the given integer
/// immediate of the specified type for a given instruction. The cost can be
/// zero if the immediate can be folded into the specified instruction.
unsigned getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
Type *Ty) const;
unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty) const;
int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
Type *Ty) const;
int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty) const;
/// @}
/// \name Vector Target Information
@ -416,43 +417,41 @@ public:
unsigned getMaxInterleaveFactor(unsigned VF) const;
/// \return The expected cost of arithmetic ops, such as mul, xor, fsub, etc.
unsigned
getArithmeticInstrCost(unsigned Opcode, Type *Ty,
OperandValueKind Opd1Info = OK_AnyValue,
OperandValueKind Opd2Info = OK_AnyValue,
OperandValueProperties Opd1PropInfo = OP_None,
OperandValueProperties Opd2PropInfo = OP_None) const;
int getArithmeticInstrCost(
unsigned Opcode, Type *Ty, OperandValueKind Opd1Info = OK_AnyValue,
OperandValueKind Opd2Info = OK_AnyValue,
OperandValueProperties Opd1PropInfo = OP_None,
OperandValueProperties Opd2PropInfo = OP_None) const;
/// \return The cost of a shuffle instruction of kind Kind and of type Tp.
/// The index and subtype parameters are used by the subvector insertion and
/// extraction shuffle kinds.
unsigned getShuffleCost(ShuffleKind Kind, Type *Tp, int Index = 0,
Type *SubTp = nullptr) const;
int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index = 0,
Type *SubTp = nullptr) const;
/// \return The expected cost of cast instructions, such as bitcast, trunc,
/// zext, etc.
unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const;
int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const;
/// \return The expected cost of control-flow related instructions such as
/// Phi, Ret, Br.
unsigned getCFInstrCost(unsigned Opcode) const;
int getCFInstrCost(unsigned Opcode) const;
/// \returns The expected cost of compare and select instructions.
unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
Type *CondTy = nullptr) const;
int getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
Type *CondTy = nullptr) const;
/// \return The expected cost of vector Insert and Extract.
/// Use -1 to indicate that there is no information on the index value.
unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
unsigned Index = -1) const;
int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index = -1) const;
/// \return The cost of Load and Store instructions.
unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace) const;
int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace) const;
/// \return The cost of masked Load and Store instructions.
unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace) const;
int getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace) const;
/// \return The cost of the interleaved memory operation.
/// \p Opcode is the memory operation code
@ -462,11 +461,9 @@ public:
/// load allows gaps)
/// \p Alignment is the alignment of the memory operation
/// \p AddressSpace is address space of the pointer.
unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
unsigned Factor,
ArrayRef<unsigned> Indices,
unsigned Alignment,
unsigned AddressSpace) const;
int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
ArrayRef<unsigned> Indices, unsigned Alignment,
unsigned AddressSpace) const;
/// \brief Calculate the cost of performing a vector reduction.
///
@ -481,16 +478,14 @@ public:
/// Split:
/// (v0, v1, v2, v3)
/// ((v0+v2), (v1+v3), undef, undef)
unsigned getReductionCost(unsigned Opcode, Type *Ty,
bool IsPairwiseForm) const;
int getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwiseForm) const;
/// \returns The cost of Intrinsic instructions.
unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
ArrayRef<Type *> Tys) const;
int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
ArrayRef<Type *> Tys) const;
/// \returns The cost of Call instructions.
unsigned getCallInstrCost(Function *F, Type *RetTy,
ArrayRef<Type *> Tys) const;
int getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) const;
/// \returns The number of pieces into which the provided type must be
/// split during legalization. Zero is returned when the answer is unknown.
@ -503,7 +498,7 @@ public:
/// The 'IsComplex' parameter is a hint that the address computation is likely
/// to involve multiple instructions and as such unlikely to be merged into
/// the address indexing mode.
unsigned getAddressComputationCost(Type *Ty, bool IsComplex = false) const;
int getAddressComputationCost(Type *Ty, bool IsComplex = false) const;
/// \returns The cost, if any, of keeping values of the given types alive
/// over a callsite.
@ -548,18 +543,18 @@ class TargetTransformInfo::Concept {
public:
virtual ~Concept() = 0;
virtual const DataLayout &getDataLayout() const = 0;
virtual unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) = 0;
virtual unsigned getGEPCost(Type *PointeeType, const Value *Ptr,
ArrayRef<const Value *> Operands) = 0;
virtual unsigned getCallCost(FunctionType *FTy, int NumArgs) = 0;
virtual unsigned getCallCost(const Function *F, int NumArgs) = 0;
virtual unsigned getCallCost(const Function *F,
virtual int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) = 0;
virtual int getGEPCost(Type *PointeeType, const Value *Ptr,
ArrayRef<const Value *> Operands) = 0;
virtual int getCallCost(FunctionType *FTy, int NumArgs) = 0;
virtual int getCallCost(const Function *F, int NumArgs) = 0;
virtual int getCallCost(const Function *F,
ArrayRef<const Value *> Arguments) = 0;
virtual int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<Type *> ParamTys) = 0;
virtual int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<const Value *> Arguments) = 0;
virtual unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<Type *> ParamTys) = 0;
virtual unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<const Value *> Arguments) = 0;
virtual unsigned getUserCost(const User *U) = 0;
virtual int getUserCost(const User *U) = 0;
virtual bool hasBranchDivergence() = 0;
virtual bool isSourceOfDivergence(const Value *V) = 0;
virtual bool isLoweredToCall(const Function *F) = 0;
@ -585,12 +580,12 @@ public:
virtual bool enableAggressiveInterleaving(bool LoopHasReductions) = 0;
virtual PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) = 0;
virtual bool haveFastSqrt(Type *Ty) = 0;
virtual unsigned getFPOpCost(Type *Ty) = 0;
virtual unsigned getIntImmCost(const APInt &Imm, Type *Ty) = 0;
virtual unsigned getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
Type *Ty) = 0;
virtual unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx,
const APInt &Imm, Type *Ty) = 0;
virtual int getFPOpCost(Type *Ty) = 0;
virtual int getIntImmCost(const APInt &Imm, Type *Ty) = 0;
virtual int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
Type *Ty) = 0;
virtual int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty) = 0;
virtual unsigned getNumberOfRegisters(bool Vector) = 0;
virtual unsigned getRegisterBitWidth(bool Vector) = 0;
virtual unsigned getMaxInterleaveFactor(unsigned VF) = 0;
@ -599,33 +594,32 @@ public:
OperandValueKind Opd2Info,
OperandValueProperties Opd1PropInfo,
OperandValueProperties Opd2PropInfo) = 0;
virtual unsigned getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) = 0;
virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) = 0;
virtual unsigned getCFInstrCost(unsigned Opcode) = 0;
virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
Type *CondTy) = 0;
virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
unsigned Index) = 0;
virtual unsigned getMemoryOpCost(unsigned Opcode, Type *Src,
unsigned Alignment,
unsigned AddressSpace) = 0;
virtual unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
virtual int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) = 0;
virtual int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) = 0;
virtual int getCFInstrCost(unsigned Opcode) = 0;
virtual int getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
Type *CondTy) = 0;
virtual int getVectorInstrCost(unsigned Opcode, Type *Val,
unsigned Index) = 0;
virtual int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace) = 0;
virtual int getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
unsigned Alignment,
unsigned AddressSpace) = 0;
virtual int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
unsigned Factor,
ArrayRef<unsigned> Indices,
unsigned Alignment,
unsigned AddressSpace) = 0;
virtual unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
unsigned Factor,
ArrayRef<unsigned> Indices,
unsigned Alignment,
unsigned AddressSpace) = 0;
virtual unsigned getReductionCost(unsigned Opcode, Type *Ty,
bool IsPairwiseForm) = 0;
virtual unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
ArrayRef<Type *> Tys) = 0;
virtual unsigned getCallInstrCost(Function *F, Type *RetTy,
virtual int getReductionCost(unsigned Opcode, Type *Ty,
bool IsPairwiseForm) = 0;
virtual int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
ArrayRef<Type *> Tys) = 0;
virtual int getCallInstrCost(Function *F, Type *RetTy,
ArrayRef<Type *> Tys) = 0;
virtual unsigned getNumberOfParts(Type *Tp) = 0;
virtual unsigned getAddressComputationCost(Type *Ty, bool IsComplex) = 0;
virtual int getAddressComputationCost(Type *Ty, bool IsComplex) = 0;
virtual unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) = 0;
virtual bool getTgtMemIntrinsic(IntrinsicInst *Inst,
MemIntrinsicInfo &Info) = 0;
@ -647,32 +641,32 @@ public:
return Impl.getDataLayout();
}
unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) override {
int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) override {
return Impl.getOperationCost(Opcode, Ty, OpTy);
}
unsigned getGEPCost(Type *PointeeType, const Value *Ptr,
ArrayRef<const Value *> Operands) override {
int getGEPCost(Type *PointeeType, const Value *Ptr,
ArrayRef<const Value *> Operands) override {
return Impl.getGEPCost(PointeeType, Ptr, Operands);
}
unsigned getCallCost(FunctionType *FTy, int NumArgs) override {
int getCallCost(FunctionType *FTy, int NumArgs) override {
return Impl.getCallCost(FTy, NumArgs);
}
unsigned getCallCost(const Function *F, int NumArgs) override {
int getCallCost(const Function *F, int NumArgs) override {
return Impl.getCallCost(F, NumArgs);
}
unsigned getCallCost(const Function *F,
ArrayRef<const Value *> Arguments) override {
int getCallCost(const Function *F,
ArrayRef<const Value *> Arguments) override {
return Impl.getCallCost(F, Arguments);
}
unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<Type *> ParamTys) override {
int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<Type *> ParamTys) override {
return Impl.getIntrinsicCost(IID, RetTy, ParamTys);
}
unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<const Value *> Arguments) override {
int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<const Value *> Arguments) override {
return Impl.getIntrinsicCost(IID, RetTy, Arguments);
}
unsigned getUserCost(const User *U) override { return Impl.getUserCost(U); }
int getUserCost(const User *U) override { return Impl.getUserCost(U); }
bool hasBranchDivergence() override { return Impl.hasBranchDivergence(); }
bool isSourceOfDivergence(const Value *V) override {
return Impl.isSourceOfDivergence(V);
@ -730,19 +724,17 @@ public:
}
bool haveFastSqrt(Type *Ty) override { return Impl.haveFastSqrt(Ty); }
unsigned getFPOpCost(Type *Ty) override {
return Impl.getFPOpCost(Ty);
}
int getFPOpCost(Type *Ty) override { return Impl.getFPOpCost(Ty); }
unsigned getIntImmCost(const APInt &Imm, Type *Ty) override {
int getIntImmCost(const APInt &Imm, Type *Ty) override {
return Impl.getIntImmCost(Imm, Ty);
}
unsigned getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
Type *Ty) override {
int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
Type *Ty) override {
return Impl.getIntImmCost(Opc, Idx, Imm, Ty);
}
unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty) override {
int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty) override {
return Impl.getIntImmCost(IID, Idx, Imm, Ty);
}
unsigned getNumberOfRegisters(bool Vector) override {
@ -762,56 +754,52 @@ public:
return Impl.getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
Opd1PropInfo, Opd2PropInfo);
}
unsigned getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) override {
int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) override {
return Impl.getShuffleCost(Kind, Tp, Index, SubTp);
}
unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) override {
int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) override {
return Impl.getCastInstrCost(Opcode, Dst, Src);
}
unsigned getCFInstrCost(unsigned Opcode) override {
int getCFInstrCost(unsigned Opcode) override {
return Impl.getCFInstrCost(Opcode);
}
unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
Type *CondTy) override {
int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) override {
return Impl.getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
unsigned Index) override {
int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) override {
return Impl.getVectorInstrCost(Opcode, Val, Index);
}
unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace) override {
int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace) override {
return Impl.getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
}
unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace) override {
int getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace) override {
return Impl.getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
}
unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
unsigned Factor,
ArrayRef<unsigned> Indices,
unsigned Alignment,
unsigned AddressSpace) override {
int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
ArrayRef<unsigned> Indices, unsigned Alignment,
unsigned AddressSpace) override {
return Impl.getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
Alignment, AddressSpace);
}
unsigned getReductionCost(unsigned Opcode, Type *Ty,
bool IsPairwiseForm) override {
int getReductionCost(unsigned Opcode, Type *Ty,
bool IsPairwiseForm) override {
return Impl.getReductionCost(Opcode, Ty, IsPairwiseForm);
}
unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
ArrayRef<Type *> Tys) override {
int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
ArrayRef<Type *> Tys) override {
return Impl.getIntrinsicInstrCost(ID, RetTy, Tys);
}
unsigned getCallInstrCost(Function *F, Type *RetTy,
ArrayRef<Type *> Tys) override {
int getCallInstrCost(Function *F, Type *RetTy,
ArrayRef<Type *> Tys) override {
return Impl.getCallInstrCost(F, RetTy, Tys);
}
unsigned getNumberOfParts(Type *Tp) override {
return Impl.getNumberOfParts(Tp);
}
unsigned getAddressComputationCost(Type *Ty, bool IsComplex) override {
int getAddressComputationCost(Type *Ty, bool IsComplex) override {
return Impl.getAddressComputationCost(Ty, IsComplex);
}
unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) override {


@ -1017,8 +1017,8 @@ public:
int InstructionOpcodeToISD(unsigned Opcode) const;
/// Estimate the cost of type-legalization and the legalized type.
std::pair<unsigned, MVT> getTypeLegalizationCost(const DataLayout &DL,
Type *Ty) const;
std::pair<int, MVT> getTypeLegalizationCost(const DataLayout &DL,
Type *Ty) const;
/// @}


@ -46,30 +46,37 @@ TargetTransformInfo &TargetTransformInfo::operator=(TargetTransformInfo &&RHS) {
return *this;
}
unsigned TargetTransformInfo::getOperationCost(unsigned Opcode, Type *Ty,
Type *OpTy) const {
return TTIImpl->getOperationCost(Opcode, Ty, OpTy);
int TargetTransformInfo::getOperationCost(unsigned Opcode, Type *Ty,
Type *OpTy) const {
int Cost = TTIImpl->getOperationCost(Opcode, Ty, OpTy);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
unsigned TargetTransformInfo::getCallCost(FunctionType *FTy,
int NumArgs) const {
return TTIImpl->getCallCost(FTy, NumArgs);
int TargetTransformInfo::getCallCost(FunctionType *FTy, int NumArgs) const {
int Cost = TTIImpl->getCallCost(FTy, NumArgs);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
unsigned
TargetTransformInfo::getCallCost(const Function *F,
ArrayRef<const Value *> Arguments) const {
return TTIImpl->getCallCost(F, Arguments);
int TargetTransformInfo::getCallCost(const Function *F,
ArrayRef<const Value *> Arguments) const {
int Cost = TTIImpl->getCallCost(F, Arguments);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
unsigned
TargetTransformInfo::getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<const Value *> Arguments) const {
return TTIImpl->getIntrinsicCost(IID, RetTy, Arguments);
int TargetTransformInfo::getIntrinsicCost(
Intrinsic::ID IID, Type *RetTy, ArrayRef<const Value *> Arguments) const {
int Cost = TTIImpl->getIntrinsicCost(IID, RetTy, Arguments);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
unsigned TargetTransformInfo::getUserCost(const User *U) const {
return TTIImpl->getUserCost(U);
int TargetTransformInfo::getUserCost(const User *U) const {
int Cost = TTIImpl->getUserCost(U);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
bool TargetTransformInfo::hasBranchDivergence() const {
@ -121,8 +128,10 @@ int TargetTransformInfo::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
bool HasBaseReg,
int64_t Scale,
unsigned AddrSpace) const {
return TTIImpl->getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg,
Scale, AddrSpace);
int Cost = TTIImpl->getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg,
Scale, AddrSpace);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
bool TargetTransformInfo::isTruncateFree(Type *Ty1, Type *Ty2) const {
@ -166,22 +175,30 @@ bool TargetTransformInfo::haveFastSqrt(Type *Ty) const {
return TTIImpl->haveFastSqrt(Ty);
}
unsigned TargetTransformInfo::getFPOpCost(Type *Ty) const {
return TTIImpl->getFPOpCost(Ty);
int TargetTransformInfo::getFPOpCost(Type *Ty) const {
int Cost = TTIImpl->getFPOpCost(Ty);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
unsigned TargetTransformInfo::getIntImmCost(const APInt &Imm, Type *Ty) const {
return TTIImpl->getIntImmCost(Imm, Ty);
int TargetTransformInfo::getIntImmCost(const APInt &Imm, Type *Ty) const {
int Cost = TTIImpl->getIntImmCost(Imm, Ty);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
unsigned TargetTransformInfo::getIntImmCost(unsigned Opcode, unsigned Idx,
const APInt &Imm, Type *Ty) const {
return TTIImpl->getIntImmCost(Opcode, Idx, Imm, Ty);
int TargetTransformInfo::getIntImmCost(unsigned Opcode, unsigned Idx,
const APInt &Imm, Type *Ty) const {
int Cost = TTIImpl->getIntImmCost(Opcode, Idx, Imm, Ty);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
unsigned TargetTransformInfo::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
const APInt &Imm, Type *Ty) const {
return TTIImpl->getIntImmCost(IID, Idx, Imm, Ty);
int TargetTransformInfo::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
const APInt &Imm, Type *Ty) const {
int Cost = TTIImpl->getIntImmCost(IID, Idx, Imm, Ty);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
unsigned TargetTransformInfo::getNumberOfRegisters(bool Vector) const {
@ -196,81 +213,106 @@ unsigned TargetTransformInfo::getMaxInterleaveFactor(unsigned VF) const {
return TTIImpl->getMaxInterleaveFactor(VF);
}
unsigned TargetTransformInfo::getArithmeticInstrCost(
int TargetTransformInfo::getArithmeticInstrCost(
unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
OperandValueKind Opd2Info, OperandValueProperties Opd1PropInfo,
OperandValueProperties Opd2PropInfo) const {
return TTIImpl->getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
Opd1PropInfo, Opd2PropInfo);
int Cost = TTIImpl->getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
Opd1PropInfo, Opd2PropInfo);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
unsigned TargetTransformInfo::getShuffleCost(ShuffleKind Kind, Type *Ty,
int Index, Type *SubTp) const {
return TTIImpl->getShuffleCost(Kind, Ty, Index, SubTp);
int TargetTransformInfo::getShuffleCost(ShuffleKind Kind, Type *Ty, int Index,
Type *SubTp) const {
int Cost = TTIImpl->getShuffleCost(Kind, Ty, Index, SubTp);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
unsigned TargetTransformInfo::getCastInstrCost(unsigned Opcode, Type *Dst,
Type *Src) const {
return TTIImpl->getCastInstrCost(Opcode, Dst, Src);
int TargetTransformInfo::getCastInstrCost(unsigned Opcode, Type *Dst,
Type *Src) const {
int Cost = TTIImpl->getCastInstrCost(Opcode, Dst, Src);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
unsigned TargetTransformInfo::getCFInstrCost(unsigned Opcode) const {
return TTIImpl->getCFInstrCost(Opcode);
int TargetTransformInfo::getCFInstrCost(unsigned Opcode) const {
int Cost = TTIImpl->getCFInstrCost(Opcode);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
unsigned TargetTransformInfo::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
Type *CondTy) const {
return TTIImpl->getCmpSelInstrCost(Opcode, ValTy, CondTy);
int TargetTransformInfo::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
Type *CondTy) const {
int Cost = TTIImpl->getCmpSelInstrCost(Opcode, ValTy, CondTy);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
unsigned TargetTransformInfo::getVectorInstrCost(unsigned Opcode, Type *Val,
unsigned Index) const {
return TTIImpl->getVectorInstrCost(Opcode, Val, Index);
int TargetTransformInfo::getVectorInstrCost(unsigned Opcode, Type *Val,
unsigned Index) const {
int Cost = TTIImpl->getVectorInstrCost(Opcode, Val, Index);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
unsigned TargetTransformInfo::getMemoryOpCost(unsigned Opcode, Type *Src,
unsigned Alignment,
unsigned AddressSpace) const {
return TTIImpl->getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
int TargetTransformInfo::getMemoryOpCost(unsigned Opcode, Type *Src,
unsigned Alignment,
unsigned AddressSpace) const {
int Cost = TTIImpl->getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
unsigned
TargetTransformInfo::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
unsigned Alignment,
unsigned AddressSpace) const {
return TTIImpl->getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
int TargetTransformInfo::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
unsigned Alignment,
unsigned AddressSpace) const {
int Cost =
TTIImpl->getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
unsigned TargetTransformInfo::getInterleavedMemoryOpCost(
int TargetTransformInfo::getInterleavedMemoryOpCost(
unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
unsigned Alignment, unsigned AddressSpace) const {
return TTIImpl->getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
Alignment, AddressSpace);
int Cost = TTIImpl->getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
Alignment, AddressSpace);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
unsigned
TargetTransformInfo::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
ArrayRef<Type *> Tys) const {
return TTIImpl->getIntrinsicInstrCost(ID, RetTy, Tys);
}
unsigned TargetTransformInfo::getCallInstrCost(Function *F, Type *RetTy,
int TargetTransformInfo::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
ArrayRef<Type *> Tys) const {
return TTIImpl->getCallInstrCost(F, RetTy, Tys);
int Cost = TTIImpl->getIntrinsicInstrCost(ID, RetTy, Tys);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
int TargetTransformInfo::getCallInstrCost(Function *F, Type *RetTy,
ArrayRef<Type *> Tys) const {
int Cost = TTIImpl->getCallInstrCost(F, RetTy, Tys);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
unsigned TargetTransformInfo::getNumberOfParts(Type *Tp) const {
return TTIImpl->getNumberOfParts(Tp);
}
unsigned TargetTransformInfo::getAddressComputationCost(Type *Tp,
bool IsComplex) const {
return TTIImpl->getAddressComputationCost(Tp, IsComplex);
int TargetTransformInfo::getAddressComputationCost(Type *Tp,
bool IsComplex) const {
int Cost = TTIImpl->getAddressComputationCost(Tp, IsComplex);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
unsigned TargetTransformInfo::getReductionCost(unsigned Opcode, Type *Ty,
bool IsPairwiseForm) const {
return TTIImpl->getReductionCost(Opcode, Ty, IsPairwiseForm);
int TargetTransformInfo::getReductionCost(unsigned Opcode, Type *Ty,
bool IsPairwiseForm) const {
int Cost = TTIImpl->getReductionCost(Opcode, Ty, IsPairwiseForm);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
unsigned


@ -1634,13 +1634,13 @@ int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
llvm_unreachable("Unknown instruction type encountered!");
}
std::pair<unsigned, MVT>
std::pair<int, MVT>
TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
Type *Ty) const {
LLVMContext &C = Ty->getContext();
EVT MTy = getValueType(DL, Ty);
unsigned Cost = 1;
int Cost = 1;
// We keep legalizing the type until we find a legal kind. We assume that
// the only operation that costs anything is the split. After splitting
// we need to handle two types.


@ -23,7 +23,7 @@ using namespace llvm;
/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
unsigned AArch64TTIImpl::getIntImmCost(int64_t Val) {
int AArch64TTIImpl::getIntImmCost(int64_t Val) {
// Check if the immediate can be encoded within an instruction.
if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
return 0;
@ -37,7 +37,7 @@ unsigned AArch64TTIImpl::getIntImmCost(int64_t Val) {
}
/// \brief Calculate the cost of materializing the given constant.
unsigned AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
int AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
assert(Ty->isIntegerTy());
unsigned BitSize = Ty->getPrimitiveSizeInBits();
@ -51,18 +51,18 @@ unsigned AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
// Split the constant into 64-bit chunks and calculate the cost for each
// chunk.
unsigned Cost = 0;
int Cost = 0;
for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
int64_t Val = Tmp.getSExtValue();
Cost += getIntImmCost(Val);
}
// We need at least one instruction to materialize the constant.
return std::max(1U, Cost);
return std::max(1, Cost);
}
unsigned AArch64TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
const APInt &Imm, Type *Ty) {
int AArch64TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
const APInt &Imm, Type *Ty) {
assert(Ty->isIntegerTy());
unsigned BitSize = Ty->getPrimitiveSizeInBits();
@ -118,17 +118,17 @@ unsigned AArch64TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
}
if (Idx == ImmIdx) {
unsigned NumConstants = (BitSize + 63) / 64;
unsigned Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
int NumConstants = (BitSize + 63) / 64;
int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
return (Cost <= NumConstants * TTI::TCC_Basic)
? static_cast<unsigned>(TTI::TCC_Free)
? static_cast<int>(TTI::TCC_Free)
: Cost;
}
return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}
unsigned AArch64TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
const APInt &Imm, Type *Ty) {
int AArch64TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
const APInt &Imm, Type *Ty) {
assert(Ty->isIntegerTy());
unsigned BitSize = Ty->getPrimitiveSizeInBits();
@ -147,10 +147,10 @@ unsigned AArch64TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
case Intrinsic::smul_with_overflow:
case Intrinsic::umul_with_overflow:
if (Idx == 1) {
unsigned NumConstants = (BitSize + 63) / 64;
unsigned Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
int NumConstants = (BitSize + 63) / 64;
int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
return (Cost <= NumConstants * TTI::TCC_Basic)
? static_cast<unsigned>(TTI::TCC_Free)
? static_cast<int>(TTI::TCC_Free)
: Cost;
}
break;
@ -176,8 +176,7 @@ AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
return TTI::PSK_Software;
}
unsigned AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
Type *Src) {
int AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
@ -259,13 +258,13 @@ unsigned AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
return BaseT::getCastInstrCost(Opcode, Dst, Src);
}
unsigned AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
unsigned Index) {
int AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
unsigned Index) {
assert(Val->isVectorTy() && "This must be a vector type");
if (Index != -1U) {
// Legalize the type.
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
// This type is legalized to a scalar type.
if (!LT.second.isVector())
@ -284,12 +283,12 @@ unsigned AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
return 2;
}
unsigned AArch64TTIImpl::getArithmeticInstrCost(
int AArch64TTIImpl::getArithmeticInstrCost(
unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
TTI::OperandValueProperties Opd2PropInfo) {
// Legalize the type.
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
int ISD = TLI->InstructionOpcodeToISD(Opcode);
@ -300,10 +299,9 @@ unsigned AArch64TTIImpl::getArithmeticInstrCost(
// normally expanded to the sequence ADD + CMP + SELECT + SRA.
// The OperandValue properties may not be the same as that of the previous
// operation; conservatively assume OP_None.
unsigned Cost =
getArithmeticInstrCost(Instruction::Add, Ty, Opd1Info, Opd2Info,
TargetTransformInfo::OP_None,
TargetTransformInfo::OP_None);
int Cost = getArithmeticInstrCost(Instruction::Add, Ty, Opd1Info, Opd2Info,
TargetTransformInfo::OP_None,
TargetTransformInfo::OP_None);
Cost += getArithmeticInstrCost(Instruction::Sub, Ty, Opd1Info, Opd2Info,
TargetTransformInfo::OP_None,
TargetTransformInfo::OP_None);
@ -331,7 +329,7 @@ unsigned AArch64TTIImpl::getArithmeticInstrCost(
}
}
unsigned AArch64TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
int AArch64TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
// Address computations in vectorized code with non-consecutive addresses will
// likely result in more instructions compared to scalar code where the
// computation can more often be merged into the index mode. The resulting
@ -346,14 +344,14 @@ unsigned AArch64TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
return 1;
}
unsigned AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
Type *CondTy) {
int AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
Type *CondTy) {
int ISD = TLI->InstructionOpcodeToISD(Opcode);
// We don't lower vector selects well that are wider than the register width.
if (ValTy->isVectorTy() && ISD == ISD::SELECT) {
// We would need this many instructions to hide the scalarization happening.
const unsigned AmortizationCost = 20;
const int AmortizationCost = 20;
static const TypeConversionCostTblEntry<MVT::SimpleValueType>
VectorSelectTbl[] = {
{ ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 * AmortizationCost },
@ -377,10 +375,9 @@ unsigned AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
unsigned AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
unsigned Alignment,
unsigned AddressSpace) {
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
unsigned Alignment, unsigned AddressSpace) {
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
if (Opcode == Instruction::Store && Src->isVectorTy() && Alignment != 16 &&
Src->getVectorElementType()->isIntegerTy(64)) {
@ -389,7 +386,7 @@ unsigned AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
// practice on inlined memcpy code.
// We make v2i64 stores expensive so that we will only vectorize if there
// are 6 other instructions getting vectorized.
unsigned AmortizationCost = 6;
int AmortizationCost = 6;
return LT.first * 2 * AmortizationCost;
}
@ -407,9 +404,11 @@ unsigned AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
return LT.first;
}
unsigned AArch64TTIImpl::getInterleavedMemoryOpCost(
unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
unsigned Alignment, unsigned AddressSpace) {
int AArch64TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
unsigned Factor,
ArrayRef<unsigned> Indices,
unsigned Alignment,
unsigned AddressSpace) {
assert(Factor >= 2 && "Invalid interleave factor");
assert(isa<VectorType>(VecTy) && "Expect a vector type");
@ -427,8 +426,8 @@ unsigned AArch64TTIImpl::getInterleavedMemoryOpCost(
Alignment, AddressSpace);
}
unsigned AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
unsigned Cost = 0;
int AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
int Cost = 0;
for (auto *I : Tys) {
if (!I->isVectorTy())
continue;


@ -63,12 +63,11 @@ public:
/// @{
using BaseT::getIntImmCost;
unsigned getIntImmCost(int64_t Val);
unsigned getIntImmCost(const APInt &Imm, Type *Ty);
unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
Type *Ty);
unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty);
int getIntImmCost(int64_t Val);
int getIntImmCost(const APInt &Imm, Type *Ty);
int getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty);
int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty);
TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);
/// @}
@ -96,25 +95,25 @@ public:
unsigned getMaxInterleaveFactor(unsigned VF);
unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src);
int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src);
unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
unsigned getArithmeticInstrCost(
int getArithmeticInstrCost(
unsigned Opcode, Type *Ty,
TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None);
unsigned getAddressComputationCost(Type *Ty, bool IsComplex);
int getAddressComputationCost(Type *Ty, bool IsComplex);
unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy);
int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy);
unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace);
int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace);
unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys);
int getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys);
void getUnrollingPreferences(Loop *L, TTI::UnrollingPreferences &UP);
@ -123,11 +122,9 @@ public:
bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info);
unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
unsigned Factor,
ArrayRef<unsigned> Indices,
unsigned Alignment,
unsigned AddressSpace);
int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
ArrayRef<unsigned> Indices, unsigned Alignment,
unsigned AddressSpace);
/// @}
};


@ -15,7 +15,7 @@ using namespace llvm;
#define DEBUG_TYPE "armtti"
unsigned ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
assert(Ty->isIntegerTy());
unsigned Bits = Ty->getPrimitiveSizeInBits();
@ -47,7 +47,7 @@ unsigned ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
return 3;
}
unsigned ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
@ -61,7 +61,7 @@ unsigned ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
if (Src->isVectorTy() && ST->hasNEON() && (ISD == ISD::FP_ROUND ||
ISD == ISD::FP_EXTEND)) {
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
int Idx = CostTableLookup(NEONFltDblTbl, ISD, LT.second);
if (Idx != -1)
return LT.first * NEONFltDblTbl[Idx].Cost;
@ -245,8 +245,8 @@ unsigned ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
return BaseT::getCastInstrCost(Opcode, Dst, Src);
}
unsigned ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
unsigned Index) {
int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
unsigned Index) {
// Penalize inserting into a D-subregister. We end up with a three times
// lower estimated throughput on swift.
if (ST->isSwift() &&
@ -265,8 +265,7 @@ unsigned ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
}
unsigned ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
Type *CondTy) {
int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
int ISD = TLI->InstructionOpcodeToISD(Opcode);
// On NEON a vector select gets lowered to vbsl.
@ -292,14 +291,14 @@ unsigned ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
return NEONVectorSelectTbl[Idx].Cost;
}
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
return LT.first;
}
return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
unsigned ARMTTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
int ARMTTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
// Address computations in vectorized code with non-consecutive addresses will
// likely result in more instructions compared to scalar code where the
// computation can more often be merged into the index mode. The resulting
@ -314,7 +313,7 @@ unsigned ARMTTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
return 1;
}
unsigned ARMTTIImpl::getFPOpCost(Type *Ty) {
int ARMTTIImpl::getFPOpCost(Type *Ty) {
// Use similar logic that's in ARMISelLowering:
// Any ARM CPU with VFP2 has floating point, but Thumb1 didn't have access
// to VFP.
@ -333,8 +332,8 @@ unsigned ARMTTIImpl::getFPOpCost(Type *Ty) {
return TargetTransformInfo::TCC_Expensive;
}
unsigned ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) {
int ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) {
// We only handle costs of reverse and alternate shuffles for now.
if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
@ -353,7 +352,7 @@ unsigned ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
{ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
{ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
int Idx = CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
if (Idx == -1)
@ -379,7 +378,7 @@ unsigned ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
{ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
int Idx =
CostTableLookup(NEONAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
if (Idx == -1)
@ -389,13 +388,13 @@ unsigned ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
unsigned ARMTTIImpl::getArithmeticInstrCost(
int ARMTTIImpl::getArithmeticInstrCost(
unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
TTI::OperandValueProperties Opd2PropInfo) {
int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
const unsigned FunctionCallDivCost = 20;
const unsigned ReciprocalDivCost = 10;
@ -448,8 +447,8 @@ unsigned ARMTTIImpl::getArithmeticInstrCost(
if (Idx != -1)
return LT.first * CostTbl[Idx].Cost;
unsigned Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
Opd1PropInfo, Opd2PropInfo);
int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
Opd1PropInfo, Opd2PropInfo);
// This is somewhat of a hack. The problem that we are facing is that SROA
// creates a sequence of shift, and, or instructions to construct values.
@ -465,10 +464,9 @@ unsigned ARMTTIImpl::getArithmeticInstrCost(
return Cost;
}
unsigned ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
unsigned Alignment,
unsigned AddressSpace) {
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace) {
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
if (Src->isVectorTy() && Alignment != 16 &&
Src->getVectorElementType()->isDoubleTy()) {
@ -479,11 +477,11 @@ unsigned ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
return LT.first;
}
unsigned ARMTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
unsigned Factor,
ArrayRef<unsigned> Indices,
unsigned Alignment,
unsigned AddressSpace) {
int ARMTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
unsigned Factor,
ArrayRef<unsigned> Indices,
unsigned Alignment,
unsigned AddressSpace) {
assert(Factor >= 2 && "Invalid interleave factor");
assert(isa<VectorType>(VecTy) && "Expect a vector type");


@ -56,7 +56,7 @@ public:
/// @{
using BaseT::getIntImmCost;
unsigned getIntImmCost(const APInt &Imm, Type *Ty);
int getIntImmCost(const APInt &Imm, Type *Ty);
/// @}
@ -92,34 +92,31 @@ public:
return 1;
}
unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp);
int getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index, Type *SubTp);
unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src);
int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src);
unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy);
int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy);
unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
unsigned getAddressComputationCost(Type *Val, bool IsComplex);
int getAddressComputationCost(Type *Val, bool IsComplex);
unsigned getFPOpCost(Type *Ty);
int getFPOpCost(Type *Ty);
unsigned getArithmeticInstrCost(
int getArithmeticInstrCost(
unsigned Opcode, Type *Ty,
TTI::OperandValueKind Op1Info = TTI::OK_AnyValue,
TTI::OperandValueKind Op2Info = TTI::OK_AnyValue,
TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None);
unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace);
int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace);
unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
unsigned Factor,
ArrayRef<unsigned> Indices,
unsigned Alignment,
unsigned AddressSpace);
int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
ArrayRef<unsigned> Indices, unsigned Alignment,
unsigned AddressSpace);
/// @}
};


@ -89,12 +89,12 @@ bool NVPTXTTIImpl::isSourceOfDivergence(const Value *V) {
return false;
}
unsigned NVPTXTTIImpl::getArithmeticInstrCost(
int NVPTXTTIImpl::getArithmeticInstrCost(
unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
TTI::OperandValueProperties Opd2PropInfo) {
// Legalize the type.
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
int ISD = TLI->InstructionOpcodeToISD(Opcode);


@ -52,7 +52,7 @@ public:
bool isSourceOfDivergence(const Value *V);
unsigned getArithmeticInstrCost(
int getArithmeticInstrCost(
unsigned Opcode, Type *Ty,
TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,


@ -35,7 +35,7 @@ PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
return TTI::PSK_Software;
}
unsigned PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
int PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
if (DisablePPCConstHoist)
return BaseT::getIntImmCost(Imm, Ty);
@ -64,8 +64,8 @@ unsigned PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
return 4 * TTI::TCC_Basic;
}
unsigned PPCTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
const APInt &Imm, Type *Ty) {
int PPCTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty) {
if (DisablePPCConstHoist)
return BaseT::getIntImmCost(IID, Idx, Imm, Ty);
@ -98,8 +98,8 @@ unsigned PPCTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
return PPCTTIImpl::getIntImmCost(Imm, Ty);
}
unsigned PPCTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
const APInt &Imm, Type *Ty) {
int PPCTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
Type *Ty) {
if (DisablePPCConstHoist)
return BaseT::getIntImmCost(Opcode, Idx, Imm, Ty);
@ -246,7 +246,7 @@ unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
return 2;
}
unsigned PPCTTIImpl::getArithmeticInstrCost(
int PPCTTIImpl::getArithmeticInstrCost(
unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
TTI::OperandValueProperties Opd2PropInfo) {
@ -257,24 +257,22 @@ unsigned PPCTTIImpl::getArithmeticInstrCost(
Opd1PropInfo, Opd2PropInfo);
}
unsigned PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) {
int PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) {
return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
unsigned PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
int PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");
return BaseT::getCastInstrCost(Opcode, Dst, Src);
}
unsigned PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
Type *CondTy) {
int PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
unsigned PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
unsigned Index) {
int PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
assert(Val->isVectorTy() && "This must be a vector type");
int ISD = TLI->InstructionOpcodeToISD(Opcode);
@ -313,15 +311,14 @@ unsigned PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
return BaseT::getVectorInstrCost(Opcode, Val, Index);
}
unsigned PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
unsigned Alignment,
unsigned AddressSpace) {
int PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace) {
// Legalize the type.
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
"Invalid Opcode");
unsigned Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
int Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
// VSX loads/stores support unaligned access.
if (ST->hasVSX()) {


@ -52,12 +52,11 @@ public:
/// @{
using BaseT::getIntImmCost;
unsigned getIntImmCost(const APInt &Imm, Type *Ty);
int getIntImmCost(const APInt &Imm, Type *Ty);
unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
Type *Ty);
unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty);
int getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty);
int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty);
TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);
void getUnrollingPreferences(Loop *L, TTI::UnrollingPreferences &UP);
@ -71,19 +70,18 @@ public:
unsigned getNumberOfRegisters(bool Vector);
unsigned getRegisterBitWidth(bool Vector);
unsigned getMaxInterleaveFactor(unsigned VF);
unsigned getArithmeticInstrCost(
int getArithmeticInstrCost(
unsigned Opcode, Type *Ty,
TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None);
unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp);
unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src);
unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy);
unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace);
int getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index, Type *SubTp);
int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src);
int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy);
int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace);
/// @}
};

lib/Target/SystemZ/SystemZTargetTransformInfo.cpp

@@ -31,7 +31,7 @@ using namespace llvm;
//
//===----------------------------------------------------------------------===//
unsigned SystemZTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
int SystemZTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
assert(Ty->isIntegerTy());
unsigned BitSize = Ty->getPrimitiveSizeInBits();
@@ -63,8 +63,8 @@ unsigned SystemZTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
return 4 * TTI::TCC_Basic;
}
unsigned SystemZTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
const APInt &Imm, Type *Ty) {
int SystemZTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
const APInt &Imm, Type *Ty) {
assert(Ty->isIntegerTy());
unsigned BitSize = Ty->getPrimitiveSizeInBits();
@@ -181,8 +181,8 @@ unsigned SystemZTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
return SystemZTTIImpl::getIntImmCost(Imm, Ty);
}
unsigned SystemZTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
const APInt &Imm, Type *Ty) {
int SystemZTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
const APInt &Imm, Type *Ty) {
assert(Ty->isIntegerTy());
unsigned BitSize = Ty->getPrimitiveSizeInBits();

lib/Target/SystemZ/SystemZTargetTransformInfo.h

@@ -42,12 +42,11 @@ public:
/// \name Scalar TTI Implementations
/// @{
unsigned getIntImmCost(const APInt &Imm, Type *Ty);
int getIntImmCost(const APInt &Imm, Type *Ty);
unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
Type *Ty);
unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty);
int getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty);
int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty);
TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);

lib/Target/X86/X86TargetTransformInfo.cpp

@@ -84,12 +84,12 @@ unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
return 2;
}
unsigned X86TTIImpl::getArithmeticInstrCost(
int X86TTIImpl::getArithmeticInstrCost(
unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
TTI::OperandValueProperties Opd2PropInfo) {
// Legalize the type.
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
@@ -101,10 +101,9 @@ unsigned X86TTIImpl::getArithmeticInstrCost(
// normally expanded to the sequence SRA + SRL + ADD + SRA.
// The OperandValue properties may not be the same as those of the
// previous operation; conservatively assume OP_None.
unsigned Cost =
2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info, Op2Info,
TargetTransformInfo::OP_None,
TargetTransformInfo::OP_None);
int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
Op2Info, TargetTransformInfo::OP_None,
TargetTransformInfo::OP_None);
Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
TargetTransformInfo::OP_None,
TargetTransformInfo::OP_None);
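Reading this together with the comment above (SRA + SRL + ADD + SRA), a worked instance, assuming a unit cost per legalized shift and add; the ADD term is accumulated in the part of this function elided by the hunk:

  // sdiv %x, C, with C a power of two:
  //   Cost = 2 * Cost(AShr) + Cost(LShr) + Cost(Add)
  //        = 2 * 1          + 1          + 1          = 4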
@@ -349,15 +348,15 @@ unsigned X86TTIImpl::getArithmeticInstrCost(
return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}
unsigned X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) {
int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) {
// We only estimate the cost of reverse and alternate shuffles.
if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
if (Kind == TTI::SK_Reverse) {
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
unsigned Cost = 1;
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
int Cost = 1;
if (LT.second.getSizeInBits() > 128)
Cost = 3; // Extract + insert + copy.
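Concretely, the reverse-shuffle model above prices, for example:

  //   SK_Reverse of v4i32 (128-bit legal type) -> Cost = 1 (one shuffle)
  //   SK_Reverse of v8i32 (256-bit legal type) -> Cost = 3 (extract + insert + copy)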
@@ -368,7 +367,7 @@ unsigned X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
if (Kind == TTI::SK_Alternate) {
// 64-bit packed float vectors (v2f32) are widened to type v4f32.
// 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
// The backend knows how to generate a single VEX.256 version of
// instruction VPBLENDW if the target supports AVX2.
@@ -464,7 +463,7 @@ unsigned X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
unsigned X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
@@ -628,8 +627,8 @@ unsigned X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
{ ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
};
std::pair<unsigned, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
std::pair<unsigned, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);
std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);
if (ST->hasSSE2() && !ST->hasAVX()) {
int Idx =
@@ -669,10 +668,9 @@ unsigned X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
return BaseT::getCastInstrCost(Opcode, Dst, Src);
}
unsigned X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
Type *CondTy) {
int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
// Legalize the type.
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
MVT MTy = LT.second;
@@ -739,13 +737,12 @@ unsigned X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
unsigned X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
unsigned Index) {
int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
assert(Val->isVectorTy() && "This must be a vector type");
if (Index != -1U) {
// Legalize the type.
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
// This type is legalized to a scalar type.
if (!LT.second.isVector())
@@ -763,10 +760,9 @@ unsigned X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
return BaseT::getVectorInstrCost(Opcode, Val, Index);
}
unsigned X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert,
bool Extract) {
int X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
assert (Ty->isVectorTy() && "Can only scalarize vectors");
unsigned Cost = 0;
int Cost = 0;
for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
if (Insert)
@@ -778,9 +774,8 @@ unsigned X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert,
return Cost;
}
unsigned X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
unsigned Alignment,
unsigned AddressSpace) {
int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace) {
// Handle non-power-of-two vectors such as <3 x float>
if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
unsigned NumElem = VTy->getVectorNumElements();
@@ -798,22 +793,21 @@ unsigned X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
// Assume that all other non-power-of-two numbers are scalarized.
if (!isPowerOf2_32(NumElem)) {
unsigned Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(),
Alignment, AddressSpace);
unsigned SplitCost = getScalarizationOverhead(Src,
Opcode == Instruction::Load,
Opcode==Instruction::Store);
int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
AddressSpace);
int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load,
Opcode == Instruction::Store);
return NumElem * Cost + SplitCost;
}
}
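For instance, a load of <3 x float> takes this path, since 3 is not a power of two; with a unit scalar-load cost the model charges a total sketched as:

  //   3 * ScalarLoadCost
  //     + getScalarizationOverhead(<3 x float>, /*Insert=*/true, /*Extract=*/false)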
// Legalize the type.
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
"Invalid Opcode");
// Each load/store unit costs 1.
unsigned Cost = LT.first * 1;
int Cost = LT.first * 1;
// On Sandy Bridge, 256-bit load/stores are double pumped
// (but not on Haswell).
@@ -823,9 +817,9 @@ unsigned X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
return Cost;
}
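A worked reading, on the assumption that the lines elided between the comment and the return double the cost on Sandy Bridge as the comment describes:

  //   256-bit vector load, one legalization step (LT.first == 1):
  //     Haswell:      Cost = 1
  //     Sandy Bridge: Cost = 2 (double pumped)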
unsigned X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
unsigned Alignment,
unsigned AddressSpace) {
int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
unsigned Alignment,
unsigned AddressSpace) {
VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
if (!SrcVTy)
// To calculate the scalar cost, take the regular cost without the mask.
@@ -838,25 +832,23 @@ unsigned X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
(Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy, 1)) ||
!isPowerOf2_32(NumElem)) {
// Scalarization
unsigned MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
unsigned ScalarCompareCost =
getCmpSelInstrCost(Instruction::ICmp,
Type::getInt8Ty(getGlobalContext()), NULL);
unsigned BranchCost = getCFInstrCost(Instruction::Br);
unsigned MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
int ScalarCompareCost = getCmpSelInstrCost(
Instruction::ICmp, Type::getInt8Ty(getGlobalContext()), NULL);
int BranchCost = getCFInstrCost(Instruction::Br);
int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
unsigned ValueSplitCost =
getScalarizationOverhead(SrcVTy, Opcode == Instruction::Load,
Opcode == Instruction::Store);
unsigned MemopCost =
int ValueSplitCost = getScalarizationOverhead(
SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
int MemopCost =
NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
Alignment, AddressSpace);
return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
}
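Spelling the total out for, say, a masked load of <4 x i32> that fails the legality or power-of-two checks (unit costs assumed purely for illustration):

  //   MaskSplitCost  : extract the 4 mask lanes
  //   MaskCmpCost    : 4 * (scalar icmp + branch)
  //   ValueSplitCost : insert the 4 loaded lanes
  //   MemopCost      : 4 * scalar-load cost
  //   Total = MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost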
// Legalize the type.
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
unsigned Cost = 0;
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
int Cost = 0;
if (LT.second != TLI->getValueType(DL, SrcVTy).getSimpleVT() &&
LT.second.getVectorNumElements() == NumElem)
// Promotion requires expand/truncate for data and a shuffle for mask.
@@ -876,7 +868,7 @@ unsigned X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
return Cost+LT.first;
}
unsigned X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
int X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
// Address computations in vectorized code with non-consecutive addresses will
// likely result in more instructions compared to scalar code where the
// computation can more often be merged into the index mode. The resulting
@@ -889,10 +881,10 @@ unsigned X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
return BaseT::getAddressComputationCost(Ty, IsComplex);
}
unsigned X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
bool IsPairwise) {
int X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
bool IsPairwise) {
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
MVT MTy = LT.second;
@@ -972,7 +964,7 @@ unsigned X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
unsigned X86TTIImpl::getIntImmCost(int64_t Val) {
int X86TTIImpl::getIntImmCost(int64_t Val) {
if (Val == 0)
return TTI::TCC_Free;
@@ -982,7 +974,7 @@ unsigned X86TTIImpl::getIntImmCost(int64_t Val) {
return 2 * TTI::TCC_Basic;
}
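Putting the visible pieces together, the helper grades materialization roughly as below; the middle tier is elided from this hunk, so its exact condition is an assumption:

  //   Val == 0                 -> TCC_Free      (zero idiom)
  //   imm encodable in one mov -> TCC_Basic     (assumed elided check)
  //   otherwise                -> 2 * TCC_Basic (e.g. movabs or a pair)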
unsigned X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
assert(Ty->isIntegerTy());
unsigned BitSize = Ty->getPrimitiveSizeInBits();
@@ -1006,18 +998,18 @@ unsigned X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
// Split the constant into 64-bit chunks and calculate the cost for each
// chunk.
unsigned Cost = 0;
int Cost = 0;
for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
int64_t Val = Tmp.getSExtValue();
Cost += getIntImmCost(Val);
}
// We need at least one instruction to materialize the constant.
return std::max(1U, Cost);
return std::max(1, Cost);
}
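So a 128-bit immediate, for example, is priced as the sum of its two 64-bit chunks, with the clamp supplying a floor of one instruction:

  //   Cost = getIntImmCost(lo64) + getIntImmCost(hi64)
  //   return std::max(1, Cost); // >= 1 even if every chunk is TCC_Free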
unsigned X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
const APInt &Imm, Type *Ty) {
int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
Type *Ty) {
assert(Ty->isIntegerTy());
unsigned BitSize = Ty->getPrimitiveSizeInBits();
@@ -1075,18 +1067,18 @@ unsigned X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
}
if (Idx == ImmIdx) {
unsigned NumConstants = (BitSize + 63) / 64;
unsigned Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
int NumConstants = (BitSize + 63) / 64;
int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
return (Cost <= NumConstants * TTI::TCC_Basic)
? static_cast<unsigned>(TTI::TCC_Free)
? static_cast<int>(TTI::TCC_Free)
: Cost;
}
return X86TTIImpl::getIntImmCost(Imm, Ty);
}
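A note on the cast above:

  // Both arms of the ?: should share a type; with Cost now 'int', the
  // enumerator TTI::TCC_Free is cast to int rather than unsigned.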
unsigned X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
const APInt &Imm, Type *Ty) {
int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty) {
assert(Ty->isIntegerTy());
unsigned BitSize = Ty->getPrimitiveSizeInBits();

lib/Target/X86/X86TargetTransformInfo.h

@@ -33,7 +33,7 @@ class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
const X86Subtarget *ST;
const X86TargetLowering *TLI;
unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract);
int getScalarizationOverhead(Type *Ty, bool Insert, bool Extract);
const X86Subtarget *getST() const { return ST; }
const X86TargetLowering *getTLI() const { return TLI; }
@@ -62,34 +62,32 @@ public:
unsigned getNumberOfRegisters(bool Vector);
unsigned getRegisterBitWidth(bool Vector);
unsigned getMaxInterleaveFactor(unsigned VF);
unsigned getArithmeticInstrCost(
int getArithmeticInstrCost(
unsigned Opcode, Type *Ty,
TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None);
unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp);
unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src);
unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy);
unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace);
unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace);
int getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index, Type *SubTp);
int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src);
int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy);
int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace);
int getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace);
unsigned getAddressComputationCost(Type *PtrTy, bool IsComplex);
int getAddressComputationCost(Type *PtrTy, bool IsComplex);
unsigned getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwiseForm);
int getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwiseForm);
unsigned getIntImmCost(int64_t);
int getIntImmCost(int64_t);
unsigned getIntImmCost(const APInt &Imm, Type *Ty);
int getIntImmCost(const APInt &Imm, Type *Ty);
unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
Type *Ty);
unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty);
int getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty);
int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty);
bool isLegalMaskedLoad(Type *DataType, int Consecutive);
bool isLegalMaskedStore(Type *DataType, int Consecutive);
bool areInlineCompatible(const Function *Caller,