[Alignment][NFC] Conform X86, ARM and AArch64 TargetTransformInfo backends to the public API
The main interface has already been migrated to Align, but a few backends were still broadening the type from Align to MaybeAlign. This patch makes sure all implementations conform to the public API.

Differential Revision: https://reviews.llvm.org/D82465
This commit is contained in:
parent f93a8896de
commit c9edb6243d
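For readers unfamiliar with the two types: llvm::Align (llvm/Support/Alignment.h) always holds a defined, power-of-two alignment, whereas llvm::MaybeAlign is an optional wrapper that may be unset. The snippet below is only an editorial sketch of that distinction, not part of the commit; it assumes an LLVM build to compile against, and the helper names are invented for illustration.

// Editorial sketch: how a MaybeAlign parameter and an Align parameter differ
// for a callee. Requires LLVM headers; getWidthMaybe/getWidth are hypothetical.
#include "llvm/Support/Alignment.h"
#include <cassert>
#include <cstdint>

using llvm::Align;
using llvm::MaybeAlign;

// With a MaybeAlign parameter the callee must handle "alignment unknown".
uint64_t getWidthMaybe(MaybeAlign A) {
  if (!A)              // may be unset
    return 1;
  return A->value();   // dereference yields the underlying Align
}

// With an Align parameter the value is always present, so no null check.
uint64_t getWidth(Align A) { return A.value(); }

int main() {
  assert(getWidthMaybe(MaybeAlign()) == 1);  // unset alignment
  assert(getWidthMaybe(MaybeAlign(8)) == 8);
  assert(getWidth(Align(16)) == 16);         // Align(0) would assert instead
  return 0;
}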
@@ -156,7 +156,7 @@ public:
   bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info);
 
-  bool isLegalMaskedLoadStore(Type *DataType, MaybeAlign Alignment) {
+  bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) {
     if (!isa<VectorType>(DataType) || !ST->hasSVE())
       return false;
 
@@ -172,11 +172,11 @@ public:
     return false;
   }
 
-  bool isLegalMaskedLoad(Type *DataType, MaybeAlign Alignment) {
+  bool isLegalMaskedLoad(Type *DataType, Align Alignment) {
     return isLegalMaskedLoadStore(DataType, Alignment);
   }
 
-  bool isLegalMaskedStore(Type *DataType, MaybeAlign Alignment) {
+  bool isLegalMaskedStore(Type *DataType, Align Alignment) {
     return isLegalMaskedLoadStore(DataType, Alignment);
   }
@@ -580,7 +580,7 @@ bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) {
   return false;
 }
 
-bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, MaybeAlign Alignment) {
+bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
   if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps())
     return false;
 
@@ -596,12 +596,11 @@ bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, MaybeAlign Alignment) {
   }
 
   unsigned EltWidth = DataTy->getScalarSizeInBits();
-  return (EltWidth == 32 && (!Alignment || *Alignment >= 4)) ||
-         (EltWidth == 16 && (!Alignment || *Alignment >= 2)) ||
-         (EltWidth == 8);
+  return (EltWidth == 32 && Alignment >= 4) ||
+         (EltWidth == 16 && Alignment >= 2) || (EltWidth == 8);
 }
 
-bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, MaybeAlign Alignment) {
+bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) {
   if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps())
     return false;
 
@@ -618,8 +617,8 @@ bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, MaybeAlign Alignment) {
     return false;
 
   unsigned EltWidth = Ty->getScalarSizeInBits();
-  return ((EltWidth == 32 && (!Alignment || *Alignment >= 4)) ||
-          (EltWidth == 16 && (!Alignment || *Alignment >= 2)) || EltWidth == 8);
+  return ((EltWidth == 32 && Alignment >= 4) ||
+          (EltWidth == 16 && Alignment >= 2) || EltWidth == 8);
 }
 
 int ARMTTIImpl::getMemcpyCost(const Instruction *I) {
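Editorial note, not part of the diff: the guards above collapse from "!Alignment || *Alignment >= N" to a plain "Alignment >= N" because an Align argument can no longer be in an "unknown" state, and Alignment.h provides comparison operators between Align and plain integers. A minimal before/after sketch, assuming LLVM headers are available; the function names are invented for illustration.

#include "llvm/Support/Alignment.h"

using llvm::Align;
using llvm::MaybeAlign;

// Old-style check: MaybeAlign forces a null test before the numeric compare;
// an unknown alignment was treated as acceptable.
bool eltWidth32LegalOld(MaybeAlign A) { return !A || *A >= 4; }

// New-style check: Align always carries a value, so the comparison stands
// alone (operator>= between Align and an integer).
bool eltWidth32LegalNew(Align A) { return A >= 4; }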
@@ -153,15 +153,15 @@ public:
 
   bool isProfitableLSRChainElement(Instruction *I);
 
-  bool isLegalMaskedLoad(Type *DataTy, MaybeAlign Alignment);
+  bool isLegalMaskedLoad(Type *DataTy, Align Alignment);
 
-  bool isLegalMaskedStore(Type *DataTy, MaybeAlign Alignment) {
+  bool isLegalMaskedStore(Type *DataTy, Align Alignment) {
     return isLegalMaskedLoad(DataTy, Alignment);
   }
 
-  bool isLegalMaskedGather(Type *Ty, MaybeAlign Alignment);
+  bool isLegalMaskedGather(Type *Ty, Align Alignment);
 
-  bool isLegalMaskedScatter(Type *Ty, MaybeAlign Alignment) {
+  bool isLegalMaskedScatter(Type *Ty, Align Alignment) {
     return isLegalMaskedGather(Ty, Alignment);
   }
@@ -3046,8 +3046,8 @@ int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
   unsigned NumElem = SrcVTy->getNumElements();
   auto *MaskTy =
       FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
-  if ((IsLoad && !isLegalMaskedLoad(SrcVTy, MaybeAlign(Alignment))) ||
-      (IsStore && !isLegalMaskedStore(SrcVTy, MaybeAlign(Alignment))) ||
+  if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Align(Alignment))) ||
+      (IsStore && !isLegalMaskedStore(SrcVTy, Align(Alignment))) ||
       !isPowerOf2_32(NumElem)) {
     // Scalarization
     APInt DemandedElts = APInt::getAllOnesValue(NumElem);
@@ -3982,9 +3982,9 @@ int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy,
 
   bool Scalarize = false;
   if ((Opcode == Instruction::Load &&
-       !isLegalMaskedGather(SrcVTy, MaybeAlign(Alignment))) ||
+       !isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
       (Opcode == Instruction::Store &&
-       !isLegalMaskedScatter(SrcVTy, MaybeAlign(Alignment))))
+       !isLegalMaskedScatter(SrcVTy, Align(Alignment))))
     Scalarize = true;
   // Gather / Scatter for vector 2 is not profitable on KNL / SKX
   // Vector-4 of gather/scatter instruction does not exist on KNL.
@@ -4017,7 +4017,7 @@ bool X86TTIImpl::canMacroFuseCmp() {
   return ST->hasMacroFusion() || ST->hasBranchFusion();
 }
 
-bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, MaybeAlign Alignment) {
+bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
   if (!ST->hasAVX())
     return false;
 
@@ -4041,7 +4041,7 @@ bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, MaybeAlign Alignment) {
          ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
 }
 
-bool X86TTIImpl::isLegalMaskedStore(Type *DataType, MaybeAlign Alignment) {
+bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) {
   return isLegalMaskedLoad(DataType, Alignment);
 }
 
@@ -4108,7 +4108,7 @@ bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) {
   return isLegalMaskedExpandLoad(DataTy);
 }
 
-bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, MaybeAlign Alignment) {
+bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) {
   // Some CPUs have better gather performance than others.
   // TODO: Remove the explicit ST->hasAVX512()?, That would mean we would only
   // enable gather with a -march.
@@ -4146,7 +4146,7 @@ bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, MaybeAlign Alignment) {
   return IntWidth == 32 || IntWidth == 64;
 }
 
-bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, MaybeAlign Alignment) {
+bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) {
   // AVX2 doesn't support scatter
   if (!ST->hasAVX512())
     return false;
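Editorial note on the X86 call sites above: the cost-model code wraps a raw unsigned Alignment value, and the wrapper changes from MaybeAlign(Alignment) to Align(Alignment). As I understand the Alignment.h API, MaybeAlign(0) simply means "no alignment known", while the Align constructor asserts that its argument is a non-zero power of two, so these call sites now rely on Alignment already being valid. A small illustrative sketch, assumptions only, not taken from the commit:

#include "llvm/Support/Alignment.h"

using llvm::Align;
using llvm::MaybeAlign;

void wrapAlignment(unsigned Alignment) {
  // Old wrapper: Alignment == 0 quietly becomes "no alignment information".
  MaybeAlign MA(Alignment);
  // New wrapper: the caller must already hold a valid, non-zero power-of-two
  // alignment; Align's constructor asserts otherwise.
  Align A(Alignment);
  (void)MA;
  (void)A;
}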
@@ -200,12 +200,12 @@ public:
   bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                      TargetTransformInfo::LSRCost &C2);
   bool canMacroFuseCmp();
-  bool isLegalMaskedLoad(Type *DataType, MaybeAlign Alignment);
-  bool isLegalMaskedStore(Type *DataType, MaybeAlign Alignment);
+  bool isLegalMaskedLoad(Type *DataType, Align Alignment);
+  bool isLegalMaskedStore(Type *DataType, Align Alignment);
   bool isLegalNTLoad(Type *DataType, Align Alignment);
   bool isLegalNTStore(Type *DataType, Align Alignment);
-  bool isLegalMaskedGather(Type *DataType, MaybeAlign Alignment);
-  bool isLegalMaskedScatter(Type *DataType, MaybeAlign Alignment);
+  bool isLegalMaskedGather(Type *DataType, Align Alignment);
+  bool isLegalMaskedScatter(Type *DataType, Align Alignment);
   bool isLegalMaskedExpandLoad(Type *DataType);
   bool isLegalMaskedCompressStore(Type *DataType);
   bool hasDivRemOp(Type *DataType, bool IsSigned);