[RISCV] Don't enable Interleaved Access Vectorization

The patch https://reviews.llvm.org/D101469 is intended to enable loop unrolling,
not interleaved access vectorization, so the method
bool enableInterleavedAccessVectorization() should not be implemented here.

Luke, 2021-06-18 12:32:30 +08:00
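For reference, the override removed by this change was presumably the one-line form sketched below; with it gone, the target falls back to the default TTI answer for interleaved access vectorization.

    bool enableInterleavedAccessVectorization() { return true; }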

//===- RISCVTargetTransformInfo.h - RISC-V specific TTI ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines a TargetTransformInfo::Concept conforming object specific
/// to the RISC-V target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H

#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Function.h"

namespace llvm {

class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
  using BaseT = BasicTTIImplBase<RISCVTTIImpl>;
  using TTI = TargetTransformInfo;

  friend BaseT;

  const RISCVSubtarget *ST;
  const RISCVTargetLowering *TLI;

  const RISCVSubtarget *getST() const { return ST; }
  const RISCVTargetLowering *getTLI() const { return TLI; }

public:
  explicit RISCVTTIImpl(const RISCVTargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

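  /// Cost hooks for materializing integer immediates, both standalone and as
  /// operands of instructions or intrinsics.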
  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TTI::TargetCostKind CostKind);
  InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind,
                                    Instruction *Inst = nullptr);
  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TTI::TargetCostKind CostKind);

  TargetTransformInfo::PopcntSupportKind getPopcntSupport(unsigned TyWidth);

  bool shouldExpandReduction(const IntrinsicInst *II) const;

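  /// Scalable vectors are only available when the V extension is present.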
  bool supportsScalableVectors() const { return ST->hasStdExtV(); }
  Optional<unsigned> getMaxVScale() const;

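  /// Scalar registers are XLEN bits wide; fixed and scalable vector register
  /// widths are only reported as non-zero when the V extension is enabled and
  /// a minimum RVV vector length is known.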
  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
    switch (K) {
    case TargetTransformInfo::RGK_Scalar:
      return TypeSize::getFixed(ST->getXLen());
    case TargetTransformInfo::RGK_FixedWidthVector:
      return TypeSize::getFixed(
          ST->hasStdExtV() ? ST->getMinRVVVectorSizeInBits() : 0);
    case TargetTransformInfo::RGK_ScalableVector:
      return TypeSize::getScalable(
          ST->hasStdExtV() ? ST->getMinRVVVectorSizeInBits() : 0);
    }

    llvm_unreachable("Unsupported register kind");
  }

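  /// Cost model hook for vector gather/scatter memory operations.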
  InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I);

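  /// Return true if \p ScalarTy is a legal element type for RVV vectors:
  /// pointers, 8/16/32/64-bit integers, and floating-point types gated on the
  /// Zfh, F, and D extensions respectively.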
  bool isLegalElementTypeForRVV(Type *ScalarTy) const {
    if (ScalarTy->isPointerTy())
      return true;

    if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
        ScalarTy->isIntegerTy(32) || ScalarTy->isIntegerTy(64))
      return true;

    if (ScalarTy->isHalfTy())
      return ST->hasStdExtZfh();
    if (ScalarTy->isFloatTy())
      return ST->hasStdExtF();
    if (ScalarTy->isDoubleTy())
      return ST->hasStdExtD();

    return false;
  }

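  /// Shared legality check for masked loads and stores: requires the V
  /// extension, a known minimum vector length for fixed vectors, at least
  /// element-size alignment, and an RVV-legal element type.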
  bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) {
    if (!ST->hasStdExtV())
      return false;

    // Only support fixed vectors if we know the minimum vector size.
    if (isa<FixedVectorType>(DataType) && ST->getMinRVVVectorSizeInBits() == 0)
      return false;

    if (Alignment <
        DL.getTypeStoreSize(DataType->getScalarType()).getFixedSize())
      return false;

    return isLegalElementTypeForRVV(DataType->getScalarType());
  }

  bool isLegalMaskedLoad(Type *DataType, Align Alignment) {
    return isLegalMaskedLoadStore(DataType, Alignment);
  }
  bool isLegalMaskedStore(Type *DataType, Align Alignment) {
    return isLegalMaskedLoadStore(DataType, Alignment);
  }

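  /// Shared legality check for masked gathers and scatters; the requirements
  /// mirror those for masked loads and stores.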
  bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment) {
    if (!ST->hasStdExtV())
      return false;

    // Only support fixed vectors if we know the minimum vector size.
    if (isa<FixedVectorType>(DataType) && ST->getMinRVVVectorSizeInBits() == 0)
      return false;

    if (Alignment <
        DL.getTypeStoreSize(DataType->getScalarType()).getFixedSize())
      return false;

    return isLegalElementTypeForRVV(DataType->getScalarType());
  }

  bool isLegalMaskedGather(Type *DataType, Align Alignment) {
    return isLegalMaskedGatherScatter(DataType, Alignment);
  }
  bool isLegalMaskedScatter(Type *DataType, Align Alignment) {
    return isLegalMaskedGatherScatter(DataType, Alignment);
  }

  /// \returns How the target needs this vector-predicated operation to be
  /// transformed.
  TargetTransformInfo::VPLegalization
  getVPLegalizationStrategy(const VPIntrinsic &PI) const {
    using VPLegalization = TargetTransformInfo::VPLegalization;
    return VPLegalization(VPLegalization::Legal, VPLegalization::Legal);
  }

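  /// Reductions require the V extension; for scalable VFs, only recurrence
  /// kinds that map onto an RVV reduction instruction (and RVV-legal element
  /// types) are vectorizable.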
  bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
                                   ElementCount VF) const {
    if (!ST->hasStdExtV())
      return false;

    if (!VF.isScalable())
      return true;

    Type *Ty = RdxDesc.getRecurrenceType();
    if (!isLegalElementTypeForRVV(Ty))
      return false;

    switch (RdxDesc.getRecurrenceKind()) {
    case RecurKind::Add:
    case RecurKind::FAdd:
    case RecurKind::And:
    case RecurKind::Or:
    case RecurKind::Xor:
    case RecurKind::SMin:
    case RecurKind::SMax:
    case RecurKind::UMin:
    case RecurKind::UMax:
    case RecurKind::FMin:
    case RecurKind::FMax:
      return true;
    default:
      return false;
    }
  }

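  /// The maximum interleave (unroll) factor for vectorized loops is provided
  /// by the subtarget.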
  unsigned getMaxInterleaveFactor(unsigned VF) {
    return ST->getMaxInterleaveFactor();
  }
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H