2017-08-01 23:20:10 +02:00
|
|
|
//===- HexagonTargetTransformInfo.cpp - Hexagon specific TTI pass ---------===//
|
2015-08-05 20:35:37 +02:00
|
|
|
//
|
2019-01-19 09:50:56 +01:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2015-08-05 20:35:37 +02:00
|
|
|
//
|
|
|
|
/// \file
|
|
|
|
/// This file implements a TargetTransformInfo analysis pass specific to the
|
|
|
|
/// Hexagon target machine. It uses the target's detailed information to provide
|
|
|
|
/// more precise answers to certain TTI queries, while letting the target
|
|
|
|
/// independent and default TTI implementations handle the rest.
|
|
|
|
///
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "HexagonTargetTransformInfo.h"
|
2017-08-01 23:20:10 +02:00
|
|
|
#include "HexagonSubtarget.h"
|
|
|
|
#include "llvm/Analysis/TargetTransformInfo.h"
|
2018-04-13 22:46:50 +02:00
|
|
|
#include "llvm/CodeGen/ValueTypes.h"
|
2017-08-01 23:20:10 +02:00
|
|
|
#include "llvm/IR/InstrTypes.h"
|
2016-08-19 16:22:07 +02:00
|
|
|
#include "llvm/IR/Instructions.h"
|
2017-08-01 23:20:10 +02:00
|
|
|
#include "llvm/IR/User.h"
|
|
|
|
#include "llvm/Support/Casting.h"
|
|
|
|
#include "llvm/Support/CommandLine.h"
|
2020-07-31 20:31:58 +02:00
|
|
|
#include "llvm/Transforms/Utils/LoopPeel.h"
|
2018-04-04 00:55:09 +02:00
|
|
|
#include "llvm/Transforms/Utils/UnrollLoop.h"
|
2015-08-05 20:35:37 +02:00
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
|
|
|
#define DEBUG_TYPE "hexagontti"
|
|
|
|
|
2018-10-27 06:51:12 +02:00
|
|
|
// Opt-in flag for HVX auto-vectorization: the loop vectorizer only targets
// HVX when this is set (see useHVX()). Off by default.
static cl::opt<bool> HexagonAutoHVX("hexagon-autohvx", cl::init(false),
  cl::Hidden, cl::desc("Enable loop vectorizer for HVX"));

// Controls shouldBuildLookupTables(): whether switch-to-lookup-table
// conversion is allowed for this target. On by default.
static cl::opt<bool> EmitLookupTables("hexagon-emit-lookup-tables",
  cl::init(true), cl::Hidden,
  cl::desc("Control lookup table emission on Hexagon target"));

// Controls isLegalMaskedLoad/isLegalMaskedStore: masked vector memory
// operations are advertised as legal for HVX types when set. On by default.
static cl::opt<bool> HexagonMaskedVMem("hexagon-masked-vmem", cl::init(true),
  cl::Hidden, cl::desc("Enable masked loads/stores for HVX"));

// Constant "cost factor" to make floating point operations more expensive
// in terms of vectorization cost. This isn't the best way, but it should
// do. Ultimately, the cost should use cycles.
static const unsigned FloatFactor = 4;
|
2018-04-13 22:46:50 +02:00
|
|
|
|
|
|
|
bool HexagonTTIImpl::useHVX() const {
|
|
|
|
return ST.useHVXOps() && HexagonAutoHVX;
|
|
|
|
}
|
|
|
|
|
2018-06-12 17:12:50 +02:00
|
|
|
// Number of lanes in a fixed-width vector type; a plain scalar counts as
// a single element. Anything else is rejected by the assert.
unsigned HexagonTTIImpl::getTypeNumElements(Type *Ty) const {
  if (isa<FixedVectorType>(Ty))
    return cast<FixedVectorType>(Ty)->getNumElements();
  assert((Ty->isIntegerTy() || Ty->isFloatingPointTy()) &&
         "Expecting scalar type");
  return 1;
}
|
|
|
|
|
2015-08-05 20:35:37 +02:00
|
|
|
TargetTransformInfo::PopcntSupportKind
HexagonTTIImpl::getPopcntSupport(unsigned IntTyWidthInBit) const {
  // Return fast hardware support as every input < 64 bits will be promoted
  // to 64 bits.
  return TargetTransformInfo::PSK_FastHardware;
}
|
|
|
|
|
|
|
|
// The Hexagon target can unroll loops with run-time trip counts.
|
[LoopUnroll] Pass SCEV to getUnrollingPreferences hook. NFCI.
Reviewers: sanjoy, anna, reames, apilipenko, igor-laevsky, mkuper
Subscribers: jholewinski, arsenm, mzolotukhin, nemanjai, nhaehnle, javed.absar, mcrosier, llvm-commits
Differential Revision: https://reviews.llvm.org/D34531
llvm-svn: 306554
2017-06-28 17:53:17 +02:00
|
|
|
void HexagonTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
|
2015-08-05 20:35:37 +02:00
|
|
|
TTI::UnrollingPreferences &UP) {
|
|
|
|
UP.Runtime = UP.Partial = true;
|
[NFC] Separate Peeling Properties into its own struct (re-land after minor fix)
Summary:
This patch separates the peeling specific parameters from the UnrollingPreferences,
and creates a new struct called PeelingPreferences. Functions which used the
UnrollingPreferences struct for peeling have been updated to use the PeelingPreferences struct.
Author: sidbav (Sidharth Baveja)
Reviewers: Whitney (Whitney Tsang), Meinersbur (Michael Kruse), skatkov (Serguei Katkov), ashlykov (Arkady Shlykov), bogner (Justin Bogner), hfinkel (Hal Finkel), anhtuyen (Anh Tuyen Tran), nikic (Nikita Popov)
Reviewed By: Meinersbur (Michael Kruse)
Subscribers: fhahn (Florian Hahn), hiraditya (Aditya Kumar), llvm-commits, LLVM
Tag: LLVM
Differential Revision: https://reviews.llvm.org/D80580
2020-07-10 20:38:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void HexagonTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
  // Only try to peel innermost loops with small runtime trip counts.
  if (!L || !L->isInnermost() || !canPeel(L))
    return;
  // A known constant trip count means peeling is pointless.
  if (SE.getSmallConstantTripCount(L) != 0)
    return;
  unsigned MaxTC = SE.getSmallConstantMaxTripCount(L);
  if (MaxTC > 0 && MaxTC <= 5)
    PP.PeelCount = 2;
}
|
|
|
|
|
2021-02-15 12:06:42 +01:00
|
|
|
// Prefer post-increment addressing for loop memory accesses; Hexagon has
// post-indexed load/store forms.
TTI::AddressingModeKind
HexagonTTIImpl::getPreferredAddressingMode(const Loop *L,
                                           ScalarEvolution *SE) const {
  return TTI::AMK_PostIndexed;
}
|
|
|
|
|
2018-04-13 22:46:50 +02:00
|
|
|
/// --- Vector TTI begin ---
|
|
|
|
|
2018-03-27 19:07:52 +02:00
|
|
|
unsigned HexagonTTIImpl::getNumberOfRegisters(bool Vector) const {
|
|
|
|
if (Vector)
|
2018-04-13 22:46:50 +02:00
|
|
|
return useHVX() ? 32 : 0;
|
2018-03-27 19:07:52 +02:00
|
|
|
return 32;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned HexagonTTIImpl::getMaxInterleaveFactor(unsigned VF) {
|
2020-10-09 16:34:24 +02:00
|
|
|
return useHVX() ? 2 : 1;
|
2018-03-27 19:07:52 +02:00
|
|
|
}
|
|
|
|
|
2021-03-24 14:50:31 +01:00
|
|
|
// Scalar registers are 32 bits wide; fixed-width vector registers match the
// (HVX-dependent) minimum vector register width; scalable vectors are not
// supported on Hexagon.
TypeSize
HexagonTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  if (K == TargetTransformInfo::RGK_Scalar)
    return TypeSize::getFixed(32);
  if (K == TargetTransformInfo::RGK_FixedWidthVector)
    return TypeSize::getFixed(getMinVectorRegisterBitWidth());
  if (K == TargetTransformInfo::RGK_ScalableVector)
    return TypeSize::getScalable(0);

  llvm_unreachable("Unsupported register kind");
}
|
|
|
|
|
|
|
|
unsigned HexagonTTIImpl::getMinVectorRegisterBitWidth() const {
|
2020-08-12 17:11:12 +02:00
|
|
|
return useHVX() ? ST.getVectorLength()*8 : 32;
|
2018-03-27 19:07:52 +02:00
|
|
|
}
|
|
|
|
|
2021-02-11 09:50:08 +01:00
|
|
|
// Minimum vectorization factor: as many elements of the given width as fit
// in one full HVX vector register.
ElementCount HexagonTTIImpl::getMinimumVF(unsigned ElemWidth,
                                          bool IsScalable) const {
  assert(!IsScalable && "Scalable VFs are not supported for Hexagon");
  unsigned RegBits = 8 * ST.getVectorLength();
  return ElementCount::getFixed(RegBits / ElemWidth);
}
|
|
|
|
|
2021-04-22 11:41:01 +02:00
|
|
|
// No Hexagon-specific scalarization cost; defer to the generic model.
InstructionCost HexagonTTIImpl::getScalarizationOverhead(
    VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract) {
  return BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
}
|
|
|
|
|
2021-04-22 11:41:01 +02:00
|
|
|
// No Hexagon-specific operand-scalarization cost; defer to the generic model.
InstructionCost
HexagonTTIImpl::getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                                 ArrayRef<Type *> Tys) {
  return BaseT::getOperandsScalarizationOverhead(Args, Tys);
}
|
|
|
|
|
2021-04-14 17:48:07 +02:00
|
|
|
// Call costs are not specialized for Hexagon; use the generic estimate.
InstructionCost HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy,
                                                 ArrayRef<Type *> Tys,
                                                 TTI::TargetCostKind CostKind) {
  return BaseT::getCallInstrCost(F, RetTy, Tys, CostKind);
}
|
|
|
|
|
2021-01-22 18:14:44 +01:00
|
|
|
// Intrinsic costs: bswap gets a small fixed cost on top of the type
// legalization cost; every other intrinsic uses the generic estimate.
InstructionCost
HexagonTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                      TTI::TargetCostKind CostKind) {
  if (ICA.getID() != Intrinsic::bswap)
    return BaseT::getIntrinsicInstrCost(ICA, CostKind);
  std::pair<InstructionCost, MVT> LT =
      TLI.getTypeLegalizationCost(DL, ICA.getReturnType());
  return LT.first + 2;
}
|
|
|
|
|
2021-01-27 14:12:56 +01:00
|
|
|
// Address computations are treated as free on Hexagon.
InstructionCost HexagonTTIImpl::getAddressComputationCost(Type *Tp,
                                                          ScalarEvolution *SE,
                                                          const SCEV *S) {
  return 0;
}
|
|
|
|
|
2021-01-23 13:14:21 +01:00
|
|
|
// Cost of a load or store. Stores and non-throughput cost kinds defer to the
// generic model; vector loads get Hexagon-specific costing that accounts for
// HVX register width and alignment.
InstructionCost HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                                MaybeAlign Alignment,
                                                unsigned AddressSpace,
                                                TTI::TargetCostKind CostKind,
                                                const Instruction *I) {
  assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  if (Opcode == Instruction::Store)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind, I);

  if (Src->isVectorTy()) {
    VectorType *VecTy = cast<VectorType>(Src);
    unsigned VecWidth = VecTy->getPrimitiveSizeInBits().getFixedSize();
    if (useHVX() && ST.isTypeForHVX(VecTy)) {
      unsigned RegWidth =
          getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedSize();
      assert(RegWidth && "Non-zero vector register width expected");
      // Cost of HVX loads: one per full vector register covered.
      if (VecWidth % RegWidth == 0)
        return VecWidth / RegWidth;
      // Cost of constructing HVX vector from scalar loads.
      // Clamp the effective alignment to one HVX register; each aligned
      // chunk is charged 3 (load plus vector-composition overhead).
      const Align RegAlign(RegWidth / 8);
      if (!Alignment || *Alignment > RegAlign)
        Alignment = RegAlign;
      assert(Alignment);
      unsigned AlignWidth = 8 * Alignment->value();
      unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
      return 3 * NumLoads;
    }

    // Non-HVX vectors.
    // Add extra cost for floating point types.
    unsigned Cost =
        VecTy->getElementType()->isFloatingPointTy() ? FloatFactor : 1;

    // At this point unspecified alignment is considered as Align(1).
    // Bound the alignment to the 8-byte scalar register width.
    const Align BoundAlignment = std::min(Alignment.valueOrOne(), Align(8));
    unsigned AlignWidth = 8 * BoundAlignment.value();
    unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
    // Word- or doubleword-aligned loads need no element insertion.
    if (Alignment == Align(4) || Alignment == Align(8))
      return Cost * NumLoads;
    // Loads of less than 32 bits will need extra inserts to compose a vector.
    // LogA is 0..2 here, so the multiplier (3 - LogA) grows as alignment
    // shrinks.
    assert(BoundAlignment <= Align(8));
    unsigned LogA = Log2(BoundAlignment);
    return (3 - LogA) * Cost * NumLoads;
  }

  // Scalar loads: generic costing.
  return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                CostKind, I);
}
|
2016-07-22 16:22:43 +02:00
|
|
|
|
2021-01-22 23:41:19 +01:00
|
|
|
// Masked memory op costs are not specialized; use the generic estimate.
InstructionCost
HexagonTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                      Align Alignment, unsigned AddressSpace,
                                      TTI::TargetCostKind CostKind) {
  return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                      CostKind);
}
|
|
|
|
|
2021-04-14 17:50:20 +02:00
|
|
|
// All shuffles are costed as a single instruction.
InstructionCost HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
                                               ArrayRef<int> Mask, int Index,
                                               Type *SubTp) {
  return 1;
}
|
|
|
|
|
2021-01-22 22:25:50 +01:00
|
|
|
// Gather/scatter costs are not specialized; use the generic estimate.
InstructionCost HexagonTTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
  return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment, CostKind, I);
}
|
|
|
|
|
2021-01-23 13:14:21 +01:00
|
|
|
// Interleaved accesses: a full, unmasked group is costed like a plain
// memory operation; everything else falls back to the generic model.
InstructionCost HexagonTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  bool FullUnmaskedGroup =
      Indices.size() == Factor && !UseMaskForCond && !UseMaskForGaps;
  if (FullUnmaskedGroup)
    return getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment), AddressSpace,
                           CostKind);
  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind,
                                           UseMaskForCond, UseMaskForGaps);
}
|
|
|
|
|
2021-01-22 22:44:23 +01:00
|
|
|
// Compare/select costs: vector FP compares (throughput costing only) pay a
// per-lane FloatFactor penalty on top of the legalization cost; everything
// else uses the generic estimate.
InstructionCost HexagonTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                                   Type *CondTy,
                                                   CmpInst::Predicate VecPred,
                                                   TTI::TargetCostKind CostKind,
                                                   const Instruction *I) {
  bool VecThroughput =
      ValTy->isVectorTy() && CostKind == TTI::TCK_RecipThroughput;
  if (VecThroughput && Opcode == Instruction::FCmp) {
    std::pair<InstructionCost, MVT> LT = TLI.getTypeLegalizationCost(DL, ValTy);
    return LT.first + FloatFactor * getTypeNumElements(ValTy);
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
}
|
|
|
|
|
2021-04-14 17:53:01 +02:00
|
|
|
// Arithmetic costs: vector FP operations (throughput costing only) pay a
// per-lane FloatFactor penalty on top of the legalization cost; all other
// cases use the generic estimate.
// TODO: Handle more cost kinds.
InstructionCost HexagonTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  if (CostKind == TTI::TCK_RecipThroughput && Ty->isVectorTy()) {
    std::pair<InstructionCost, MVT> LT = TLI.getTypeLegalizationCost(DL, Ty);
    if (LT.second.isFloatingPoint())
      return LT.first + FloatFactor * getTypeNumElements(Ty);
  }
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args, CxtI);
}
|
|
|
|
|
2021-01-21 14:40:22 +01:00
|
|
|
// Cast costs: casts not touching floating point are a single instruction.
// FP-involving casts pay FloatFactor per FP lane on either side, on top of
// the larger of the two legalization costs.
InstructionCost HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy,
                                                 Type *SrcTy,
                                                 TTI::CastContextHint CCH,
                                                 TTI::TargetCostKind CostKind,
                                                 const Instruction *I) {
  bool SrcIsFP = SrcTy->isFPOrFPVectorTy();
  bool DstIsFP = DstTy->isFPOrFPVectorTy();
  if (!SrcIsFP && !DstIsFP)
    return 1;

  unsigned SrcN = SrcIsFP ? getTypeNumElements(SrcTy) : 0;
  unsigned DstN = DstIsFP ? getTypeNumElements(DstTy) : 0;

  std::pair<InstructionCost, MVT> SrcLT =
      TLI.getTypeLegalizationCost(DL, SrcTy);
  std::pair<InstructionCost, MVT> DstLT =
      TLI.getTypeLegalizationCost(DL, DstTy);
  InstructionCost Cost =
      std::max(SrcLT.first, DstLT.first) + FloatFactor * (SrcN + DstN);
  // TODO: Allow non-throughput costs that aren't binary.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost == 0 ? 0 : 1;
  return Cost;
}
|
|
|
|
|
2021-01-27 11:52:58 +01:00
|
|
|
// Element insert/extract costs, modeled after the rotate-based lowering.
InstructionCost HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                                   unsigned Index) {
  Type *ElemTy =
      Val->isVectorTy() ? cast<VectorType>(Val)->getElementType() : Val;
  switch (Opcode) {
  case Instruction::InsertElement: {
    // Need two rotations for non-zero index.
    unsigned RotCost = Index != 0 ? 2 : 0;
    if (ElemTy->isIntegerTy(32))
      return RotCost;
    // If it's not a 32-bit value, there will need to be an extract.
    return RotCost +
           getVectorInstrCost(Instruction::ExtractElement, Val, Index);
  }
  case Instruction::ExtractElement:
    return 2;
  default:
    return 1;
  }
}
|
|
|
|
|
2020-08-25 01:29:57 +02:00
|
|
|
// Masked stores are legal only for HVX types, and only when the
// -hexagon-masked-vmem flag (default on) permits them.
bool HexagonTTIImpl::isLegalMaskedStore(Type *DataType, Align /*Alignment*/) {
  if (!HexagonMaskedVMem)
    return false;
  return ST.isTypeForHVX(DataType);
}
|
|
|
|
|
|
|
|
// Masked loads follow the same rule as masked stores: HVX types only, and
// only when the -hexagon-masked-vmem flag permits them.
bool HexagonTTIImpl::isLegalMaskedLoad(Type *DataType, Align /*Alignment*/) {
  if (!HexagonMaskedVMem)
    return false;
  return ST.isTypeForHVX(DataType);
}
|
|
|
|
|
2018-04-13 22:46:50 +02:00
|
|
|
/// --- Vector TTI end ---
|
|
|
|
|
2016-07-22 16:22:43 +02:00
|
|
|
// Prefetch distance comes straight from the subtarget's L1 parameters.
unsigned HexagonTTIImpl::getPrefetchDistance() const {
  return ST.getL1PrefetchDistance();
}
|
|
|
|
|
|
|
|
// Cache line size comes straight from the subtarget's L1 parameters.
unsigned HexagonTTIImpl::getCacheLineSize() const {
  return ST.getL1CacheLineSize();
}
|
2016-08-19 16:22:07 +02:00
|
|
|
|
2021-01-20 18:17:23 +01:00
|
|
|
// User cost: an integer extension that widens a sub-32-bit load to i32 is
// folded into the load on Hexagon and is therefore free; everything else
// uses the generic estimate.
InstructionCost HexagonTTIImpl::getUserCost(const User *U,
                                            ArrayRef<const Value *> Operands,
                                            TTI::TargetCostKind CostKind) {
  auto isCastFoldedIntoLoad = [this](const CastInst *CI) -> bool {
    if (!CI->isIntegerCast())
      return false;
    // Only extensions from an integer type shorter than 32-bit to i32
    // can be folded into the load.
    const DataLayout &DL = getDataLayout();
    unsigned SrcBits = DL.getTypeSizeInBits(CI->getSrcTy());
    unsigned DstBits = DL.getTypeSizeInBits(CI->getDestTy());
    if (DstBits != 32 || SrcBits >= DstBits)
      return false;

    // Technically, this code could allow multiple uses of the load, and
    // check if all the uses are the same extension operation, but this
    // should be sufficient for most cases.
    const auto *LI = dyn_cast<const LoadInst>(CI->getOperand(0));
    return LI && LI->hasOneUse();
  };

  if (const auto *CI = dyn_cast<const CastInst>(U))
    if (isCastFoldedIntoLoad(CI))
      return TargetTransformInfo::TCC_Free;
  return BaseT::getUserCost(U, Operands, CostKind);
}
|
2017-06-30 22:54:24 +02:00
|
|
|
|
|
|
|
// Switch-to-lookup-table conversion is gated by the
// -hexagon-emit-lookup-tables flag (default on).
bool HexagonTTIImpl::shouldBuildLookupTables() const {
  return EmitLookupTables;
}
|