//===- CostModel.cpp ------ Cost Model Analysis ---------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the cost model analysis. It provides a very basic cost
// estimation for LLVM-IR. This analysis uses the services of the codegen
// to approximate the cost of any IR instruction when lowered to machine
// instructions. The cost results are unit-less and the cost number represents
// the throughput of the machine assuming that all loads hit the cache, all
// branches are predicted, etc. The cost numbers can be added in order to
// compare two or more transformation alternatives.
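//
// The results are exposed through this pass's print() output, which prefixes
// each instruction with "Cost Model:". It is typically inspected with a
// command along the lines of `opt -cost-model -analyze <ir-file>` (the exact
// flag spelling may differ between LLVM versions).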
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define CM_NAME "cost-model"
#define DEBUG_TYPE CM_NAME

static cl::opt<bool> EnableReduxCost("costmodel-reduxcost", cl::init(false),
                                     cl::Hidden,
                                     cl::desc("Recognize reduction patterns."));
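
// Illustrative use of the flag above (the flag name comes from the cl::opt;
// the exact opt invocation may vary between versions):
//   opt -cost-model -costmodel-reduxcost=true -analyze <ir-file>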

namespace {
  class CostModelAnalysis : public FunctionPass {

  public:
    static char ID; // Class identification, replacement for typeinfo
    CostModelAnalysis() : FunctionPass(ID), F(nullptr), TTI(nullptr) {
      initializeCostModelAnalysisPass(
        *PassRegistry::getPassRegistry());
    }

    /// Returns the expected cost of the instruction.
    /// Returns -1 if the cost is unknown.
    /// Note, this method does not cache the cost calculation and it
    /// can be expensive in some cases.
    unsigned getInstructionCost(const Instruction *I) const;

  private:
    void getAnalysisUsage(AnalysisUsage &AU) const override;
    bool runOnFunction(Function &F) override;
    void print(raw_ostream &OS, const Module*) const override;

    /// The function that we analyze.
    Function *F;
    /// Target information.
    const TargetTransformInfo *TTI;
  };
} // End of anonymous namespace

// Register this pass.
char CostModelAnalysis::ID = 0;
static const char cm_name[] = "Cost Model Analysis";
INITIALIZE_PASS_BEGIN(CostModelAnalysis, CM_NAME, cm_name, false, true)
INITIALIZE_PASS_END  (CostModelAnalysis, CM_NAME, cm_name, false, true)

FunctionPass *llvm::createCostModelAnalysisPass() {
  return new CostModelAnalysis();
}

void
CostModelAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
}

bool
CostModelAnalysis::runOnFunction(Function &F) {
  this->F = &F;
  auto *TTIWP = getAnalysisIfAvailable<TargetTransformInfoWrapperPass>();
  TTI = TTIWP ? &TTIWP->getTTI(F) : nullptr;

  return false;
}
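
// Returns true if the given mask reverses the order of the vector's elements.
// Mask entries that are not positive (undef, or index 0) are not checked, so
// partially undefined masks are accepted as well.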
static bool isReverseVectorMask(SmallVectorImpl<int> &Mask) {
  for (unsigned i = 0, MaskSize = Mask.size(); i < MaskSize; ++i)
    if (Mask[i] > 0 && Mask[i] != (int)(MaskSize - 1 - i))
      return false;
  return true;
}

static bool isAlternateVectorMask(SmallVectorImpl<int> &Mask) {
  bool isAlternate = true;
  unsigned MaskSize = Mask.size();

  // Example: shufflevector A, B, <0,5,2,7>
  for (unsigned i = 0; i < MaskSize && isAlternate; ++i) {
    if (Mask[i] < 0)
      continue;
    isAlternate = Mask[i] == (int)((i & 1) ? MaskSize + i : i);
  }

  if (isAlternate)
    return true;

  isAlternate = true;
  // Example: shufflevector A, B, <4,1,6,3>
  for (unsigned i = 0; i < MaskSize && isAlternate; ++i) {
    if (Mask[i] < 0)
      continue;
    isAlternate = Mask[i] == (int)((i & 1) ? i : MaskSize + i);
  }

  return isAlternate;
}

static TargetTransformInfo::OperandValueKind getOperandInfo(Value *V) {
  TargetTransformInfo::OperandValueKind OpInfo =
    TargetTransformInfo::OK_AnyValue;

  // Check for a splat of a constant or for a non-uniform vector of constants.
  if (isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) {
    OpInfo = TargetTransformInfo::OK_NonUniformConstantValue;
    if (cast<Constant>(V)->getSplatValue() != nullptr)
      OpInfo = TargetTransformInfo::OK_UniformConstantValue;
  }

  return OpInfo;
}

static bool matchPairwiseShuffleMask(ShuffleVectorInst *SI, bool IsLeft,
                                     unsigned Level) {
  // We don't need a shuffle if we just want to have element 0 in position 0 of
  // the vector.
  if (!SI && Level == 0 && IsLeft)
    return true;
  else if (!SI)
    return false;

  SmallVector<int, 32> Mask(SI->getType()->getVectorNumElements(), -1);

  // Build a mask of 0, 2, ... (left) or 1, 3, ... (right) depending on whether
  // we look at the left or right side.
  for (unsigned i = 0, e = (1 << Level), val = !IsLeft; i != e; ++i, val += 2)
    Mask[i] = val;

  SmallVector<int, 16> ActualMask = SI->getShuffleMask();
  if (Mask != ActualMask)
    return false;

  return true;
}
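
// Matches one level of the pairwise reduction tree rooted at BinOp and then
// recurses into the next level. Level is the number of levels matched so far;
// NumLevels is the total number of levels (log2 of the vector width).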
static bool matchPairwiseReductionAtLevel(const BinaryOperator *BinOp,
                                          unsigned Level, unsigned NumLevels) {
  // Match one level of pairwise operations.
  // %rdx.shuf.0.0 = shufflevector <4 x float> %rdx, <4 x float> undef,
  //       <4 x i32> <i32 0, i32 2 , i32 undef, i32 undef>
  // %rdx.shuf.0.1 = shufflevector <4 x float> %rdx, <4 x float> undef,
  //       <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
  // %bin.rdx.0 = fadd <4 x float> %rdx.shuf.0.0, %rdx.shuf.0.1
  if (BinOp == nullptr)
    return false;

  assert(BinOp->getType()->isVectorTy() && "Expecting a vector type");

  unsigned Opcode = BinOp->getOpcode();
  Value *L = BinOp->getOperand(0);
  Value *R = BinOp->getOperand(1);

  ShuffleVectorInst *LS = dyn_cast<ShuffleVectorInst>(L);
  if (!LS && Level)
    return false;
  ShuffleVectorInst *RS = dyn_cast<ShuffleVectorInst>(R);
  if (!RS && Level)
    return false;

  // On level 0 we can omit one shufflevector instruction.
  if (!Level && !RS && !LS)
    return false;

  // Shuffle inputs must match.
  Value *NextLevelOpL = LS ? LS->getOperand(0) : nullptr;
  Value *NextLevelOpR = RS ? RS->getOperand(0) : nullptr;
  Value *NextLevelOp = nullptr;
  if (NextLevelOpR && NextLevelOpL) {
    // If we have two shuffles their operands must match.
    if (NextLevelOpL != NextLevelOpR)
      return false;

    NextLevelOp = NextLevelOpL;
  } else if (Level == 0 && (NextLevelOpR || NextLevelOpL)) {
    // On the first level we can omit the shufflevector <0, undef,...>. So the
    // input to the other shufflevector <1, undef> must match with one of the
    // inputs to the current binary operation.
    // Example:
    //  %NextLevelOpL = shufflevector %R, <1, undef ...>
    //  %BinOp = fadd %NextLevelOpL, %R
    if (NextLevelOpL && NextLevelOpL != R)
      return false;
    else if (NextLevelOpR && NextLevelOpR != L)
      return false;

    NextLevelOp = NextLevelOpL ? R : L;
  } else
    return false;

  // Check that the next level's binary operation exists and matches the
  // current one.
  BinaryOperator *NextLevelBinOp = nullptr;
  if (Level + 1 != NumLevels) {
    if (!(NextLevelBinOp = dyn_cast<BinaryOperator>(NextLevelOp)))
      return false;
    else if (NextLevelBinOp->getOpcode() != Opcode)
      return false;
  }

  // Shuffle mask for pairwise operation must match.
  if (matchPairwiseShuffleMask(LS, true, Level)) {
    if (!matchPairwiseShuffleMask(RS, false, Level))
      return false;
  } else if (matchPairwiseShuffleMask(RS, true, Level)) {
    if (!matchPairwiseShuffleMask(LS, false, Level))
      return false;
  } else
    return false;

  if (++Level == NumLevels)
    return true;

  // Match next level.
  return matchPairwiseReductionAtLevel(NextLevelBinOp, Level, NumLevels);
}
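
// Tries to match a pairwise reduction (the shuffle/shuffle/binop tree shown in
// the comment below) rooted at the given extractelement. On success the
// reduction opcode and vector type are returned through Opcode and Ty.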
static bool matchPairwiseReduction(const ExtractElementInst *ReduxRoot,
                                   unsigned &Opcode, Type *&Ty) {
  if (!EnableReduxCost)
    return false;

  // Need to extract the first element.
  ConstantInt *CI = dyn_cast<ConstantInt>(ReduxRoot->getOperand(1));
  unsigned Idx = ~0u;
  if (CI)
    Idx = CI->getZExtValue();
  if (Idx != 0)
    return false;

  BinaryOperator *RdxStart = dyn_cast<BinaryOperator>(ReduxRoot->getOperand(0));
  if (!RdxStart)
    return false;

  Type *VecTy = ReduxRoot->getOperand(0)->getType();
  unsigned NumVecElems = VecTy->getVectorNumElements();
  if (!isPowerOf2_32(NumVecElems))
    return false;

  // We look for a sequence of shuffle,shuffle,add triples like the following
  // that builds a pairwise reduction tree.
  //
  //  (X0, X1, X2, X3)
  //   (X0 + X1, X2 + X3, undef, undef)
  //    ((X0 + X1) + (X2 + X3), undef, undef, undef)
  //
  // %rdx.shuf.0.0 = shufflevector <4 x float> %rdx, <4 x float> undef,
  //       <4 x i32> <i32 0, i32 2 , i32 undef, i32 undef>
  // %rdx.shuf.0.1 = shufflevector <4 x float> %rdx, <4 x float> undef,
  //       <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
  // %bin.rdx.0 = fadd <4 x float> %rdx.shuf.0.0, %rdx.shuf.0.1
  // %rdx.shuf.1.0 = shufflevector <4 x float> %bin.rdx.0, <4 x float> undef,
  //       <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
  // %rdx.shuf.1.1 = shufflevector <4 x float> %bin.rdx.0, <4 x float> undef,
  //       <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  // %bin.rdx8 = fadd <4 x float> %rdx.shuf.1.0, %rdx.shuf.1.1
  // %r = extractelement <4 x float> %bin.rdx8, i32 0
  if (!matchPairwiseReductionAtLevel(RdxStart, 0, Log2_32(NumVecElems)))
    return false;

  Opcode = RdxStart->getOpcode();
  Ty = VecTy;

  return true;
}
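
// Returns the shufflevector among B's operands (null if neither operand is a
// shufflevector) together with the other operand.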
static std::pair<Value *, ShuffleVectorInst *>
getShuffleAndOtherOprd(BinaryOperator *B) {

  Value *L = B->getOperand(0);
  Value *R = B->getOperand(1);
  ShuffleVectorInst *S = nullptr;

  if ((S = dyn_cast<ShuffleVectorInst>(L)))
    return std::make_pair(R, S);

  S = dyn_cast<ShuffleVectorInst>(R);
  return std::make_pair(L, S);
}
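
// Tries to match a vector-splitting reduction (repeatedly add the upper half
// of the vector to the lower half via a shuffle) rooted at the given
// extractelement. On success the reduction opcode and vector type are
// returned through Opcode and Ty.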
static bool matchVectorSplittingReduction(const ExtractElementInst *ReduxRoot,
                                          unsigned &Opcode, Type *&Ty) {
  if (!EnableReduxCost)
    return false;

  // Need to extract the first element.
  ConstantInt *CI = dyn_cast<ConstantInt>(ReduxRoot->getOperand(1));
  unsigned Idx = ~0u;
  if (CI)
    Idx = CI->getZExtValue();
  if (Idx != 0)
    return false;

  BinaryOperator *RdxStart = dyn_cast<BinaryOperator>(ReduxRoot->getOperand(0));
  if (!RdxStart)
    return false;
  unsigned RdxOpcode = RdxStart->getOpcode();

  Type *VecTy = ReduxRoot->getOperand(0)->getType();
  unsigned NumVecElems = VecTy->getVectorNumElements();
  if (!isPowerOf2_32(NumVecElems))
    return false;

  // We look for a sequence of shuffles and adds like the following, matching
  // one fadd/shuffle pair at a time.
  //
  // %rdx.shuf = shufflevector <4 x float> %rdx, <4 x float> undef,
  //       <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  // %bin.rdx = fadd <4 x float> %rdx, %rdx.shuf
  // %rdx.shuf7 = shufflevector <4 x float> %bin.rdx, <4 x float> undef,
  //       <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  // %bin.rdx8 = fadd <4 x float> %bin.rdx, %rdx.shuf7
  // %r = extractelement <4 x float> %bin.rdx8, i32 0

  unsigned MaskStart = 1;
  Value *RdxOp = RdxStart;
  SmallVector<int, 32> ShuffleMask(NumVecElems, 0);
  unsigned NumVecElemsRemain = NumVecElems;
  while (NumVecElemsRemain - 1) {
    // Check for the right reduction operation.
    BinaryOperator *BinOp;
    if (!(BinOp = dyn_cast<BinaryOperator>(RdxOp)))
      return false;
    if (BinOp->getOpcode() != RdxOpcode)
      return false;

    Value *NextRdxOp;
    ShuffleVectorInst *Shuffle;
    std::tie(NextRdxOp, Shuffle) = getShuffleAndOtherOprd(BinOp);

    // Check that the current reduction operation and the shuffle use the same
    // value.
    if (Shuffle == nullptr)
      return false;
    if (Shuffle->getOperand(0) != NextRdxOp)
      return false;

    // Check that the shuffle mask matches.
    for (unsigned j = 0; j != MaskStart; ++j)
      ShuffleMask[j] = MaskStart + j;
    // Fill the rest of the mask with -1 for undef.
    std::fill(&ShuffleMask[MaskStart], ShuffleMask.end(), -1);

    SmallVector<int, 16> Mask = Shuffle->getShuffleMask();
    if (ShuffleMask != Mask)
      return false;

    RdxOp = NextRdxOp;
    NumVecElemsRemain /= 2;
    MaskStart *= 2;
  }

  Opcode = RdxOpcode;
  Ty = VecTy;
  return true;
}

unsigned CostModelAnalysis::getInstructionCost(const Instruction *I) const {
  if (!TTI)
    return -1;

  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:{
    Type *ValTy = I->getOperand(0)->getType()->getPointerElementType();
    return TTI->getAddressComputationCost(ValTy);
  }

  case Instruction::Ret:
  case Instruction::PHI:
  case Instruction::Br: {
    return TTI->getCFInstrCost(I->getOpcode());
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    TargetTransformInfo::OperandValueKind Op1VK =
      getOperandInfo(I->getOperand(0));
    TargetTransformInfo::OperandValueKind Op2VK =
      getOperandInfo(I->getOperand(1));
    return TTI->getArithmeticInstrCost(I->getOpcode(), I->getType(), Op1VK,
                                       Op2VK);
  }
  case Instruction::Select: {
    const SelectInst *SI = cast<SelectInst>(I);
    Type *CondTy = SI->getCondition()->getType();
    return TTI->getCmpSelInstrCost(I->getOpcode(), I->getType(), CondTy);
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    Type *ValTy = I->getOperand(0)->getType();
    return TTI->getCmpSelInstrCost(I->getOpcode(), ValTy);
  }
  case Instruction::Store: {
    const StoreInst *SI = cast<StoreInst>(I);
    Type *ValTy = SI->getValueOperand()->getType();
    return TTI->getMemoryOpCost(I->getOpcode(), ValTy,
                                SI->getAlignment(),
                                SI->getPointerAddressSpace());
  }
  case Instruction::Load: {
    const LoadInst *LI = cast<LoadInst>(I);
    return TTI->getMemoryOpCost(I->getOpcode(), I->getType(),
                                LI->getAlignment(),
                                LI->getPointerAddressSpace());
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    return TTI->getCastInstrCost(I->getOpcode(), I->getType(), SrcTy);
  }
  case Instruction::ExtractElement: {
    const ExtractElementInst * EEI = cast<ExtractElementInst>(I);
    ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
    unsigned Idx = -1;
    if (CI)
      Idx = CI->getZExtValue();

    // Try to match a reduction sequence (series of shufflevector and vector
    // adds followed by an extractelement).
    unsigned ReduxOpCode;
    Type *ReduxType;

    if (matchVectorSplittingReduction(EEI, ReduxOpCode, ReduxType))
      return TTI->getReductionCost(ReduxOpCode, ReduxType, false);
    else if (matchPairwiseReduction(EEI, ReduxOpCode, ReduxType))
      return TTI->getReductionCost(ReduxOpCode, ReduxType, true);

    return TTI->getVectorInstrCost(I->getOpcode(),
                                   EEI->getOperand(0)->getType(), Idx);
  }
  case Instruction::InsertElement: {
    const InsertElementInst * IE = cast<InsertElementInst>(I);
    ConstantInt *CI = dyn_cast<ConstantInt>(IE->getOperand(2));
    unsigned Idx = -1;
    if (CI)
      Idx = CI->getZExtValue();
    return TTI->getVectorInstrCost(I->getOpcode(),
                                   IE->getType(), Idx);
  }
  case Instruction::ShuffleVector: {
    const ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
    Type *VecTypOp0 = Shuffle->getOperand(0)->getType();
    unsigned NumVecElems = VecTypOp0->getVectorNumElements();
    SmallVector<int, 16> Mask = Shuffle->getShuffleMask();

    if (NumVecElems == Mask.size()) {
      if (isReverseVectorMask(Mask))
        return TTI->getShuffleCost(TargetTransformInfo::SK_Reverse, VecTypOp0,
                                   0, nullptr);
      if (isAlternateVectorMask(Mask))
        return TTI->getShuffleCost(TargetTransformInfo::SK_Alternate,
                                   VecTypOp0, 0, nullptr);
    }

    return -1;
  }
  case Instruction::Call:
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      SmallVector<Type*, 4> Tys;
      for (unsigned J = 0, JE = II->getNumArgOperands(); J != JE; ++J)
        Tys.push_back(II->getArgOperand(J)->getType());

      return TTI->getIntrinsicInstrCost(II->getIntrinsicID(), II->getType(),
                                        Tys);
    }
    return -1;
  default:
    // We don't have any information on this instruction.
    return -1;
  }
}

void CostModelAnalysis::print(raw_ostream &OS, const Module*) const {
  if (!F)
    return;

  for (Function::iterator B = F->begin(), BE = F->end(); B != BE; ++B) {
    for (BasicBlock::iterator it = B->begin(), e = B->end(); it != e; ++it) {
      Instruction *Inst = it;
      unsigned Cost = getInstructionCost(Inst);
      if (Cost != (unsigned)-1)
        OS << "Cost Model: Found an estimated cost of " << Cost;
      else
        OS << "Cost Model: Unknown cost";

      OS << " for instruction: " << *Inst << "\n";
    }
  }
}