//===- NaryReassociate.cpp - Reassociate n-ary expressions ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass reassociates n-ary add expressions and eliminates the redundancy
// exposed by the reassociation.
//
// A motivating example:
//
//   void foo(int a, int b) {
//     bar(a + b);
//     bar((a + 2) + b);
//   }
//
// An ideal compiler should reassociate (a + 2) + b to (a + b) + 2 and simplify
// the above code to
//
//   int t = a + b;
//   bar(t);
//   bar(t + 2);
//
// However, the Reassociate pass is unable to do that because it processes each
// instruction individually and believes (a + 2) + b is the best form according
// to its rank system.
//
// To address this limitation, NaryReassociate reassociates an expression in a
// form that reuses existing instructions. As a result, NaryReassociate can
// reassociate (a + 2) + b in the example to (a + b) + 2 because it detects that
// (a + b) is computed before.
//
// NaryReassociate works as follows. For every instruction in the form of (a +
// b) + c, it checks whether a + c or b + c is already computed by a dominating
// instruction. If so, it then reassociates (a + b) + c into (a + c) + b or (b +
// c) + a and removes the redundancy accordingly. To efficiently look up whether
// an expression is computed before, we store each instruction seen and its SCEV
// into an SCEV-to-instruction map.
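//
// For instance, at the IR level the motivating example above amounts to the
// following (register names are illustrative):
//
//   %ab  = add i32 %a, %b        ; a + b, recorded in the SCEV map
//   %a2  = add i32 %a, 2
//   %sum = add i32 %a2, %b       ; (a + 2) + b
//
// When the pass reaches %sum, it finds that the SCEV of a + b is already
// available through the dominating %ab, so %sum is rewritten to
// "add i32 %ab, 2" and the now-unused %a2 is deleted as trivially dead.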
//
// Although the algorithm pattern-matches only ternary additions, it
// automatically handles many >3-ary expressions by walking through the function
// in depth-first order. For example, given
//
//   (a + c) + d
//   ((a + b) + c) + d
//
// NaryReassociate first rewrites (a + b) + c to (a + c) + b, and then rewrites
// ((a + c) + b) + d into ((a + c) + d) + b.
//
// Finally, the above dominator-based algorithm may need to be run for multiple
// iterations before emitting optimal code. One source of this need is that we
// only split an operand when it is used only once. The above algorithm can
// eliminate an instruction and decrease the usage count of its operands. As a
// result, an instruction that previously had multiple uses may become a
// single-use instruction and thus eligible for split consideration. For
// example,
//
//   ac = a + c
//   ab = a + b
//   abc = ab + c
//   ab2 = ab + b
//   ab2c = ab2 + c
//
// In the first iteration, we cannot reassociate abc to ac+b because ab is used
// twice. However, we can reassociate ab2c to abc+b in the first iteration. As a
// result, ab2 becomes dead and ab will be used only once in the second
// iteration.
//
// Limitations and TODO items:
//
// 1) We only consider n-ary adds and muls for now. This should be extended
//    and generalized.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/NaryReassociate.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <cstdint>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "nary-reassociate"

namespace {

class NaryReassociateLegacyPass : public FunctionPass {
public:
  static char ID;

  NaryReassociateLegacyPass() : FunctionPass(ID) {
    initializeNaryReassociateLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override {
    return false;
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<ScalarEvolutionWrapperPass>();
    AU.addPreserved<TargetLibraryInfoWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
  }

private:
  NaryReassociatePass Impl;
};

} // end anonymous namespace

char NaryReassociateLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(NaryReassociateLegacyPass, "nary-reassociate",
                      "Nary reassociation", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(NaryReassociateLegacyPass, "nary-reassociate",
                    "Nary reassociation", false, false)

FunctionPass *llvm::createNaryReassociatePass() {
  return new NaryReassociateLegacyPass();
}

bool NaryReassociateLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  return Impl.runImpl(F, AC, DT, SE, TLI, TTI);
}

PreservedAnalyses NaryReassociatePass::run(Function &F,
                                           FunctionAnalysisManager &AM) {
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
  auto *TLI = &AM.getResult<TargetLibraryAnalysis>(F);
  auto *TTI = &AM.getResult<TargetIRAnalysis>(F);

  if (!runImpl(F, AC, DT, SE, TLI, TTI))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<ScalarEvolutionAnalysis>();
  return PA;
}
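
// Shared implementation for the legacy and new pass manager entry points:
// caches the analysis results and repeats doOneIteration until it reaches a
// fixed point.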
bool NaryReassociatePass::runImpl(Function &F, AssumptionCache *AC_,
                                  DominatorTree *DT_, ScalarEvolution *SE_,
                                  TargetLibraryInfo *TLI_,
                                  TargetTransformInfo *TTI_) {
  AC = AC_;
  DT = DT_;
  SE = SE_;
  TLI = TLI_;
  TTI = TTI_;
  DL = &F.getParent()->getDataLayout();

  bool Changed = false, ChangedInThisIteration;
  do {
    ChangedInThisIteration = doOneIteration(F);
    Changed |= ChangedInThisIteration;
  } while (ChangedInThisIteration);
  return Changed;
}

// Whitelist the instruction types NaryReassociate handles for now.
static bool isPotentiallyNaryReassociable(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::GetElementPtr:
  case Instruction::Mul:
    return true;
  default:
    return false;
  }
}
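
// Runs one pass over F in dominator-tree pre-order: tries to reassociate each
// candidate instruction against expressions recorded in SeenExprs and records
// the SCEV of every candidate it visits. Returns true if anything changed.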
bool NaryReassociatePass::doOneIteration(Function &F) {
  bool Changed = false;
  SeenExprs.clear();
  // Process the basic blocks in a depth first traversal of the dominator
  // tree. This order ensures that all bases of a candidate are in Candidates
  // when we process it.
  for (const auto Node : depth_first(DT)) {
    BasicBlock *BB = Node->getBlock();
    for (auto I = BB->begin(); I != BB->end(); ++I) {
      if (SE->isSCEVable(I->getType()) && isPotentiallyNaryReassociable(&*I)) {
        const SCEV *OldSCEV = SE->getSCEV(&*I);
        if (Instruction *NewI = tryReassociate(&*I)) {
          Changed = true;
          SE->forgetValue(&*I);
          I->replaceAllUsesWith(NewI);
          WeakVH NewIExist = NewI;
          // If SeenExprs/NewIExist contains I's WeakTrackingVH/WeakVH, that
          // entry will be replaced with nullptr if deleted.
          RecursivelyDeleteTriviallyDeadInstructions(&*I, TLI);
          if (!NewIExist) {
            // Rare occasion where the new instruction (NewI) has itself been
            // removed, probably because parts of the input code were dead from
            // the beginning; reset the iterator and start over from the
            // beginning of the block.
            I = BB->begin();
            continue;
          }
          I = NewI->getIterator();
        }
        // Add the rewritten instruction to SeenExprs; the original instruction
        // is deleted.
        const SCEV *NewSCEV = SE->getSCEV(&*I);
        SeenExprs[NewSCEV].push_back(WeakTrackingVH(&*I));
        // Ideally, NewSCEV should equal OldSCEV because tryReassociate(I)
        // is equivalent to I. However, ScalarEvolution::getSCEV may
        // weaken nsw causing NewSCEV not to equal OldSCEV. For example, suppose
        // we reassociate
        //   I = &a[sext(i +nsw j)] // assuming sizeof(a[0]) = 4
        // to
        //   NewI = &a[sext(i)] + sext(j).
        //
        // ScalarEvolution computes
        //   getSCEV(I)    = a + 4 * sext(i + j)
        //   getSCEV(NewI) = a + 4 * sext(i) + 4 * sext(j)
        // which are different SCEVs.
        //
        // To alleviate this issue of ScalarEvolution not always capturing
        // equivalence, we add I to SeenExprs[OldSCEV] as well so that we can
        // map both SCEV before and after tryReassociate(I) to I.
        //
        // This improvement is exercised in @reassociate_gep_nsw in nary-gep.ll.
        if (NewSCEV != OldSCEV)
          SeenExprs[OldSCEV].push_back(WeakTrackingVH(&*I));
      }
    }
  }
  return Changed;
}
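
// Dispatches to the opcode-specific reassociation routine and returns the
// rewritten instruction, or nullptr if no rewrite applies.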
Instruction *NaryReassociatePass::tryReassociate(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Mul:
    return tryReassociateBinaryOp(cast<BinaryOperator>(I));
  case Instruction::GetElementPtr:
    return tryReassociateGEP(cast<GetElementPtrInst>(I));
  default:
    llvm_unreachable("should be filtered out by isPotentiallyNaryReassociable");
  }
}
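
// Returns true if the target's cost model considers this GEP free, i.e. its
// address computation is expected to fold into the user, in which case
// reassociating it is unlikely to be profitable.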
static bool isGEPFoldable(GetElementPtrInst *GEP,
                          const TargetTransformInfo *TTI) {
  SmallVector<const Value*, 4> Indices;
  for (auto I = GEP->idx_begin(); I != GEP->idx_end(); ++I)
    Indices.push_back(*I);
  return TTI->getGEPCost(GEP->getSourceElementType(), GEP->getPointerOperand(),
                         Indices) == TargetTransformInfo::TCC_Free;
}
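
// Tries to reassociate GEP at each of its array (sequential) indices; returns
// the first successful rewrite, or nullptr.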
Instruction *NaryReassociatePass::tryReassociateGEP(GetElementPtrInst *GEP) {
  // Not worth reassociating GEP if it is foldable.
  if (isGEPFoldable(GEP, TTI))
    return nullptr;

  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1,
                                                  GTI.getIndexedType())) {
        return NewGEP;
      }
    }
  }
  return nullptr;
}
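
// Returns true if Index is narrower than the pointer width of GEP's address
// space and would therefore be sign-extended when used as an index.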
bool NaryReassociatePass::requiresSignExtension(Value *Index,
                                                GetElementPtrInst *GEP) {
  unsigned PointerSizeInBits =
      DL->getPointerSizeInBits(GEP->getType()->getPointerAddressSpace());
  return cast<IntegerType>(Index->getType())->getBitWidth() < PointerSizeInBits;
}
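
// If the I-th index of GEP is an add (possibly behind a sext, or a zext of a
// known-non-negative value), tries to split it into LHS + RHS and reassociate
// around each half.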
GetElementPtrInst *
NaryReassociatePass::tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
                                              unsigned I, Type *IndexedType) {
  Value *IndexToSplit = GEP->getOperand(I + 1);
  if (SExtInst *SExt = dyn_cast<SExtInst>(IndexToSplit)) {
    IndexToSplit = SExt->getOperand(0);
  } else if (ZExtInst *ZExt = dyn_cast<ZExtInst>(IndexToSplit)) {
    // zext can be treated as sext if the source is non-negative.
    if (isKnownNonNegative(ZExt->getOperand(0), *DL, 0, AC, GEP, DT))
      IndexToSplit = ZExt->getOperand(0);
  }

  if (AddOperator *AO = dyn_cast<AddOperator>(IndexToSplit)) {
    // If the I-th index needs sext and the underlying add is not equipped with
    // nsw, we cannot split the add because
    //   sext(LHS + RHS) != sext(LHS) + sext(RHS).
    if (requiresSignExtension(IndexToSplit, GEP) &&
        computeOverflowForSignedAdd(AO, *DL, AC, GEP, DT) !=
            OverflowResult::NeverOverflows)
      return nullptr;

    Value *LHS = AO->getOperand(0), *RHS = AO->getOperand(1);
    // IndexToSplit = LHS + RHS.
    if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I, LHS, RHS, IndexedType))
      return NewGEP;
    // Symmetrically, try IndexToSplit = RHS + LHS.
    if (LHS != RHS) {
      if (auto *NewGEP =
              tryReassociateGEPAtIndex(GEP, I, RHS, LHS, IndexedType))
        return NewGEP;
    }
  }
  return nullptr;
}
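
// Tries to rewrite GEP, whose I-th index is LHS + RHS, as a dominating GEP
// whose I-th index is just LHS, plus an offset of RHS * sizeof(IndexedType)
// bytes.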
GetElementPtrInst *
NaryReassociatePass::tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
                                              unsigned I, Value *LHS,
                                              Value *RHS, Type *IndexedType) {
  // Look for GEP's closest dominator that has the same SCEV as GEP except that
  // the I-th index is replaced with LHS.
  SmallVector<const SCEV *, 4> IndexExprs;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    IndexExprs.push_back(SE->getSCEV(*Index));
  // Replace the I-th index with LHS.
  IndexExprs[I] = SE->getSCEV(LHS);
  if (isKnownNonNegative(LHS, *DL, 0, AC, GEP, DT) &&
      DL->getTypeSizeInBits(LHS->getType()) <
          DL->getTypeSizeInBits(GEP->getOperand(I)->getType())) {
    // Zero-extend LHS if it is non-negative. InstCombine canonicalizes sext to
    // zext if the source operand is proved non-negative. We should do that
    // consistently so that CandidateExpr more likely appears before. See
    // @reassociate_gep_assume for an example of this canonicalization.
    IndexExprs[I] =
        SE->getZeroExtendExpr(IndexExprs[I], GEP->getOperand(I)->getType());
  }
  const SCEV *CandidateExpr = SE->getGEPExpr(cast<GEPOperator>(GEP),
                                             IndexExprs);

  Value *Candidate = findClosestMatchingDominator(CandidateExpr, GEP);
  if (Candidate == nullptr)
    return nullptr;

  IRBuilder<> Builder(GEP);
  // Candidate does not necessarily have the same pointer type as GEP. Use
  // bitcast or pointer cast to make sure they have the same type, so that the
  // later RAUW doesn't complain.
  Candidate = Builder.CreateBitOrPointerCast(Candidate, GEP->getType());
  assert(Candidate->getType() == GEP->getType());

  // NewGEP = (char *)Candidate + RHS * sizeof(IndexedType)
  uint64_t IndexedSize = DL->getTypeAllocSize(IndexedType);
  Type *ElementType = GEP->getResultElementType();
  uint64_t ElementSize = DL->getTypeAllocSize(ElementType);
  // Another less rare case: because I is not necessarily the last index of the
  // GEP, the size of the type at the I-th index (IndexedSize) is not
  // necessarily divisible by ElementSize. For example,
  //
  //   #pragma pack(1)
  //   struct S {
  //     int a[3];
  //     int64 b[8];
  //   };
  //   #pragma pack()
  //
  // sizeof(S) = 76 is indivisible by sizeof(int64) = 8.
  //
  // TODO: bail out on this case for now. We could emit uglygep.
  if (IndexedSize % ElementSize != 0)
    return nullptr;

  // NewGEP = &Candidate[RHS * (sizeof(IndexedType) / sizeof(Candidate[0]))]
  Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
  if (RHS->getType() != IntPtrTy)
    RHS = Builder.CreateSExtOrTrunc(RHS, IntPtrTy);
  if (IndexedSize != ElementSize) {
    RHS = Builder.CreateMul(
        RHS, ConstantInt::get(IntPtrTy, IndexedSize / ElementSize));
  }
  GetElementPtrInst *NewGEP = cast<GetElementPtrInst>(
      Builder.CreateGEP(GEP->getResultElementType(), Candidate, RHS));
  NewGEP->setIsInBounds(GEP->isInBounds());
  NewGEP->takeName(GEP);
  return NewGEP;
}
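
// Tries to reassociate the add or mul I, attempting both operand orders;
// returns the replacement instruction, or nullptr if no rewrite applies.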
Instruction *NaryReassociatePass::tryReassociateBinaryOp(BinaryOperator *I) {
  Value *LHS = I->getOperand(0), *RHS = I->getOperand(1);
  // There is no need to reassociate 0.
  if (SE->getSCEV(I)->isZero())
    return nullptr;
  if (auto *NewI = tryReassociateBinaryOp(LHS, RHS, I))
    return NewI;
  if (auto *NewI = tryReassociateBinaryOp(RHS, LHS, I))
    return NewI;
  return nullptr;
}
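
// Tries to rewrite I = (A op B) op RHS, where LHS is the single-use
// subexpression A op B, as (A op RHS) op B or (B op RHS) op A, reusing a
// dominating instruction that already computes the intermediate expression.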
Instruction *NaryReassociatePass::tryReassociateBinaryOp(Value *LHS, Value *RHS,
                                                         BinaryOperator *I) {
  Value *A = nullptr, *B = nullptr;
  // To be conservative, we reassociate I only when it is the only user of (A op
  // B).
  if (LHS->hasOneUse() && matchTernaryOp(I, LHS, A, B)) {
    // I = (A op B) op RHS
    //   = (A op RHS) op B or (B op RHS) op A
    const SCEV *AExpr = SE->getSCEV(A), *BExpr = SE->getSCEV(B);
    const SCEV *RHSExpr = SE->getSCEV(RHS);
    if (BExpr != RHSExpr) {
      if (auto *NewI =
              tryReassociatedBinaryOp(getBinarySCEV(I, AExpr, RHSExpr), B, I))
        return NewI;
    }
    if (AExpr != RHSExpr) {
      if (auto *NewI =
              tryReassociatedBinaryOp(getBinarySCEV(I, BExpr, RHSExpr), A, I))
        return NewI;
    }
  }
  return nullptr;
}
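
// Rewrites I as "LHS op RHS", where LHS is the closest instruction dominating
// I that computes LHSExpr; returns nullptr if no such instruction is known.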
Instruction *NaryReassociatePass::tryReassociatedBinaryOp(const SCEV *LHSExpr,
                                                          Value *RHS,
                                                          BinaryOperator *I) {
  // Look for the closest dominator LHS of I that computes LHSExpr, and replace
  // I with LHS op RHS.
  auto *LHS = findClosestMatchingDominator(LHSExpr, I);
  if (LHS == nullptr)
    return nullptr;

  Instruction *NewI = nullptr;
  switch (I->getOpcode()) {
  case Instruction::Add:
    NewI = BinaryOperator::CreateAdd(LHS, RHS, "", I);
    break;
  case Instruction::Mul:
    NewI = BinaryOperator::CreateMul(LHS, RHS, "", I);
    break;
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  NewI->takeName(I);
  return NewI;
}
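
// Returns true, binding Op1 and Op2, if V is an add or mul of the same opcode
// as I, making I a ternary expression (Op1 op Op2) op <other operand>.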
bool NaryReassociatePass::matchTernaryOp(BinaryOperator *I, Value *V,
                                         Value *&Op1, Value *&Op2) {
  switch (I->getOpcode()) {
  case Instruction::Add:
    return match(V, m_Add(m_Value(Op1), m_Value(Op2)));
  case Instruction::Mul:
    return match(V, m_Mul(m_Value(Op1), m_Value(Op2)));
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  return false;
}
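
// Builds the SCEV for "LHS op RHS" using the same opcode as I.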
const SCEV *NaryReassociatePass::getBinarySCEV(BinaryOperator *I,
                                               const SCEV *LHS,
                                               const SCEV *RHS) {
  switch (I->getOpcode()) {
  case Instruction::Add:
    return SE->getAddExpr(LHS, RHS);
  case Instruction::Mul:
    return SE->getMulExpr(LHS, RHS);
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  return nullptr;
}
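
// Returns the closest instruction dominating Dominatee whose SCEV equals
// CandidateExpr, or nullptr if no such instruction has been recorded in
// SeenExprs.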
Instruction *
NaryReassociatePass::findClosestMatchingDominator(const SCEV *CandidateExpr,
                                                  Instruction *Dominatee) {
  auto Pos = SeenExprs.find(CandidateExpr);
  if (Pos == SeenExprs.end())
    return nullptr;

  auto &Candidates = Pos->second;
  // Because we process the basic blocks in pre-order of the dominator tree, a
  // candidate that doesn't dominate the current instruction won't dominate any
  // future instruction either. Therefore, we pop it out of the stack. This
  // optimization makes the algorithm O(n).
  while (!Candidates.empty()) {
    // Candidates stores WeakTrackingVHs, so a candidate can be nullptr if it's
    // removed during rewriting.
    if (Value *Candidate = Candidates.back()) {
      Instruction *CandidateInstruction = cast<Instruction>(Candidate);
      if (DT->dominates(CandidateInstruction, Dominatee))
        return CandidateInstruction;
    }
    Candidates.pop_back();
  }
  return nullptr;
}