
[NaryReassociate] run NaryReassociate iteratively

Summary:
An alternative is to use a worklist approach. However, that approach
would break the traversal order, so we couldn't look up SeenExprs
efficiently. I don't see a clear winner here, so I picked the easier approach.

Along with two minor improvements:
1. preserves ScalarEvolution by forgetting replaced instructions
2. removes dead code locally, avoiding the need to run DCE afterwards
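
A condensed sketch of the shape of the change (hypothetical standalone
helpers, simplified from the actual pass code in the diff below):

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

// Stub standing in for one dominator-order sweep over F (doOneIteration in
// the patch); the real sweep walks the dominator tree and rewrites adds,
// returning true if anything changed.
static bool reassociateOnce(Function &F) {
  (void)F;
  return false;
}

// Iterate to a fixed point rather than keeping a worklist, so each sweep
// keeps its depth-first traversal order and the SeenExprs lookups that rely
// on it.
static bool runIteratively(Function &F) {
  bool Changed = false, ChangedInThisIteration;
  do {
    ChangedInThisIteration = reassociateOnce(F);
    Changed |= ChangedInThisIteration;
  } while (ChangedInThisIteration);
  return Changed;
}

// Epilogue for one rewritten add, illustrating the two minor improvements:
// forget the replaced instruction in ScalarEvolution and delete dead code
// locally instead of relying on a separate -dce run.
static void replaceAndCleanUp(Instruction *I, Instruction *NewI,
                              ScalarEvolution &SE,
                              const TargetLibraryInfo *TLI) {
  SE.forgetValue(I);
  I->replaceAllUsesWith(NewI);
  RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
}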

Test Plan: add to slsr-add.ll a test that requires multiple iterations

Reviewers: broune, dberlin, atrick, meheff

Reviewed By: atrick

Subscribers: llvm-commits

Differential Revision: http://reviews.llvm.org/D9058

llvm-svn: 235151
Jingyue Wu 2015-04-17 00:25:10 +00:00
parent 36821b3cea
commit 80d7caa6b3
2 changed files with 69 additions and 9 deletions


@@ -36,9 +36,9 @@
// NaryReassociate works as follows. For every instruction in the form of (a +
// b) + c, it checks whether a + c or b + c is already computed by a dominating
// instruction. If so, it then reassociates (a + b) + c into (a + c) + b or (b +
// c) + a respectively. To efficiently look up whether an expression is
// computed before, we store each instruction seen and its SCEV into an
// SCEV-to-instruction map.
// c) + a and removes the redundancy accordingly. To efficiently look up whether
// an expression is computed before, we store each instruction seen and its SCEV
// into an SCEV-to-instruction map.
//
// Although the algorithm pattern-matches only ternary additions, it
// automatically handles many >3-ary expressions by walking through the function
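
A rough sketch of the reuse check described above (hypothetical helper and
map type, not the pass's actual members):

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Each SCEV maps to the instructions seen so far that compute it; several
// instructions at different program points can share the same SCEV.
using SeenExprsTy = DenseMap<const SCEV *, SmallVector<Instruction *, 2>>;

// For a candidate such as (a + b) + c, ask whether an already-seen
// instruction computes a + c (or b + c) at a point that dominates User; if
// so, that instruction can be reused when reassociating the expression.
static Instruction *findDominatingComputation(const SCEV *CandidateExpr,
                                              Instruction *User,
                                              const SeenExprsTy &SeenExprs,
                                              const DominatorTree &DT) {
  auto It = SeenExprs.find(CandidateExpr);
  if (It == SeenExprs.end())
    return nullptr;
  for (Instruction *Candidate : It->second)
    if (DT.dominates(Candidate, User))
      return Candidate;
  return nullptr;
}
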
@@ -50,6 +50,25 @@
// NaryReassociate first rewrites (a + b) + c to (a + c) + b, and then rewrites
// ((a + c) + b) + d into ((a + c) + d) + b.
//
// Finally, the above dominator-based algorithm may need to be run multiple
// iterations before emitting optimal code. One source of this need is that we
// only split an operand when it is used only once. The above algorithm can
// eliminate an instruction and decrease the usage count of its operands. As a
// result, an instruction that previously had multiple uses may become a
// single-use instruction and thus eligible for split consideration. For
// example,
//
// ac = a + c
// ab = a + b
// abc = ab + c
// ab2 = ab + b
// ab2c = ab2 + c
//
// In the first iteration, we cannot reassociate abc to ac+b because ab is used
// twice. However, we can reassociate ab2c to abc+b in the first iteration. As a
// result, ab2 becomes dead and ab will be used only once in the second
// iteration.
//
// Limitations and TODO items:
//
// 1) We only consider n-ary adds for now. This should be extended and
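
The single-use restriction mentioned above ("we only split an operand when it
is used only once") is what a later iteration can newly satisfy once dead
instructions are removed; a minimal, hypothetical form of that check:

#include "llvm/IR/Value.h"
using namespace llvm;

// Hypothetical guard, not the pass's exact code: an operand only becomes a
// split candidate once every other user of it has gone away, which is exactly
// what a previous iteration's local dead-code removal can achieve.
static bool eligibleForSplit(const Value *Op) {
  return Op->hasOneUse();
}
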
@@ -65,10 +84,12 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace PatternMatch;
@@ -87,13 +108,18 @@ public:
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addPreserved<DominatorTreeWrapperPass>();
AU.addPreserved<ScalarEvolution>();
AU.addPreserved<TargetLibraryInfoWrapperPass>();
AU.addRequired<DominatorTreeWrapperPass>();
// TODO: can we preserve ScalarEvolution?
AU.addRequired<ScalarEvolution>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
AU.setPreservesCFG();
}
private:
// Runs only one iteration of the dominator-based algorithm. See the header
// comments for why we need multiple iterations.
bool doOneIteration(Function &F);
// Reassociates I to a better form.
Instruction *tryReassociateAdd(Instruction *I);
// A helper function for tryReassociateAdd. LHS and RHS are explicitly passed.
@@ -103,6 +129,7 @@ private:
DominatorTree *DT;
ScalarEvolution *SE;
TargetLibraryInfo *TLI;
// A lookup table quickly telling which instructions compute the given SCEV.
// Note that there can be multiple instructions at different locations
// computing to the same SCEV, so we map a SCEV to an instruction list. For
@@ -121,6 +148,7 @@ INITIALIZE_PASS_BEGIN(NaryReassociate, "nary-reassociate", "Nary reassociation",
false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(NaryReassociate, "nary-reassociate", "Nary reassociation",
false, false)
@@ -134,19 +162,31 @@ bool NaryReassociate::runOnFunction(Function &F) {
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = &getAnalysis<ScalarEvolution>();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
// Traverse the dominator tree in the depth-first order. This order makes sure
// all bases of a candidate are in Candidates when we process it.
bool Changed = false, ChangedInThisIteration;
do {
ChangedInThisIteration = doOneIteration(F);
Changed |= ChangedInThisIteration;
} while (ChangedInThisIteration);
return Changed;
}
bool NaryReassociate::doOneIteration(Function &F) {
bool Changed = false;
SeenExprs.clear();
// Traverse the dominator tree in the depth-first order. This order makes sure
// all bases of a candidate are in Candidates when we process it.
for (auto Node = GraphTraits<DominatorTree *>::nodes_begin(DT);
Node != GraphTraits<DominatorTree *>::nodes_end(DT); ++Node) {
BasicBlock *BB = Node->getBlock();
for (auto I = BB->begin(); I != BB->end(); ++I) {
if (I->getOpcode() == Instruction::Add) {
if (Instruction *NewI = tryReassociateAdd(I)) {
Changed = true;
SE->forgetValue(I);
I->replaceAllUsesWith(NewI);
I->eraseFromParent();
RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
I = NewI;
}
// We should add the rewritten instruction because tryReassociateAdd may


@@ -1,8 +1,8 @@
; RUN: opt < %s -nary-reassociate -dce -S | FileCheck %s
; RUN: opt < %s -nary-reassociate -S | FileCheck %s
target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
declare void @foo(i32 %a)
declare void @foo(i32)
; foo(a + c);
; foo(a + (b + c));
@@ -176,3 +176,23 @@ define void @quaternary(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK: call void @foo(i32 [[TMP2]]
ret void
}
define void @iterative(i32 %a, i32 %b, i32 %c) {
%ab = add i32 %a, %b
%abc = add i32 %ab, %c
call void @foo(i32 %abc)
%ab2 = add i32 %ab, %b
%ab2c = add i32 %ab2, %c
; CHECK: %ab2c = add i32 %abc, %b
call void @foo(i32 %ab2c)
; CHECK-NEXT: call void @foo(i32 %ab2c)
%ab3 = add i32 %ab2, %b
%ab3c = add i32 %ab3, %c
; CHECK-NEXT: %ab3c = add i32 %ab2c, %b
call void @foo(i32 %ab3c)
; CHECK-NEXT: call void @foo(i32 %ab3c)
ret void
}