2016-05-19 00:55:34 +02:00
|
|
|
//===- GuardWidening.cpp - ---- Guard widening ----------------------------===//
|
|
|
|
//
|
2019-01-19 09:50:56 +01:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2016-05-19 00:55:34 +02:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file implements the guard widening pass. The semantics of the
|
|
|
|
// @llvm.experimental.guard intrinsic lets LLVM transform it so that it fails
|
|
|
|
// more often than it did before the transform. This optimization is called
|
|
|
|
// "widening" and can be used to hoist and common runtime checks in situations
|
|
|
|
// these:
|
|
|
|
//
|
|
|
|
// %cmp0 = 7 u< Length
|
|
|
|
// call @llvm.experimental.guard(i1 %cmp0) [ "deopt"(...) ]
|
|
|
|
// call @unknown_side_effects()
|
|
|
|
// %cmp1 = 9 u< Length
|
|
|
|
// call @llvm.experimental.guard(i1 %cmp1) [ "deopt"(...) ]
|
|
|
|
// ...
|
|
|
|
//
|
|
|
|
// =>
|
|
|
|
//
|
|
|
|
// %cmp0 = 9 u< Length
|
|
|
|
// call @llvm.experimental.guard(i1 %cmp0) [ "deopt"(...) ]
|
|
|
|
// call @unknown_side_effects()
|
|
|
|
// ...
|
|
|
|
//
|
|
|
|
// If %cmp0 is false, @llvm.experimental.guard will "deoptimize" back to a
|
|
|
|
// generic implementation of the same function, which will have the correct
|
|
|
|
// semantics from that point onward. It is always _legal_ to deoptimize (so
|
|
|
|
// replacing %cmp0 with false is "correct"), though it may not always be
|
|
|
|
// profitable to do so.
|
|
|
|
//
|
|
|
|
// NB! This pass is a work in progress. It hasn't been tuned to be "production
|
|
|
|
// ready" yet. It is known to have quadratic running time and will not scale
|
|
|
|
// to large numbers of guards
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "llvm/Transforms/Scalar/GuardWidening.h"
|
|
|
|
#include "llvm/ADT/DenseMap.h"
|
|
|
|
#include "llvm/ADT/DepthFirstIterator.h"
|
2018-07-31 06:37:11 +02:00
|
|
|
#include "llvm/ADT/Statistic.h"
|
2018-08-06 07:49:19 +02:00
|
|
|
#include "llvm/Analysis/BranchProbabilityInfo.h"
|
2018-08-30 05:39:16 +02:00
|
|
|
#include "llvm/Analysis/GuardUtils.h"
|
2016-05-19 00:55:34 +02:00
|
|
|
#include "llvm/Analysis/LoopInfo.h"
|
2018-04-27 19:29:10 +02:00
|
|
|
#include "llvm/Analysis/LoopPass.h"
|
2016-05-19 00:55:34 +02:00
|
|
|
#include "llvm/Analysis/PostDominators.h"
|
|
|
|
#include "llvm/Analysis/ValueTracking.h"
|
2016-10-21 21:59:26 +02:00
|
|
|
#include "llvm/IR/ConstantRange.h"
|
2016-05-19 00:55:34 +02:00
|
|
|
#include "llvm/IR/Dominators.h"
|
|
|
|
#include "llvm/IR/IntrinsicInst.h"
|
|
|
|
#include "llvm/IR/PatternMatch.h"
|
Sink all InitializePasses.h includes
This file lists every pass in LLVM, and is included by Pass.h, which is
very popular. Every time we add, remove, or rename a pass in LLVM, it
caused lots of recompilation.
I found this fact by looking at this table, which is sorted by the
number of times a file was changed over the last 100,000 git commits
multiplied by the number of object files that depend on it in the
current checkout:
recompiles touches affected_files header
342380 95 3604 llvm/include/llvm/ADT/STLExtras.h
314730 234 1345 llvm/include/llvm/InitializePasses.h
307036 118 2602 llvm/include/llvm/ADT/APInt.h
213049 59 3611 llvm/include/llvm/Support/MathExtras.h
170422 47 3626 llvm/include/llvm/Support/Compiler.h
162225 45 3605 llvm/include/llvm/ADT/Optional.h
158319 63 2513 llvm/include/llvm/ADT/Triple.h
140322 39 3598 llvm/include/llvm/ADT/StringRef.h
137647 59 2333 llvm/include/llvm/Support/Error.h
131619 73 1803 llvm/include/llvm/Support/FileSystem.h
Before this change, touching InitializePasses.h would cause 1345 files
to recompile. After this change, touching it only causes 550 compiles in
an incremental rebuild.
Reviewers: bkramer, asbirlea, bollu, jdoerfert
Differential Revision: https://reviews.llvm.org/D70211
2019-11-13 22:15:01 +01:00
|
|
|
#include "llvm/InitializePasses.h"
|
2017-06-06 13:49:48 +02:00
|
|
|
#include "llvm/Pass.h"
|
2019-11-15 00:15:48 +01:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
2016-05-19 00:55:34 +02:00
|
|
|
#include "llvm/Support/Debug.h"
|
2017-04-26 18:39:58 +02:00
|
|
|
#include "llvm/Support/KnownBits.h"
|
2016-05-19 00:55:34 +02:00
|
|
|
#include "llvm/Transforms/Scalar.h"
|
[NFC] Factor out utilities for manipulating widenable branches
With the widenable condition construct, we have the ability to reason about branches which can be 'widened' (i.e. made to fail more often). We've got a couple o transforms which leverage this. This patch just cleans up the API a bit.
This is prep work for generalizing our definition of a widenable branch slightly. At the moment "br i1 (and A, wc()), ..." is considered widenable, but oddly, neither "br i1 (and wc(), B), ..." or "br i1 wc(), ..." is. That clearly needs addressed, so first, let's centralize the code in one place.
2019-11-19 23:43:13 +01:00
|
|
|
#include "llvm/Transforms/Utils/GuardUtils.h"
|
2018-04-27 19:29:10 +02:00
|
|
|
#include "llvm/Transforms/Utils/LoopUtils.h"
|
Sink all InitializePasses.h includes
This file lists every pass in LLVM, and is included by Pass.h, which is
very popular. Every time we add, remove, or rename a pass in LLVM, it
caused lots of recompilation.
I found this fact by looking at this table, which is sorted by the
number of times a file was changed over the last 100,000 git commits
multiplied by the number of object files that depend on it in the
current checkout:
recompiles touches affected_files header
342380 95 3604 llvm/include/llvm/ADT/STLExtras.h
314730 234 1345 llvm/include/llvm/InitializePasses.h
307036 118 2602 llvm/include/llvm/ADT/APInt.h
213049 59 3611 llvm/include/llvm/Support/MathExtras.h
170422 47 3626 llvm/include/llvm/Support/Compiler.h
162225 45 3605 llvm/include/llvm/ADT/Optional.h
158319 63 2513 llvm/include/llvm/ADT/Triple.h
140322 39 3598 llvm/include/llvm/ADT/StringRef.h
137647 59 2333 llvm/include/llvm/Support/Error.h
131619 73 1803 llvm/include/llvm/Support/FileSystem.h
Before this change, touching InitializePasses.h would cause 1345 files
to recompile. After this change, touching it only causes 550 compiles in
an incremental rebuild.
Reviewers: bkramer, asbirlea, bollu, jdoerfert
Differential Revision: https://reviews.llvm.org/D70211
2019-11-13 22:15:01 +01:00
|
|
|
#include <functional>
|
2016-05-19 00:55:34 +02:00
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
|
|
|
#define DEBUG_TYPE "guard-widening"
|
|
|
|
|
2018-07-31 06:37:11 +02:00
|
|
|
STATISTIC(GuardsEliminated, "Number of eliminated guards");
|
2018-08-06 07:49:19 +02:00
|
|
|
STATISTIC(CondBranchEliminated, "Number of eliminated conditional branches");
|
|
|
|
|
2019-02-13 10:56:30 +01:00
|
|
|
static cl::opt<bool>
|
|
|
|
WidenBranchGuards("guard-widening-widen-branch-guards", cl::Hidden,
|
|
|
|
cl::desc("Whether or not we should widen guards "
|
|
|
|
"expressed as branches by widenable conditions"),
|
|
|
|
cl::init(true));
|
2018-07-31 06:37:11 +02:00
|
|
|
|
2016-05-19 00:55:34 +02:00
|
|
|
namespace {
|
|
|
|
|
2018-08-06 07:49:19 +02:00
|
|
|
// Get the condition of \p I. It can either be a guard or a conditional branch.
|
|
|
|
static Value *getCondition(Instruction *I) {
|
|
|
|
if (IntrinsicInst *GI = dyn_cast<IntrinsicInst>(I)) {
|
|
|
|
assert(GI->getIntrinsicID() == Intrinsic::experimental_guard &&
|
|
|
|
"Bad guard intrinsic?");
|
|
|
|
return GI->getArgOperand(0);
|
|
|
|
}
|
2019-11-21 19:44:13 +01:00
|
|
|
Value *Cond, *WC;
|
|
|
|
BasicBlock *IfTrueBB, *IfFalseBB;
|
|
|
|
if (parseWidenableBranch(I, Cond, WC, IfTrueBB, IfFalseBB))
|
|
|
|
return Cond;
|
|
|
|
|
2018-08-06 07:49:19 +02:00
|
|
|
return cast<BranchInst>(I)->getCondition();
|
2018-08-03 12:16:40 +02:00
|
|
|
}
|
|
|
|
|
2018-08-06 07:49:19 +02:00
|
|
|
// Set the condition for \p I to \p NewCond. \p I can either be a guard or a
|
2019-11-21 19:44:13 +01:00
|
|
|
// conditional branch.
|
2018-08-06 07:49:19 +02:00
|
|
|
static void setCondition(Instruction *I, Value *NewCond) {
|
|
|
|
if (IntrinsicInst *GI = dyn_cast<IntrinsicInst>(I)) {
|
|
|
|
assert(GI->getIntrinsicID() == Intrinsic::experimental_guard &&
|
|
|
|
"Bad guard intrinsic?");
|
|
|
|
GI->setArgOperand(0, NewCond);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
cast<BranchInst>(I)->setCondition(NewCond);
|
2018-08-03 12:16:40 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Eliminates the guard instruction properly.
|
|
|
|
static void eliminateGuard(Instruction *GuardInst) {
|
|
|
|
GuardInst->eraseFromParent();
|
|
|
|
++GuardsEliminated;
|
|
|
|
}
|
|
|
|
|
2016-05-19 00:55:34 +02:00
|
|
|
/// Implementation of the guard-widening transform over a region of the
/// dominator tree (either a whole function or a single loop).
class GuardWideningImpl {
  // Dominator tree for the region; used to decide legality of hoisting.
  DominatorTree &DT;
  // Post-dominator tree; may be null (e.g. in the loop-pass driver), in which
  // case the heuristics below are more conservative.
  PostDominatorTree *PDT;
  // Loop info, used to score hoisting out of loops.
  LoopInfo &LI;

  /// Together, these describe the region of interest.  This might be all of
  /// the blocks within a function, or only a given loop's blocks and preheader.
  DomTreeNode *Root;
  std::function<bool(BasicBlock*)> BlockFilter;

  /// The set of guards and conditional branches whose conditions have been
  /// widened into dominating guards.
  SmallVector<Instruction *, 16> EliminatedGuardsAndBranches;

  /// The set of guards which have been widened to include conditions to other
  /// guards.
  DenseSet<Instruction *> WidenedGuards;

  /// Try to eliminate instruction \p Instr by widening it into an earlier
  /// dominating guard.  \p DFSI is the DFS iterator on the dominator tree that
  /// is currently visiting the block containing \p Guard, and \p GuardsPerBlock
  /// maps BasicBlocks to the set of guards seen in that block.
  bool eliminateInstrViaWidening(
      Instruction *Instr, const df_iterator<DomTreeNode *> &DFSI,
      const DenseMap<BasicBlock *, SmallVector<Instruction *, 8>> &
          GuardsPerBlock, bool InvertCondition = false);

  /// Used to keep track of which widening potential is more effective.
  enum WideningScore {
    /// Don't widen.
    WS_IllegalOrNegative,

    /// Widening is performance neutral as far as the cycles spent in check
    /// conditions goes (but can still help, e.g., code layout, having less
    /// deopt state).
    WS_Neutral,

    /// Widening is profitable.
    WS_Positive,

    /// Widening is very profitable.  Not significantly different from \c
    /// WS_Positive, except by the order.
    WS_VeryPositive
  };

  /// Human-readable name of a score, for debug output.
  static StringRef scoreTypeToString(WideningScore WS);

  /// Compute the score for widening the condition in \p DominatedInstr
  /// into \p DominatingGuard. If \p InvertCond is set, then we widen the
  /// inverted condition of the dominating guard.
  WideningScore computeWideningScore(Instruction *DominatedInstr,
                                     Instruction *DominatingGuard,
                                     bool InvertCond);

  /// Helper to check if \p V can be hoisted to \p InsertPos.
  bool isAvailableAt(const Value *V, const Instruction *InsertPos) const {
    SmallPtrSet<const Instruction *, 8> Visited;
    return isAvailableAt(V, InsertPos, Visited);
  }

  /// Recursive worker for the above; \p Visited breaks cycles and avoids
  /// re-checking shared sub-expressions.
  bool isAvailableAt(const Value *V, const Instruction *InsertPos,
                     SmallPtrSetImpl<const Instruction *> &Visited) const;

  /// Helper to hoist \p V to \p InsertPos.  Guaranteed to succeed if \c
  /// isAvailableAt returned true.
  void makeAvailableAt(Value *V, Instruction *InsertPos) const;

  /// Common helper used by \c widenGuard and \c isWideningCondProfitable.  Try
  /// to generate an expression computing the logical AND of \p Cond0 and (\p
  /// Cond1 XOR \p InvertCondition).
  /// Return true if the expression computing the AND is only as
  /// expensive as computing one of the two. If \p InsertPt is true then
  /// actually generate the resulting expression, make it available at \p
  /// InsertPt and return it in \p Result (else no change to the IR is made).
  bool widenCondCommon(Value *Cond0, Value *Cond1, Instruction *InsertPt,
                       Value *&Result, bool InvertCondition);

  /// Represents a range check of the form \c Base + \c Offset u< \c Length,
  /// with the constraint that \c Length is not negative.  \c CheckInst is the
  /// pre-existing instruction in the IR that computes the result of this range
  /// check.
  class RangeCheck {
    const Value *Base;
    const ConstantInt *Offset;
    const Value *Length;
    ICmpInst *CheckInst;

  public:
    explicit RangeCheck(const Value *Base, const ConstantInt *Offset,
                        const Value *Length, ICmpInst *CheckInst)
        : Base(Base), Offset(Offset), Length(Length), CheckInst(CheckInst) {}

    void setBase(const Value *NewBase) { Base = NewBase; }
    void setOffset(const ConstantInt *NewOffset) { Offset = NewOffset; }

    const Value *getBase() const { return Base; }
    const ConstantInt *getOffset() const { return Offset; }
    const APInt &getOffsetValue() const { return getOffset()->getValue(); }
    const Value *getLength() const { return Length; };
    ICmpInst *getCheckInst() const { return CheckInst; }

    /// Print the check in "Base: ... Offset: ... Length: ..." form.
    void print(raw_ostream &OS, bool PrintTypes = false) {
      OS << "Base: ";
      Base->printAsOperand(OS, PrintTypes);
      OS << " Offset: ";
      Offset->printAsOperand(OS, PrintTypes);
      OS << " Length: ";
      Length->printAsOperand(OS, PrintTypes);
    }

    LLVM_DUMP_METHOD void dump() {
      print(dbgs());
      dbgs() << "\n";
    }
  };

  /// Parse \p CheckCond into a conjunction (logical-and) of range checks; and
  /// append them to \p Checks.  Returns true on success, may clobber \c Checks
  /// on failure.
  bool parseRangeChecks(Value *CheckCond, SmallVectorImpl<RangeCheck> &Checks) {
    SmallPtrSet<const Value *, 8> Visited;
    return parseRangeChecks(CheckCond, Checks, Visited);
  }

  /// Recursive worker for the above; \p Visited avoids re-visiting shared
  /// sub-expressions of the condition.
  bool parseRangeChecks(Value *CheckCond, SmallVectorImpl<RangeCheck> &Checks,
                        SmallPtrSetImpl<const Value *> &Visited);

  /// Combine the checks in \p Checks into a smaller set of checks and append
  /// them into \p CombinedChecks.  Return true on success (i.e. all of checks
  /// in \p Checks were combined into \p CombinedChecks).  Clobbers \p Checks
  /// and \p CombinedChecks on success and on failure.
  bool combineRangeChecks(SmallVectorImpl<RangeCheck> &Checks,
                          SmallVectorImpl<RangeCheck> &CombinedChecks) const;

  /// Can we compute the logical AND of \p Cond0 and \p Cond1 for the price of
  /// computing only one of the two expressions?
  bool isWideningCondProfitable(Value *Cond0, Value *Cond1, bool InvertCond) {
    Value *ResultUnused;
    // Dry run: null InsertPt means no IR is actually emitted.
    return widenCondCommon(Cond0, Cond1, /*InsertPt=*/nullptr, ResultUnused,
                           InvertCond);
  }

  /// If \p InvertCondition is false, Widen \p ToWiden to fail if
  /// \p NewCondition is false, otherwise make it fail if \p NewCondition is
  /// true (in addition to whatever it is already checking).
  void widenGuard(Instruction *ToWiden, Value *NewCondition,
                  bool InvertCondition) {
    Value *Result;

    widenCondCommon(getCondition(ToWiden), NewCondition, ToWiden, Result,
                    InvertCondition);
    // Widenable branches keep their "& widenable_condition" structure, so the
    // new condition must be installed via the dedicated helper.
    if (isGuardAsWidenableBranch(ToWiden)) {
      setWidenableBranchCond(cast<BranchInst>(ToWiden), Result);
      return;
    }
    setCondition(ToWiden, Result);
  }

public:
  explicit GuardWideningImpl(DominatorTree &DT, PostDominatorTree *PDT,
                             LoopInfo &LI, DomTreeNode *Root,
                             std::function<bool(BasicBlock*)> BlockFilter)
      : DT(DT), PDT(PDT), LI(LI), Root(Root), BlockFilter(BlockFilter)
  {}

  /// The entry point for this pass.
  bool run();
};
|
|
|
|
}
|
|
|
|
|
2019-02-13 10:56:30 +01:00
|
|
|
static bool isSupportedGuardInstruction(const Instruction *Insn) {
|
|
|
|
if (isGuard(Insn))
|
|
|
|
return true;
|
|
|
|
if (WidenBranchGuards && isGuardAsWidenableBranch(Insn))
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2016-05-19 00:55:34 +02:00
|
|
|
bool GuardWideningImpl::run() {
  // Guards seen so far, keyed by block, populated as the DFS below visits each
  // block.  eliminateInstrViaWidening only looks up blocks on the current DFS
  // path, which are guaranteed to be populated already.
  DenseMap<BasicBlock *, SmallVector<Instruction *, 8>> GuardsInBlock;
  bool Changed = false;
  // Walk the region in dominator-tree DFS order so that every candidate
  // dominating guard is seen before the guards it might absorb.
  for (auto DFI = df_begin(Root), DFE = df_end(Root);
       DFI != DFE; ++DFI) {
    auto *BB = (*DFI)->getBlock();
    if (!BlockFilter(BB))
      continue;

    auto &CurrentList = GuardsInBlock[BB];

    // Collect this block's guards in program order before trying to widen any
    // of them (widening mutates conditions, so gather first).
    for (auto &I : *BB)
      if (isSupportedGuardInstruction(&I))
        CurrentList.push_back(cast<Instruction>(&I));

    for (auto *II : CurrentList)
      Changed |= eliminateInstrViaWidening(II, DFI, GuardsInBlock);
  }

  assert(EliminatedGuardsAndBranches.empty() || Changed);
  // Second phase: physically remove the guards whose conditions were folded
  // into a dominating guard.  A guard that was itself widened is kept, since
  // it now carries the merged condition.
  for (auto *I : EliminatedGuardsAndBranches)
    if (!WidenedGuards.count(I)) {
      // eliminateInstrViaWidening replaced the condition with a constant.
      assert(isa<ConstantInt>(getCondition(I)) && "Should be!");
      if (isSupportedGuardInstruction(I))
        eliminateGuard(I);
      else {
        // Branches are left in place (now trivially foldable) and only
        // counted; a later cleanup pass removes them.
        assert(isa<BranchInst>(I) &&
               "Eliminated something other than guard or branch?");
        ++CondBranchEliminated;
      }
    }

  return Changed;
}
|
|
|
|
|
2019-02-04 11:31:18 +01:00
|
|
|
// Try to fold \p Instr's condition into the best-scoring guard on the current
// dominator-tree DFS path.  Returns true (and records the elimination) on
// success.
bool GuardWideningImpl::eliminateInstrViaWidening(
    Instruction *Instr, const df_iterator<DomTreeNode *> &DFSI,
    const DenseMap<BasicBlock *, SmallVector<Instruction *, 8>> &
        GuardsInBlock, bool InvertCondition) {
  // Ignore trivial true or false conditions. These instructions will be
  // trivially eliminated by any cleanup pass. Do not erase them because other
  // guards can possibly be widened into them.
  if (isa<ConstantInt>(getCondition(Instr)))
    return false;

  Instruction *BestSoFar = nullptr;
  auto BestScoreSoFar = WS_IllegalOrNegative;

  // In the set of dominating guards, find the one we can merge GuardInst with
  // for the most profit.
  for (unsigned i = 0, e = DFSI.getPathLength(); i != e; ++i) {
    auto *CurBB = DFSI.getPath(i)->getBlock();
    // Once we leave the region of interest, no block further down the path
    // can be in it either.
    if (!BlockFilter(CurBB))
      break;
    assert(GuardsInBlock.count(CurBB) && "Must have been populated by now!");
    const auto &GuardsInCurBB = GuardsInBlock.find(CurBB)->second;

    // In Instr's own block, only guards strictly before Instr are candidates;
    // in dominating blocks, every guard is.
    auto I = GuardsInCurBB.begin();
    auto E = Instr->getParent() == CurBB ? find(GuardsInCurBB, Instr)
                                         : GuardsInCurBB.end();

#ifndef NDEBUG
    {
      // Sanity-check that GuardsInCurBB lists the guards in program order,
      // which the half-open range computed above relies on.
      unsigned Index = 0;
      for (auto &I : *CurBB) {
        if (Index == GuardsInCurBB.size())
          break;
        if (GuardsInCurBB[Index] == &I)
          Index++;
      }
      assert(Index == GuardsInCurBB.size() &&
             "Guards expected to be in order!");
    }
#endif

    // Instr's block must be exactly the last block on the DFS path.
    assert((i == (e - 1)) == (Instr->getParent() == CurBB) && "Bad DFS?");

    for (auto *Candidate : make_range(I, E)) {
      auto Score = computeWideningScore(Instr, Candidate, InvertCondition);
      LLVM_DEBUG(dbgs() << "Score between " << *getCondition(Instr)
                        << " and " << *getCondition(Candidate) << " is "
                        << scoreTypeToString(Score) << "\n");
      if (Score > BestScoreSoFar) {
        BestScoreSoFar = Score;
        BestSoFar = Candidate;
      }
    }
  }

  if (BestScoreSoFar == WS_IllegalOrNegative) {
    LLVM_DEBUG(dbgs() << "Did not eliminate guard " << *Instr << "\n");
    return false;
  }

  assert(BestSoFar != Instr && "Should have never visited same guard!");
  assert(DT.dominates(BestSoFar, Instr) && "Should be!");

  LLVM_DEBUG(dbgs() << "Widening " << *Instr << " into " << *BestSoFar
                    << " with score " << scoreTypeToString(BestScoreSoFar)
                    << "\n");
  // Merge Instr's condition into the chosen dominating guard, then neutralize
  // Instr itself by giving it a constant condition (true normally, false when
  // the inverted condition was widened); run() removes it later.
  widenGuard(BestSoFar, getCondition(Instr), InvertCondition);
  auto NewGuardCondition = InvertCondition
                               ? ConstantInt::getFalse(Instr->getContext())
                               : ConstantInt::getTrue(Instr->getContext());
  setCondition(Instr, NewGuardCondition);
  EliminatedGuardsAndBranches.push_back(Instr);
  WidenedGuards.insert(BestSoFar);
  return true;
}
|
|
|
|
|
2019-02-04 11:20:51 +01:00
|
|
|
// Heuristically score the profitability (and check the legality) of widening
// \p DominatedInstr's condition into \p DominatingGuard.
GuardWideningImpl::WideningScore
GuardWideningImpl::computeWideningScore(Instruction *DominatedInstr,
                                        Instruction *DominatingGuard,
                                        bool InvertCond) {
  Loop *DominatedInstrLoop = LI.getLoopFor(DominatedInstr->getParent());
  Loop *DominatingGuardLoop = LI.getLoopFor(DominatingGuard->getParent());
  bool HoistingOutOfLoop = false;

  if (DominatingGuardLoop != DominatedInstrLoop) {
    // Be conservative and don't widen into a sibling loop.  TODO: If the
    // sibling is colder, we should consider allowing this.
    if (DominatingGuardLoop &&
        !DominatingGuardLoop->contains(DominatedInstrLoop))
      return WS_IllegalOrNegative;

    HoistingOutOfLoop = true;
  }

  // The dominated condition must be computable at the dominating guard for
  // the widening to be legal at all.
  if (!isAvailableAt(getCondition(DominatedInstr), DominatingGuard))
    return WS_IllegalOrNegative;

  // If the guard was conditional executed, it may never be reached
  // dynamically. There are two potential downsides to hoisting it out of the
  // conditionally executed region: 1) we may spuriously deopt without need and
  // 2) we have the extra cost of computing the guard condition in the common
  // case. At the moment, we really only consider the second in our heuristic
  // here. TODO: evaluate cost model for spurious deopt
  // NOTE: As written, this also lets us hoist right over another guard which
  // is essentially just another spelling for control flow.
  if (isWideningCondProfitable(getCondition(DominatedInstr),
                               getCondition(DominatingGuard), InvertCond))
    return HoistingOutOfLoop ? WS_VeryPositive : WS_Positive;

  if (HoistingOutOfLoop)
    return WS_Positive;

  // Returns true if we might be hoisting above explicit control flow.  Note
  // that this completely ignores implicit control flow (guards, calls which
  // throw, etc...).  That choice appears arbitrary.
  auto MaybeHoistingOutOfIf = [&]() {
    auto *DominatingBlock = DominatingGuard->getParent();
    auto *DominatedBlock = DominatedInstr->getParent();
    // For a widenable branch, code after the guard lives in the taken
    // successor, so compare against that block instead.
    if (isGuardAsWidenableBranch(DominatingGuard))
      DominatingBlock = cast<BranchInst>(DominatingGuard)->getSuccessor(0);

    // Same Block?
    if (DominatedBlock == DominatingBlock)
      return false;
    // Obvious successor (common loop header/preheader case)
    if (DominatedBlock == DominatingBlock->getUniqueSuccessor())
      return false;
    // TODO: diamond, triangle cases
    if (!PDT) return true;
    return !PDT->dominates(DominatedBlock, DominatingBlock);
  };

  return MaybeHoistingOutOfIf() ? WS_IllegalOrNegative : WS_Neutral;
}
|
|
|
|
|
2019-02-13 12:54:45 +01:00
|
|
|
// Return true if the expression tree rooted at \p V could be speculatively
// recomputed at \p Loc: every instruction feeding it either already dominates
// \p Loc or is itself safely hoistable.
bool GuardWideningImpl::isAvailableAt(
    const Value *V, const Instruction *Loc,
    SmallPtrSetImpl<const Instruction *> &Visited) const {
  auto *Inst = dyn_cast<Instruction>(V);
  // Non-instructions (constants, arguments) and anything already dominating
  // Loc need no hoisting; an already-visited instruction was vetted before.
  if (!Inst || DT.dominates(Inst, Loc) || Visited.count(Inst))
    return true;

  // Hoisting may execute Inst on paths where it previously did not run, so it
  // must be speculatable; loads are excluded since memory may differ at Loc.
  if (!isSafeToSpeculativelyExecute(Inst, Loc, &DT) ||
      Inst->mayReadFromMemory())
    return false;

  Visited.insert(Inst);

  // We only want to go _up_ the dominance chain when recursing.
  assert(!isa<PHINode>(Loc) &&
         "PHIs should return false for isSafeToSpeculativelyExecute");
  assert(DT.isReachableFromEntry(Inst->getParent()) &&
         "We did a DFS from the block entry!");
  // All operands must in turn be available at Loc.
  return all_of(Inst->operands(),
                [&](Value *Op) { return isAvailableAt(Op, Loc, Visited); });
}
|
|
|
|
|
2019-02-13 12:54:45 +01:00
|
|
|
// Hoist the expression tree rooted at \p V so it dominates \p Loc, by moving
// each non-dominating instruction (operands first) to just before \p Loc.
// Must only be called after isAvailableAt returned true for the same pair.
void GuardWideningImpl::makeAvailableAt(Value *V, Instruction *Loc) const {
  auto *Inst = dyn_cast<Instruction>(V);
  // Nothing to do for non-instructions or values that already dominate Loc.
  if (!Inst || DT.dominates(Inst, Loc))
    return;

  assert(isSafeToSpeculativelyExecute(Inst, Loc, &DT) &&
         !Inst->mayReadFromMemory() && "Should've checked with isAvailableAt!");

  // Hoist operands first so Inst's uses still dominate it after the move.
  for (Value *Op : Inst->operands())
    makeAvailableAt(Op, Loc);

  Inst->moveBefore(Loc);
}
|
|
|
|
|
|
|
|
// Try to compute Cond0 AND (Cond1, possibly inverted) for the cost of one
// check.  Returns true when a single merged check was found; if InsertPt is
// non-null the merged IR is emitted and returned via Result.  On failure the
// plain AND is still emitted (when InsertPt is set) but false is returned.
bool GuardWideningImpl::widenCondCommon(Value *Cond0, Value *Cond1,
                                        Instruction *InsertPt, Value *&Result,
                                        bool InvertCondition) {
  using namespace llvm::PatternMatch;

  {
    // L >u C0 && L >u C1 -> L >u max(C0, C1)
    // More generally: merge two icmps against constants on the same LHS into
    // a single icmp via ConstantRange intersection.
    ConstantInt *RHS0, *RHS1;
    Value *LHS;
    ICmpInst::Predicate Pred0, Pred1;
    if (match(Cond0, m_ICmp(Pred0, m_Value(LHS), m_ConstantInt(RHS0))) &&
        match(Cond1, m_ICmp(Pred1, m_Specific(LHS), m_ConstantInt(RHS1)))) {
      if (InvertCondition)
        Pred1 = ICmpInst::getInversePredicate(Pred1);

      ConstantRange CR0 =
          ConstantRange::makeExactICmpRegion(Pred0, RHS0->getValue());
      ConstantRange CR1 =
          ConstantRange::makeExactICmpRegion(Pred1, RHS1->getValue());

      // SubsetIntersect is a subset of the actual mathematical intersection of
      // CR0 and CR1, while SupersetIntersect is a superset of the actual
      // mathematical intersection.  If these two ConstantRanges are equal, then
      // we know we were able to represent the actual mathematical intersection
      // of CR0 and CR1, and can use the same to generate an icmp instruction.
      //
      // Given what we're doing here and the semantics of guards, it would
      // actually be correct to just use SubsetIntersect, but that may be too
      // aggressive in cases we care about.
      auto SubsetIntersect = CR0.inverse().unionWith(CR1.inverse()).inverse();
      auto SupersetIntersect = CR0.intersectWith(CR1);

      APInt NewRHSAP;
      CmpInst::Predicate Pred;
      if (SubsetIntersect == SupersetIntersect &&
          SubsetIntersect.getEquivalentICmp(Pred, NewRHSAP)) {
        if (InsertPt) {
          ConstantInt *NewRHS = ConstantInt::get(Cond0->getContext(), NewRHSAP);
          Result = new ICmpInst(InsertPt, Pred, LHS, NewRHS, "wide.chk");
        }
        return true;
      }
    }
  }

  {
    // Second strategy: decompose both conditions into conjunctions of range
    // checks and re-combine them into a smaller set.
    SmallVector<GuardWideningImpl::RangeCheck, 4> Checks, CombinedChecks;
    // TODO: Support InvertCondition case?
    if (!InvertCondition &&
        parseRangeChecks(Cond0, Checks) && parseRangeChecks(Cond1, Checks) &&
        combineRangeChecks(Checks, CombinedChecks)) {
      if (InsertPt) {
        Result = nullptr;
        // AND the combined checks together, hoisting each one first.
        for (auto &RC : CombinedChecks) {
          makeAvailableAt(RC.getCheckInst(), InsertPt);
          if (Result)
            Result = BinaryOperator::CreateAnd(RC.getCheckInst(), Result, "",
                                               InsertPt);
          else
            Result = RC.getCheckInst();
        }
        assert(Result && "Failed to find result value");
        Result->setName("wide.chk");
      }
      return true;
    }
  }

  // Base case -- just logical-and the two conditions together.

  if (InsertPt) {
    makeAvailableAt(Cond0, InsertPt);
    makeAvailableAt(Cond1, InsertPt);
    if (InvertCondition)
      Cond1 = BinaryOperator::CreateNot(Cond1, "inverted", InsertPt);
    Result = BinaryOperator::CreateAnd(Cond0, Cond1, "wide.chk", InsertPt);
  }

  // We were not able to compute Cond0 AND Cond1 for the price of one.
  return false;
}
|
|
|
|
|
2016-05-20 00:55:46 +02:00
|
|
|
/// Try to express \p CheckCond as a conjunction of range checks of the form
/// "Base + Offset u< Length", appending one RangeCheck per parsed conjunct to
/// \p Checks.  Returns true iff every conjunct was successfully parsed; on a
/// false return, \p Checks may contain checks from already-parsed conjuncts.
/// \p Visited guards against re-processing the same condition value: a
/// previously seen condition is treated as trivially parsed (returns true
/// without appending anything).
bool GuardWideningImpl::parseRangeChecks(
    Value *CheckCond, SmallVectorImpl<GuardWideningImpl::RangeCheck> &Checks,
    SmallPtrSetImpl<const Value *> &Visited) {
  // Already seen this condition -- nothing new to add.
  if (!Visited.insert(CheckCond).second)
    return true;

  using namespace llvm::PatternMatch;

  {
    // A conjunction parses iff both of its operands parse.
    // NOTE(review): these recursive calls go through the two-argument
    // overload, so `Visited` presumably restarts for each sub-condition --
    // confirm against that overload's definition (not visible here).
    Value *AndLHS, *AndRHS;
    if (match(CheckCond, m_And(m_Value(AndLHS), m_Value(AndRHS))))
      return parseRangeChecks(AndLHS, Checks) &&
             parseRangeChecks(AndRHS, Checks);
  }

  // Only integer icmps of the shape "x u< L" or "L u> x" are recognized.
  auto *IC = dyn_cast<ICmpInst>(CheckCond);
  if (!IC || !IC->getOperand(0)->getType()->isIntegerTy() ||
      (IC->getPredicate() != ICmpInst::ICMP_ULT &&
       IC->getPredicate() != ICmpInst::ICMP_UGT))
    return false;

  // Canonicalize to "CmpLHS u< CmpRHS" by swapping the operands of a u>.
  const Value *CmpLHS = IC->getOperand(0), *CmpRHS = IC->getOperand(1);
  if (IC->getPredicate() == ICmpInst::ICMP_UGT)
    std::swap(CmpLHS, CmpRHS);

  auto &DL = IC->getModule()->getDataLayout();

  // Start with Base = CmpLHS, Offset = 0, Length = CmpRHS.
  GuardWideningImpl::RangeCheck Check(
      CmpLHS, cast<ConstantInt>(ConstantInt::getNullValue(CmpRHS->getType())),
      CmpRHS, IC);

  // The combining logic is only sound for non-negative lengths.
  if (!isKnownNonNegative(Check.getLength(), DL))
    return false;

  // What we have in \c Check now is a correct interpretation of \p CheckCond.
  // Try to see if we can move some constant offsets into the \c Offset field.

  bool Changed;
  auto &Ctx = CheckCond->getContext();

  // Iterate to a fixed point, peeling constant add/or terms off the base and
  // accumulating them into the offset.
  do {
    Value *OpLHS;
    ConstantInt *OpRHS;
    Changed = false;

#ifndef NDEBUG
    auto *BaseInst = dyn_cast<Instruction>(Check.getBase());
    assert((!BaseInst || DT.isReachableFromEntry(BaseInst->getParent())) &&
           "Unreachable instruction?");
#endif

    if (match(Check.getBase(), m_Add(m_Value(OpLHS), m_ConstantInt(OpRHS)))) {
      // Base = OpLHS + C  ==>  Base = OpLHS, Offset += C.
      Check.setBase(OpLHS);
      APInt NewOffset = Check.getOffsetValue() + OpRHS->getValue();
      Check.setOffset(ConstantInt::get(Ctx, NewOffset));
      Changed = true;
    } else if (match(Check.getBase(),
                     m_Or(m_Value(OpLHS), m_ConstantInt(OpRHS)))) {
      // Base = OpLHS | C behaves like an add only when the set bits of C are
      // known zero in OpLHS (i.e. the "or" cannot carry).
      KnownBits Known = computeKnownBits(OpLHS, DL);
      if ((OpRHS->getValue() & Known.Zero) == OpRHS->getValue()) {
        Check.setBase(OpLHS);
        APInt NewOffset = Check.getOffsetValue() + OpRHS->getValue();
        Check.setOffset(ConstantInt::get(Ctx, NewOffset));
        Changed = true;
      }
    }
  } while (Changed);

  Checks.push_back(Check);
  return true;
}
|
|
|
|
|
|
|
|
/// Try to shrink the set of range checks in \p Checks by proving that, within
/// each group sharing the same base and length, the checks with the smallest
/// and largest offsets imply all the others.  Surviving checks are appended to
/// \p RangeChecksOut.  Returns true iff the output set is strictly smaller
/// than the input set (i.e. combining made progress); \p Checks is consumed
/// (emptied) group by group as a side effect.
bool GuardWideningImpl::combineRangeChecks(
    SmallVectorImpl<GuardWideningImpl::RangeCheck> &Checks,
    SmallVectorImpl<GuardWideningImpl::RangeCheck> &RangeChecksOut) const {
  unsigned OldCount = Checks.size();
  while (!Checks.empty()) {
    // Pick all of the range checks with a specific base and length, and try to
    // merge them.
    const Value *CurrentBase = Checks.front().getBase();
    const Value *CurrentLength = Checks.front().getLength();

    SmallVector<GuardWideningImpl::RangeCheck, 3> CurrentChecks;

    auto IsCurrentCheck = [&](GuardWideningImpl::RangeCheck &RC) {
      return RC.getBase() == CurrentBase && RC.getLength() == CurrentLength;
    };

    // Move the current group out of Checks.
    copy_if(Checks, std::back_inserter(CurrentChecks), IsCurrentCheck);
    erase_if(Checks, IsCurrentCheck);

    assert(CurrentChecks.size() != 0 && "We know we have at least one!");

    // Groups of fewer than three checks cannot be shrunk (we keep at most the
    // two extremes anyway), so pass them through unchanged.
    if (CurrentChecks.size() < 3) {
      llvm::append_range(RangeChecksOut, CurrentChecks);
      continue;
    }

    // CurrentChecks.size() will typically be 3 here, but so far there has been
    // no need to hard-code that fact.

    // Order the group by signed offset so front()/back() are the extremes.
    llvm::sort(CurrentChecks, [&](const GuardWideningImpl::RangeCheck &LHS,
                                  const GuardWideningImpl::RangeCheck &RHS) {
      return LHS.getOffsetValue().slt(RHS.getOffsetValue());
    });

    // Note: std::sort should not invalidate the ChecksStart iterator.

    const ConstantInt *MinOffset = CurrentChecks.front().getOffset();
    const ConstantInt *MaxOffset = CurrentChecks.back().getOffset();

    // Precond_1 below: the offset span must be u< INT_MIN.
    unsigned BitWidth = MaxOffset->getValue().getBitWidth();
    if ((MaxOffset->getValue() - MinOffset->getValue())
            .ugt(APInt::getSignedMinValue(BitWidth)))
      return false;

    APInt MaxDiff = MaxOffset->getValue() - MinOffset->getValue();
    const APInt &HighOffset = MaxOffset->getValue();
    auto OffsetOK = [&](const GuardWideningImpl::RangeCheck &RC) {
      return (HighOffset - RC.getOffsetValue()).ult(MaxDiff);
    };

    // Precond_0 / Precond_2 below: every interior offset must lie strictly
    // between the extremes, and the extremes must differ.
    if (MaxDiff.isMinValue() || !all_of(drop_begin(CurrentChecks), OffsetOK))
      return false;

    // We have a series of f+1 checks as:
    //
    //   I+k_0 u< L   ... Chk_0
    //   I+k_1 u< L   ... Chk_1
    //   ...
    //   I+k_f u< L   ... Chk_f
    //
    //     with forall i in [0,f]: k_f-k_i u< k_f-k_0  ... Precond_0
    //          k_f-k_0 u< INT_MIN+k_f                 ... Precond_1
    //          k_f != k_0                             ... Precond_2
    //
    // Claim:
    //   Chk_0 AND Chk_f implies all the other checks
    //
    // Informal proof sketch:
    //
    //   We will show that the integer range [I+k_0,I+k_f] does not unsigned-wrap
    //   (i.e. going from I+k_0 to I+k_f does not cross the -1,0 boundary) and
    //   thus I+k_f is the greatest unsigned value in that range.
    //
    //   This combined with Ckh_(f+1) shows that everything in that range is u< L.
    //   Via Precond_0 we know that all of the indices in Chk_0 through Chk_(f+1)
    //   lie in [I+k_0,I+k_f], this proving our claim.
    //
    //   To see that [I+k_0,I+k_f] is not a wrapping range, note that there are
    //   two possibilities: I+k_0 u< I+k_f or I+k_0 >u I+k_f (they can't be equal
    //   since k_0 != k_f).  In the former case, [I+k_0,I+k_f] is not a wrapping
    //   range by definition, and the latter case is impossible:
    //
    //     0-----I+k_f---I+k_0----L---INT_MAX,INT_MIN------------------(-1)
    //     xxxxxx                     xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    //
    //   For Chk_0 to succeed, we'd have to have k_f-k_0 (the range highlighted
    //   with 'x' above) to be at least >u INT_MIN.

    RangeChecksOut.emplace_back(CurrentChecks.front());
    RangeChecksOut.emplace_back(CurrentChecks.back());
  }

  assert(RangeChecksOut.size() <= OldCount && "We pessimized!");
  return RangeChecksOut.size() != OldCount;
}
|
|
|
|
|
2017-07-31 12:07:49 +02:00
|
|
|
#ifndef NDEBUG
|
2016-05-19 00:55:34 +02:00
|
|
|
StringRef GuardWideningImpl::scoreTypeToString(WideningScore WS) {
|
|
|
|
switch (WS) {
|
|
|
|
case WS_IllegalOrNegative:
|
|
|
|
return "IllegalOrNegative";
|
|
|
|
case WS_Neutral:
|
|
|
|
return "Neutral";
|
|
|
|
case WS_Positive:
|
|
|
|
return "Positive";
|
|
|
|
case WS_VeryPositive:
|
|
|
|
return "VeryPositive";
|
|
|
|
}
|
|
|
|
|
|
|
|
llvm_unreachable("Fully covered switch above!");
|
|
|
|
}
|
2017-07-31 12:07:49 +02:00
|
|
|
#endif
|
2016-05-19 00:55:34 +02:00
|
|
|
|
2018-03-24 00:41:47 +01:00
|
|
|
/// New-PM function-pass entry point: widen guards across the whole function.
PreservedAnalyses GuardWideningPass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);

  // No block filter: every block of the function is in scope.
  auto AllBlocks = [](BasicBlock *) { return true; };
  const bool Changed =
      GuardWideningImpl(DT, &PDT, LI, DT.getRootNode(), AllBlocks).run();

  if (!Changed)
    return PreservedAnalyses::all();

  // The transform rewrites conditions but never the CFG shape.
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}
|
|
|
|
|
2019-04-18 21:17:14 +02:00
|
|
|
/// New-PM loop-pass entry point: widen guards within a single loop.
PreservedAnalyses GuardWideningPass::run(Loop &L, LoopAnalysisManager &AM,
                                         LoopStandardAnalysisResults &AR,
                                         LPMUpdater &U) {
  // Root the walk at the loop's unique predecessor when one exists,
  // otherwise at the header.
  BasicBlock *RootBB = L.getLoopPredecessor();
  if (!RootBB)
    RootBB = L.getHeader();

  // Restrict widening to the root block plus the blocks of this loop.
  auto InScope = [&](BasicBlock *BB) {
    return BB == RootBB || L.contains(BB);
  };

  // No post-dominator tree is available in the loop pipeline.
  const bool Changed =
      GuardWideningImpl(AR.DT, nullptr, AR.LI, AR.DT.getNode(RootBB), InScope)
          .run();

  if (!Changed)
    return PreservedAnalyses::all();
  return getLoopPassPreservedAnalyses();
}
|
|
|
|
|
2018-03-24 00:41:47 +01:00
|
|
|
namespace {
/// Legacy-PM function pass wrapping GuardWideningImpl over a whole function.
struct GuardWideningLegacyPass : public FunctionPass {
  static char ID;  // Pass identification.

  GuardWideningLegacyPass() : FunctionPass(ID) {
    initializeGuardWideningLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto &PDT = getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
    // No block filter: the whole function is in scope.
    return GuardWideningImpl(DT, &PDT, LI, DT.getRootNode(),
                             [](BasicBlock*) { return true; } ).run();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<PostDominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
  }
};

/// Same as above, but restricted to a single loop at a time.  Can be
/// scheduled with other loop passes w/o breaking out of LPM
struct LoopGuardWideningLegacyPass : public LoopPass {
  static char ID;  // Pass identification.

  LoopGuardWideningLegacyPass() : LoopPass(ID) {
    initializeLoopGuardWideningLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    // The post-dominator tree is optional here; pass nullptr when absent.
    auto *PDTWP = getAnalysisIfAvailable<PostDominatorTreeWrapperPass>();
    auto *PDT = PDTWP ? &PDTWP->getPostDomTree() : nullptr;
    // Root at the loop's unique predecessor, or the header if there is none.
    BasicBlock *RootBB = L->getLoopPredecessor();
    if (!RootBB)
      RootBB = L->getHeader();
    // Restrict widening to the root block plus the blocks of this loop.
    auto BlockFilter = [&](BasicBlock *BB) {
      return BB == RootBB || L->contains(BB);
    };
    return GuardWideningImpl(DT, PDT, LI,
                             DT.getNode(RootBB), BlockFilter).run();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    getLoopAnalysisUsage(AU);
    AU.addPreserved<PostDominatorTreeWrapperPass>();
  }
};
}
|
|
|
|
|
2016-05-19 00:55:34 +02:00
|
|
|
// Pass ID definitions; the legacy pass manager keys off the addresses of
// these variables, not their values.
char GuardWideningLegacyPass::ID = 0;
char LoopGuardWideningLegacyPass::ID = 0;
|
2016-05-19 00:55:34 +02:00
|
|
|
|
|
|
|
// Register the function-level pass and its required analyses with the legacy
// pass manager under the command-line name "guard-widening".
INITIALIZE_PASS_BEGIN(GuardWideningLegacyPass, "guard-widening", "Widen guards",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(GuardWideningLegacyPass, "guard-widening", "Widen guards",
                    false, false)

// Register the loop-scoped variant under "loop-guard-widening".
INITIALIZE_PASS_BEGIN(LoopGuardWideningLegacyPass, "loop-guard-widening",
                      "Widen guards (within a single loop, as a loop pass)",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(LoopGuardWideningLegacyPass, "loop-guard-widening",
                    "Widen guards (within a single loop, as a loop pass)",
                    false, false)
|
|
|
|
|
2016-05-19 00:55:34 +02:00
|
|
|
/// Factory for the function-level legacy guard widening pass.
FunctionPass *llvm::createGuardWideningPass() {
  return new GuardWideningLegacyPass();
}
|
2018-04-27 19:29:10 +02:00
|
|
|
|
|
|
|
/// Factory for the loop-scoped legacy guard widening pass.
Pass *llvm::createLoopGuardWideningPass() {
  return new LoopGuardWideningLegacyPass();
}
|