//===- StackColoring.cpp --------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass implements the stack-coloring optimization that looks for
// lifetime marker machine instructions (LIFETIME_START and LIFETIME_END),
// which represent the possible lifetime of stack slots. It attempts to
// merge disjoint stack slots and reduce the used stack space.
// NOTE: This pass is not StackSlotColoring, which optimizes spill slots.
//
// TODO: In the future we plan to improve stack coloring in the following ways:
// 1. Allow merging multiple small slots into a single larger slot at different
//    offsets.
// 2. Merge this pass with StackSlotColoring and allow merging of allocas with
//    spill slots.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
|
2017-09-29 23:55:49 +02:00
|
|
|
#include "llvm/ADT/DenseMap.h"
|
2012-09-06 11:17:37 +02:00
|
|
|
#include "llvm/ADT/DepthFirstIterator.h"
|
|
|
|
#include "llvm/ADT/SmallPtrSet.h"
|
2017-09-29 23:55:49 +02:00
|
|
|
#include "llvm/ADT/SmallVector.h"
|
2012-09-06 11:17:37 +02:00
|
|
|
#include "llvm/ADT/Statistic.h"
|
2012-12-03 17:50:05 +01:00
|
|
|
#include "llvm/Analysis/ValueTracking.h"
|
2012-09-06 11:17:37 +02:00
|
|
|
#include "llvm/CodeGen/LiveInterval.h"
|
2012-12-03 17:50:05 +01:00
|
|
|
#include "llvm/CodeGen/MachineBasicBlock.h"
|
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
2017-09-29 23:55:49 +02:00
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
2012-09-06 11:17:37 +02:00
|
|
|
#include "llvm/CodeGen/MachineFunctionPass.h"
|
2017-09-29 23:55:49 +02:00
|
|
|
#include "llvm/CodeGen/MachineInstr.h"
|
2012-12-03 17:50:05 +01:00
|
|
|
#include "llvm/CodeGen/MachineMemOperand.h"
|
2017-09-29 23:55:49 +02:00
|
|
|
#include "llvm/CodeGen/MachineOperand.h"
|
2016-01-15 01:46:17 +01:00
|
|
|
#include "llvm/CodeGen/Passes.h"
|
2017-08-01 05:32:15 +02:00
|
|
|
#include "llvm/CodeGen/SelectionDAGNodes.h"
|
2012-09-06 11:17:37 +02:00
|
|
|
#include "llvm/CodeGen/SlotIndexes.h"
|
2014-01-20 20:49:14 +01:00
|
|
|
#include "llvm/CodeGen/StackProtector.h"
|
2017-11-17 02:07:10 +01:00
|
|
|
#include "llvm/CodeGen/TargetOpcodes.h"
|
2016-01-08 09:03:55 +01:00
|
|
|
#include "llvm/CodeGen/WinEHFuncInfo.h"
|
2018-04-30 16:59:11 +02:00
|
|
|
#include "llvm/Config/llvm-config.h"
|
2017-09-29 23:55:49 +02:00
|
|
|
#include "llvm/IR/Constants.h"
|
|
|
|
#include "llvm/IR/DebugInfoMetadata.h"
|
2013-01-02 12:36:10 +01:00
|
|
|
#include "llvm/IR/Function.h"
|
|
|
|
#include "llvm/IR/Instructions.h"
|
2017-09-29 23:55:49 +02:00
|
|
|
#include "llvm/IR/Metadata.h"
|
|
|
|
#include "llvm/IR/Use.h"
|
|
|
|
#include "llvm/IR/Value.h"
|
|
|
|
#include "llvm/Pass.h"
|
|
|
|
#include "llvm/Support/Casting.h"
|
2012-09-06 11:17:37 +02:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
2017-09-29 23:55:49 +02:00
|
|
|
#include "llvm/Support/Compiler.h"
|
2012-09-06 11:17:37 +02:00
|
|
|
#include "llvm/Support/Debug.h"
|
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2017-09-29 23:55:49 +02:00
|
|
|
#include <algorithm>
|
|
|
|
#include <cassert>
|
|
|
|
#include <limits>
|
|
|
|
#include <memory>
|
|
|
|
#include <utility>
|
2012-09-06 11:17:37 +02:00
|
|
|
|
|
|
|
using namespace llvm;

#define DEBUG_TYPE "stack-coloring"

static cl::opt<bool>
DisableColoring("no-stack-coloring",
        cl::init(false), cl::Hidden,
        cl::desc("Disable stack coloring"));

/// The user may write code that uses allocas outside of the declared lifetime
/// zone. This can happen when the user returns a reference to a local
/// data-structure. We can detect these cases and decide not to optimize the
/// code. If this flag is enabled, we try to save the user. This option
/// is treated as overriding LifetimeStartOnFirstUse below.
static cl::opt<bool>
ProtectFromEscapedAllocas("protect-from-escaped-allocas",
                          cl::init(false), cl::Hidden,
                          cl::desc("Do not optimize lifetime zones that "
                                   "are broken"));

/// Enable enhanced dataflow scheme for lifetime analysis (treat first
/// use of stack slot as start of slot lifetime, as opposed to looking
/// for LIFETIME_START marker). See "Implementation notes" below for
/// more info.
static cl::opt<bool>
LifetimeStartOnFirstUse("stackcoloring-lifetime-start-on-first-use",
        cl::init(true), cl::Hidden,
        cl::desc("Treat stack lifetimes as starting on first use, not on START marker."));

STATISTIC(NumMarkerSeen, "Number of lifetime markers found.");
STATISTIC(StackSpaceSaved, "Number of bytes saved due to merging slots.");
STATISTIC(StackSlotMerged, "Number of stack slots merged.");
STATISTIC(EscapedAllocas, "Number of allocas that escaped the lifetime region");

//===----------------------------------------------------------------------===//
//                           StackColoring Pass
//===----------------------------------------------------------------------===//
//
// Stack Coloring reduces stack usage by merging stack slots when they
// can't be used together. For example, consider the following C program:
//
//     void bar(char *, int);
//     void foo(bool var) {
//       A: {
//         char z[4096];
//         bar(z, 0);
//       }
//
//       char *p;
//       char x[4096];
//       char y[4096];
//       if (var) {
//         p = x;
//       } else {
//         bar(y, 1);
//         p = y + 1024;
//       }
//     B:
//       bar(p, 2);
//     }
//
// Naively-compiled, this program would use 12k of stack space. However, the
// stack slot corresponding to `z` is always destroyed before either of the
// stack slots for `x` or `y` are used, and then `x` is only used if `var`
// is true, while `y` is only used if `var` is false. So at no time are 2
// of the stack slots used together, and therefore we can merge them,
// compiling the function using only a single 4k alloca:
//
//     void foo(bool var) { // equivalent
//       char x[4096];
//       char *p;
//       bar(x, 0);
//       if (var) {
//         p = x;
//       } else {
//         bar(x, 1);
//         p = x + 1024;
//       }
//       bar(p, 2);
//     }
//
// This is an important optimization if we want stack space to be under
// control in large functions, both open-coded ones and ones created by
// inlining.
//
// Implementation Notes:
// ---------------------
//
// An important part of the above reasoning is that `z` can't be accessed
// while the latter 2 calls to `bar` are running. This is justified because
// `z`'s lifetime is over after we exit from block `A:`, so any further
// accesses to it would be UB. The way we represent this information
// in LLVM is by having frontends delimit blocks with `lifetime.start`
// and `lifetime.end` intrinsics.
//
// The effect of these intrinsics seems to be as follows (maybe I should
// specify this in the reference?):
//
// L1) at start, each stack-slot is marked as *out-of-scope*, unless no
//     lifetime intrinsic refers to that stack slot, in which case
//     it is marked as *in-scope*.
// L2) on a `lifetime.start`, a stack slot is marked as *in-scope* and
//     the stack slot is overwritten with `undef`.
// L3) on a `lifetime.end`, a stack slot is marked as *out-of-scope*.
// L4) on function exit, all stack slots are marked as *out-of-scope*.
// L5) `lifetime.end` is a no-op when called on a slot that is already
//     *out-of-scope*.
// L6) memory accesses to *out-of-scope* stack slots are UB.
// L7) when a stack-slot is marked as *out-of-scope*, all pointers to it
//     are invalidated, unless the slot is "degenerate". This is used to
//     justify not marking slots as in-use until the pointer to them is
//     used, but feels a bit hacky in the presence of things like LICM. See
//     the "Degenerate Slots" section for more details.
//
// Now, let's ground stack coloring on these rules. We'll define a slot
// as *in-use* at a (dynamic) point in execution if it either can be
// written to at that point, or if it has a live and non-undef content
// at that point.
//
// Obviously, slots that are never *in-use* together can be merged, and
// in our example `foo`, the slots for `x`, `y` and `z` are never
// in-use together (of course, sometimes slots that *are* in-use together
// might still be mergeable, but we don't care about that here).
//
// In this implementation, we successively merge pairs of slots that are
// not *in-use* together. We could be smarter - for example, we could merge
// a single large slot with 2 small slots, or we could construct the
// interference graph and run a "smart" graph coloring algorithm, but with
// that aside, how do we find out whether a pair of slots might be *in-use*
// together?
//
// From our rules, we see that *out-of-scope* slots are never *in-use*,
// and from (L7) we see that "non-degenerate" slots remain non-*in-use*
// until their address is taken. Therefore, we can approximate slot activity
// using dataflow.
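//
// Concretely, the block-level dataflow implemented in
// calculateLocalLiveness() below iterates the usual forward equations to a
// fixed point (sketched here for orientation; see the code further down):
//
//     LiveIn(BB)  = union of LiveOut(P) over all predecessors P of BB
//     LiveOut(BB) = (LiveIn(BB) - END(BB)) | BEGIN(BB)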
//
// A subtle point: naively, we might try to figure out which pairs of
// stack-slots interfere by propagating `S in-use` through the CFG for every
// stack-slot `S`, and having `S` and `T` interfere if there is a CFG point in
// which they are both *in-use*.
//
// That is sound, but overly conservative in some cases: in our (artificial)
// example `foo`, either `x` or `y` might be in use at the label `B:`, but
// as `x` is only in use if we came in from the `var` edge and `y` only
// if we came from the `!var` edge, they still can't be in use together.
// See PR32488 for an important real-life case.
//
// If we wanted to find all points of interference precisely, we could
// propagate `S in-use` and `S&T in-use` predicates through the CFG. That
// would be precise, but requires propagating `O(n^2)` dataflow facts.
//
// However, we aren't interested in the *set* of points of interference
// between 2 stack slots, only *whether* there *is* such a point. So we
// can rely on a little trick: for `S` and `T` to be in-use together,
// one of them needs to become in-use while the other is in-use (or
// they might both become in use simultaneously). We can check this
// by also keeping track of the points at which a stack slot might *start*
// being in-use.
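//
// As a hedged sketch of the resulting pairwise test (the names match the
// members declared below, but this is illustrative rather than a verbatim
// copy of the merging loop in this pass):
//
//     bool interfere(int S, int T) {
//       // S and T interfere iff one of them has a potential start point
//       // that lands inside the other's use interval.
//       for (SlotIndex Start : LiveStarts[T])
//         if (Intervals[S]->liveAt(Start))
//           return true;
//       for (SlotIndex Start : LiveStarts[S])
//         if (Intervals[T]->liveAt(Start))
//           return true;
//       return false;
//     }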
//
// Exact first use:
// ----------------
//
// Consider the following motivating example:
//
//     int foo() {
//       char b1[1024], b2[1024];
//       if (...) {
//         char b3[1024];
//         <uses of b1, b3>;
//         return x;
//       } else {
//         char b4[1024], b5[1024];
//         <uses of b2, b4, b5>;
//         return y;
//       }
//     }
//
// In the code above, "b3" and "b4" are declared in distinct lexical
// scopes, meaning that it is easy to prove that they can share the
// same stack slot. Variables "b1" and "b2" are declared in the same
// scope, meaning that from a lexical point of view, their lifetimes
// overlap. From a control flow point of view, however, the two
// variables are accessed in disjoint regions of the CFG, thus it
// should be possible for them to share the same stack slot. An ideal
// stack allocation for the function above would look like:
//
//  slot 0: b1, b2
//  slot 1: b3, b4
//  slot 2: b5
//
// Achieving this allocation is tricky, however, due to the way
// lifetime markers are inserted. Here is a simplified view of the
// control flow graph for the code above:
//
//                 +------ block 0 -------+
//                0| LIFETIME_START b1, b2 |
//                1| <test 'if' condition> |
//                 +-----------------------+
//                     ./            \.
//   +------ block 1 -------+   +------ block 2 -------+
//  2| LIFETIME_START b3     |  5| LIFETIME_START b4, b5 |
//  3| <uses of b1, b3>      |  6| <uses of b2, b4, b5>  |
//  4| LIFETIME_END b3       |  7| LIFETIME_END b4, b5   |
//   +-----------------------+   +-----------------------+
//                     \.            /.
//                 +------ block 3 -------+
//                8| <cleanupcode>         |
//                9| LIFETIME_END b1, b2   |
//               10| return                |
//                 +-----------------------+
//
// If we create live intervals for the variables above strictly based
// on the lifetime markers, we'll get the set of intervals on the
// left. If we ignore the lifetime start markers and instead treat a
// variable's lifetime as beginning with the first reference to the
// var, then we get the intervals on the right.
//
//              LIFETIME_START      First Use
//  b1:         [0,9]               [3,4] [8,9]
//  b2:         [0,9]               [6,9]
//  b3:         [2,4]               [3,4]
//  b4:         [5,7]               [6,7]
//  b5:         [5,7]               [6,7]
//
// For the intervals on the left, the best we can do is overlap two
// variables (b3 and b4, for example); this gives us a stack size of
// 4*1024 bytes, not ideal. When treating first-use as the start of a
// lifetime, we can additionally overlap b1 and b5, giving us a 3*1024
// byte stack (better).
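//
// (With the first-use intervals above, "b1" and "b5" can share a slot in
// addition to "b3"/"b4": b5's interval [6,7] does not overlap b1's [3,4]
// or [8,9], which is what yields the 3*1024-byte layout just mentioned.)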
//
// Degenerate Slots:
// -----------------
//
// Relying entirely on first-use of stack slots is problematic,
// however, due to the fact that optimizations can sometimes migrate
// uses of a variable outside of its lifetime start/end region. Here
// is an example:
//
//     int bar() {
//       char b1[1024], b2[1024];
//       if (...) {
//         <uses of b2>
//         return y;
//       } else {
//         <uses of b1>
//         while (...) {
//           char b3[1024];
//           <uses of b3>
//         }
//       }
//     }
//
// Before optimization, the control flow graph for the code above
// might look like the following:
//
//                  +------ block 0 -------+
//                 0| LIFETIME_START b1, b2 |
//                 1| <test 'if' condition> |
//                  +-----------------------+
//                      ./            \.
//   +------ block 1 -------+   +------- block 2 -------+
//  2| <uses of b2>          |  3| <uses of b1>          |
//   +-----------------------+   +-----------------------+
//              |                           |
//              |       +------- block 3 -------+ <-\.
//              |      4| <while condition>      |   |
//              |       +-----------------------+    |
//              |           /            |           |
//              |          /   +------- block 4 -------+
//               \        /   5| LIFETIME_START b3      |  |
//                \      /    6| <uses of b3>           |  |
//                 \    /     7| LIFETIME_END b3        |  |
//                  \  |       +------------------------+  |
//                   \ |            \                     /
//           +------ block 5 -----+   \-------------------
//          8| <cleanupcode>      |
//          9| LIFETIME_END b1, b2 |
//         10| return             |
//           +---------------------+
//
// During optimization, however, it can happen that an instruction
// computing an address in "b3" (for example, a loop-invariant GEP) is
// hoisted up out of the loop from block 4 to block 2. [Note that
// this is not an actual load from the stack, only an instruction that
// computes the address to be loaded]. If this happens, there is now a
// path leading from the first use of b3 to the return instruction
// that does not encounter the b3 LIFETIME_END, hence b3's lifetime is
// now larger than if we were computing live intervals strictly based
// on lifetime markers. In the example above, this lengthened lifetime
// would mean that it would appear illegal to overlap b3 with b2.
//
// To deal with such cases, the code in ::collectMarkers() below
// tries to identify "degenerate" slots -- those slots where on a single
// forward pass through the CFG we encounter a first reference to slot
// K before we hit the slot K lifetime start marker. For such slots,
// we fall back on using the lifetime start marker as the beginning of
// the variable's lifetime. NB: with this implementation, slots can
// appear degenerate in cases where there is unstructured control flow:
//
//     if (q) goto mid;
//     if (x > 9) {
//         int b[100];
//         memcpy(&b[0], ...);
//     mid: b[k] = ...;
//         abc(&b);
//     }
//
// If in the RPO ordering chosen to walk the CFG we happen to visit the b[k]
// store before visiting the memcpy block (which will contain the lifetime
// start for "b"), then it will appear that 'b' has a degenerate lifetime.
//
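// In pseudo-code, the degeneracy detection in collectMarkers() below amounts
// to the following (a sketch, not the literal code):
//
//     for each block B in a depth-first walk of the CFG:
//       Open = union of the "started, not yet ended" sets of B's visited preds
//       for each instruction I in B:
//         if I is LIFETIME_START(K):     Open.set(K)
//         else if I is LIFETIME_END(K):  Open.reset(K)
//         else if I refers to frame index K and !Open.test(K):
//           ConservativeSlots.set(K)
//
// Slots with more than one start or end marker are also treated
// conservatively (see PR27903).
//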
namespace {
/// StackColoring - A machine pass for merging disjoint stack allocations,
/// marked by the LIFETIME_START and LIFETIME_END pseudo instructions.
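///
/// At a high level the pass proceeds in phases (a summary of the helpers
/// declared below): collectMarkers() finds the lifetime markers and the
/// interesting/conservative slots, calculateLocalLiveness() runs a
/// block-level dataflow, calculateLiveIntervals() builds a use interval per
/// slot, and remapInstructions() rewrites the function to use the merged
/// slots before removeAllMarkers() deletes the markers.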
class StackColoring : public MachineFunctionPass {
  MachineFrameInfo *MFI;
  MachineFunction *MF;

  /// A class representing liveness information for a single basic block.
  /// Each bit in the BitVector represents the liveness property
  /// for a different stack slot.
  struct BlockLifetimeInfo {
    /// Which slots BEGIN in each basic block.
    BitVector Begin;

    /// Which slots END in each basic block.
    BitVector End;

    /// Which slots are marked as LIVE_IN, coming into each basic block.
    BitVector LiveIn;

    /// Which slots are marked as LIVE_OUT, coming out of each basic block.
    BitVector LiveOut;
  };

  /// Maps active slots (per bit) for each basic block.
  using LivenessMap = DenseMap<const MachineBasicBlock *, BlockLifetimeInfo>;
  LivenessMap BlockLiveness;

  /// Maps basic blocks to a serial number.
  DenseMap<const MachineBasicBlock *, int> BasicBlocks;

  /// Maps serial numbers to basic blocks.
  SmallVector<const MachineBasicBlock *, 8> BasicBlockNumbering;

  /// Maps slots to their use interval. Outside of this interval, slot
  /// values are either dead or `undef` and they will not be written to.
  SmallVector<std::unique_ptr<LiveInterval>, 16> Intervals;

  /// Maps slots to the points where they can become in-use.
  SmallVector<SmallVector<SlotIndex, 4>, 16> LiveStarts;

  /// VNInfo is used for the construction of LiveIntervals.
  VNInfo::Allocator VNInfoAllocator;

  /// SlotIndex analysis object.
  SlotIndexes *Indexes;

  /// The stack protector object.
  StackProtector *SP;

  /// The list of lifetime markers found. These markers are to be removed
  /// once the coloring is done.
  SmallVector<MachineInstr*, 8> Markers;

  /// Record the FI slots for which we have seen some sort of
  /// lifetime marker (either start or end).
  BitVector InterestingSlots;

  /// FI slots that need to be handled conservatively (for these
  /// slots lifetime-start-on-first-use is disabled).
  BitVector ConservativeSlots;

  /// Number of iterations taken during data flow analysis.
  unsigned NumIterations;

public:
  static char ID;

  StackColoring() : MachineFunctionPass(ID) {
    initializeStackColoringPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  /// Used in collectMarkers
  using BlockBitVecMap = DenseMap<const MachineBasicBlock *, BitVector>;

  /// Debug.
  void dump() const;
  void dumpIntervals() const;
  void dumpBB(MachineBasicBlock *MBB) const;
  void dumpBV(const char *tag, const BitVector &BV) const;

  /// Removes all of the lifetime marker instructions from the function.
  /// \returns true if any markers were removed.
  bool removeAllMarkers();

  /// Scan the machine function and find all of the lifetime markers.
  /// Record the findings in the BEGIN and END vectors.
  /// \returns the number of markers found.
  unsigned collectMarkers(unsigned NumSlot);

  /// Perform the dataflow calculation and calculate the lifetime for each of
  /// the slots, based on the BEGIN/END vectors. Set the LifetimeLIVE_IN and
  /// LifetimeLIVE_OUT maps that represent which stack slots are live coming
  /// into and out of blocks.
  void calculateLocalLiveness();

  /// Returns TRUE if we're using the first-use-begins-lifetime method for
  /// this slot (if FALSE, then the start marker is treated as start of lifetime).
  bool applyFirstUse(int Slot) {
    if (!LifetimeStartOnFirstUse || ProtectFromEscapedAllocas)
      return false;
    if (ConservativeSlots.test(Slot))
      return false;
    return true;
  }

  /// Examines the specified instruction and returns TRUE if the instruction
  /// represents the start or end of an interesting lifetime. The slot or slots
  /// starting or ending are added to the vector "slots" and "isStart" is set
  /// accordingly.
  /// \returns True if inst contains a lifetime start or end
  bool isLifetimeStartOrEnd(const MachineInstr &MI,
                            SmallVector<int, 4> &slots,
                            bool &isStart);

  /// Construct the LiveIntervals for the slots.
  void calculateLiveIntervals(unsigned NumSlots);

  /// Go over the machine function and change instructions which use stack
  /// slots to use the joint slots.
  void remapInstructions(DenseMap<int, int> &SlotRemap);

  /// The input program may contain instructions which are not inside lifetime
  /// markers. This can happen due to a bug in the compiler or due to a bug in
  /// user code (for example, returning a reference to a local variable).
  /// This procedure checks all of the instructions in the function and
  /// invalidates lifetime ranges which do not contain all of the instructions
  /// which access that frame slot.
  void removeInvalidSlotRanges();

  /// Map entries which point to other entries to their destination.
  /// A->B->C becomes A->C.
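  /// For example, a map of {A -> B, B -> C} is rewritten to {A -> C, B -> C},
  /// so that every entry points directly at its final destination slot.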
  void expungeSlotMap(DenseMap<int, int> &SlotRemap, unsigned NumSlots);
};

} // end anonymous namespace

char StackColoring::ID = 0;

char &llvm::StackColoringID = StackColoring::ID;

INITIALIZE_PASS_BEGIN(StackColoring, DEBUG_TYPE,
                      "Merge disjoint stack slots", false, false)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(StackProtector)
INITIALIZE_PASS_END(StackColoring, DEBUG_TYPE,
                    "Merge disjoint stack slots", false, false)

void StackColoring::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<SlotIndexes>();
  AU.addRequired<StackProtector>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void StackColoring::dumpBV(const char *tag,
                                            const BitVector &BV) const {
  dbgs() << tag << " : { ";
  for (unsigned I = 0, E = BV.size(); I != E; ++I)
    dbgs() << BV.test(I) << " ";
  dbgs() << "}\n";
}

LLVM_DUMP_METHOD void StackColoring::dumpBB(MachineBasicBlock *MBB) const {
  LivenessMap::const_iterator BI = BlockLiveness.find(MBB);
  assert(BI != BlockLiveness.end() && "Block not found");
  const BlockLifetimeInfo &BlockInfo = BI->second;

  dumpBV("BEGIN", BlockInfo.Begin);
  dumpBV("END", BlockInfo.End);
  dumpBV("LIVE_IN", BlockInfo.LiveIn);
  dumpBV("LIVE_OUT", BlockInfo.LiveOut);
}

LLVM_DUMP_METHOD void StackColoring::dump() const {
  for (MachineBasicBlock *MBB : depth_first(MF)) {
    dbgs() << "Inspecting block #" << MBB->getNumber() << " ["
           << MBB->getName() << "]\n";
    dumpBB(MBB);
  }
}

LLVM_DUMP_METHOD void StackColoring::dumpIntervals() const {
  for (unsigned I = 0, E = Intervals.size(); I != E; ++I) {
    dbgs() << "Interval[" << I << "]:\n";
    Intervals[I]->dump();
  }
}
#endif

static inline int getStartOrEndSlot(const MachineInstr &MI)
{
  assert((MI.getOpcode() == TargetOpcode::LIFETIME_START ||
          MI.getOpcode() == TargetOpcode::LIFETIME_END) &&
         "Expected LIFETIME_START or LIFETIME_END op");
  const MachineOperand &MO = MI.getOperand(0);
  int Slot = MO.getIndex();
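  // Negative frame indices (e.g. fixed objects) are not tracked by this pass;
  // they are filtered out here by returning -1.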
  if (Slot >= 0)
    return Slot;
  return -1;
}

// At the moment the only way to end a variable lifetime is with
// a LIFETIME_END op (which can't contain a start). If things
// change and the IR allows for a single inst that both begins
// and ends lifetime(s), this interface will need to be reworked.
bool StackColoring::isLifetimeStartOrEnd(const MachineInstr &MI,
                                         SmallVector<int, 4> &slots,
                                         bool &isStart) {
  if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
      MI.getOpcode() == TargetOpcode::LIFETIME_END) {
    int Slot = getStartOrEndSlot(MI);
    if (Slot < 0)
      return false;
    if (!InterestingSlots.test(Slot))
      return false;
    slots.push_back(Slot);
    if (MI.getOpcode() == TargetOpcode::LIFETIME_END) {
      isStart = false;
      return true;
    }
    if (!applyFirstUse(Slot)) {
      isStart = true;
      return true;
    }
  } else if (LifetimeStartOnFirstUse && !ProtectFromEscapedAllocas) {
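    // Not a lifetime marker: under the first-use scheme, an ordinary use of
    // an interesting frame index can itself start that slot's lifetime
    // (unless the slot was classified as conservative).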
    if (!MI.isDebugValue()) {
      bool found = false;
      for (const MachineOperand &MO : MI.operands()) {
        if (!MO.isFI())
          continue;
        int Slot = MO.getIndex();
        if (Slot<0)
          continue;
        if (InterestingSlots.test(Slot) && applyFirstUse(Slot)) {
          slots.push_back(Slot);
          found = true;
        }
      }
      if (found) {
        isStart = true;
        return true;
      }
    }
  }
  return false;
}

unsigned StackColoring::collectMarkers(unsigned NumSlot) {
  unsigned MarkersFound = 0;
  BlockBitVecMap SeenStartMap;
  InterestingSlots.clear();
  InterestingSlots.resize(NumSlot);
  ConservativeSlots.clear();
  ConservativeSlots.resize(NumSlot);

  // number of start and end lifetime ops for each slot
  SmallVector<int, 8> NumStartLifetimes(NumSlot, 0);
  SmallVector<int, 8> NumEndLifetimes(NumSlot, 0);

  // Step 1: collect markers and populate the "InterestingSlots"
  // and "ConservativeSlots" sets.
  for (MachineBasicBlock *MBB : depth_first(MF)) {
    // Compute the set of slots for which we've seen a START marker but have
    // not yet seen an END marker at this point in the walk (e.g. on entry
    // to this bb).
    BitVector BetweenStartEnd;
    BetweenStartEnd.resize(NumSlot);
    for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
             PE = MBB->pred_end(); PI != PE; ++PI) {
      BlockBitVecMap::const_iterator I = SeenStartMap.find(*PI);
      if (I != SeenStartMap.end()) {
        BetweenStartEnd |= I->second;
      }
    }

    // Walk the instructions in the block to look for start/end ops.
    for (MachineInstr &MI : *MBB) {
      if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
          MI.getOpcode() == TargetOpcode::LIFETIME_END) {
        int Slot = getStartOrEndSlot(MI);
        if (Slot < 0)
          continue;
        InterestingSlots.set(Slot);
        if (MI.getOpcode() == TargetOpcode::LIFETIME_START) {
          BetweenStartEnd.set(Slot);
          NumStartLifetimes[Slot] += 1;
        } else {
          BetweenStartEnd.reset(Slot);
          NumEndLifetimes[Slot] += 1;
        }
        const AllocaInst *Allocation = MFI->getObjectAllocation(Slot);
        if (Allocation) {
          DEBUG(dbgs() << "Found a lifetime ");
          DEBUG(dbgs() << (MI.getOpcode() == TargetOpcode::LIFETIME_START
                               ? "start"
                               : "end"));
          DEBUG(dbgs() << " marker for slot #" << Slot);
          DEBUG(dbgs() << " with allocation: " << Allocation->getName()
                       << "\n");
        }
        Markers.push_back(&MI);
        MarkersFound += 1;
      } else {
        for (const MachineOperand &MO : MI.operands()) {
          if (!MO.isFI())
            continue;
          int Slot = MO.getIndex();
          if (Slot < 0)
            continue;
          if (! BetweenStartEnd.test(Slot)) {
            ConservativeSlots.set(Slot);
          }
        }
      }
    }
    BitVector &SeenStart = SeenStartMap[MBB];
    SeenStart |= BetweenStartEnd;
  }
  if (!MarkersFound) {
    return 0;
  }

  // PR27903: slots with multiple start or end lifetime ops are not
  // safe to enable for "lifetime-start-on-first-use".
  for (unsigned slot = 0; slot < NumSlot; ++slot)
    if (NumStartLifetimes[slot] > 1 || NumEndLifetimes[slot] > 1)
      ConservativeSlots.set(slot);
  DEBUG(dumpBV("Conservative slots", ConservativeSlots));

  // Step 2: compute begin/end sets for each block

  // NOTE: We use a depth-first iteration to ensure that we obtain a
  // deterministic numbering.
  for (MachineBasicBlock *MBB : depth_first(MF)) {
    // Assign a serial number to this basic block.
    BasicBlocks[MBB] = BasicBlockNumbering.size();
    BasicBlockNumbering.push_back(MBB);

    // Keep a reference to avoid repeated lookups.
    BlockLifetimeInfo &BlockInfo = BlockLiveness[MBB];

    BlockInfo.Begin.resize(NumSlot);
    BlockInfo.End.resize(NumSlot);

    SmallVector<int, 4> slots;
    for (MachineInstr &MI : *MBB) {
      bool isStart = false;
      slots.clear();
      if (isLifetimeStartOrEnd(MI, slots, isStart)) {
        if (!isStart) {
          assert(slots.size() == 1 && "unexpected: MI ends multiple slots");
          int Slot = slots[0];
          if (BlockInfo.Begin.test(Slot)) {
            BlockInfo.Begin.reset(Slot);
          }
          BlockInfo.End.set(Slot);
        } else {
          for (auto Slot : slots) {
            DEBUG(dbgs() << "Found a use of slot #" << Slot);
            DEBUG(dbgs() << " at " << printMBBReference(*MBB) << " index ");
            DEBUG(Indexes->getInstructionIndex(MI).print(dbgs()));
            const AllocaInst *Allocation = MFI->getObjectAllocation(Slot);
            if (Allocation) {
              DEBUG(dbgs() << " with allocation: "<< Allocation->getName());
            }
            DEBUG(dbgs() << "\n");
            if (BlockInfo.End.test(Slot)) {
              BlockInfo.End.reset(Slot);
            }
            BlockInfo.Begin.set(Slot);
          }
        }
      }
    }
  }

  // Update statistics.
  NumMarkerSeen += MarkersFound;
  return MarkersFound;
}

void StackColoring::calculateLocalLiveness() {
  unsigned NumIters = 0;
  bool changed = true;
  while (changed) {
    changed = false;
    ++NumIters;

    for (const MachineBasicBlock *BB : BasicBlockNumbering) {
      // Use an iterator to avoid repeated lookups.
      LivenessMap::iterator BI = BlockLiveness.find(BB);
      assert(BI != BlockLiveness.end() && "Block not found");
      BlockLifetimeInfo &BlockInfo = BI->second;

      // Compute LiveIn by unioning together the LiveOut sets of all preds.
      BitVector LocalLiveIn;
      for (MachineBasicBlock::const_pred_iterator PI = BB->pred_begin(),
               PE = BB->pred_end(); PI != PE; ++PI) {
        LivenessMap::const_iterator I = BlockLiveness.find(*PI);
        assert(I != BlockLiveness.end() && "Predecessor not found");
        LocalLiveIn |= I->second.LiveOut;
      }

      // Compute LiveOut by subtracting out lifetimes that end in this
      // block, then adding in lifetimes that begin in this block. If
      // we have both BEGIN and END markers in the same basic block
      // then we know that the BEGIN marker comes after the END,
      // because we already handle the case where the BEGIN comes
      // before the END when collecting the markers (and building the
      // BEGIN/END vectors).
      BitVector LocalLiveOut = LocalLiveIn;
      LocalLiveOut.reset(BlockInfo.End);
      LocalLiveOut |= BlockInfo.Begin;

      // Update block LiveIn set, noting whether it has changed.
      if (LocalLiveIn.test(BlockInfo.LiveIn)) {
        changed = true;
        BlockInfo.LiveIn |= LocalLiveIn;
      }

      // Update block LiveOut set, noting whether it has changed.
      if (LocalLiveOut.test(BlockInfo.LiveOut)) {
        changed = true;
        BlockInfo.LiveOut |= LocalLiveOut;
      }
    }
  } // while changed.

  NumIterations = NumIters;
}

void StackColoring::calculateLiveIntervals(unsigned NumSlots) {
  SmallVector<SlotIndex, 16> Starts;
  SmallVector<bool, 16> DefinitelyInUse;
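  // DefinitelyInUse[S] records whether slot S is already known to be in use
  // at the current point of the per-block scan, so that only the first of
  // several consecutive first-use starts is recorded in LiveStarts.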

  // For each block, find which slots are active within this block
  // and update the live intervals.
  for (const MachineBasicBlock &MBB : *MF) {
    Starts.clear();
    Starts.resize(NumSlots);
    DefinitelyInUse.clear();
    DefinitelyInUse.resize(NumSlots);

    // Start the interval of the slots that we previously found to be 'in-use'.
    BlockLifetimeInfo &MBBLiveness = BlockLiveness[&MBB];
    for (int pos = MBBLiveness.LiveIn.find_first(); pos != -1;
         pos = MBBLiveness.LiveIn.find_next(pos)) {
      Starts[pos] = Indexes->getMBBStartIdx(&MBB);
    }

    // Create the interval for the basic blocks containing lifetime begin/end.
    for (const MachineInstr &MI : MBB) {
      SmallVector<int, 4> slots;
      bool IsStart = false;
      if (!isLifetimeStartOrEnd(MI, slots, IsStart))
        continue;
      SlotIndex ThisIndex = Indexes->getInstructionIndex(MI);
      for (auto Slot : slots) {
        if (IsStart) {
          // If a slot is already definitely in use, we don't have to emit
          // a new start marker because there is already a pre-existing
          // one.
          if (!DefinitelyInUse[Slot]) {
            LiveStarts[Slot].push_back(ThisIndex);
            DefinitelyInUse[Slot] = true;
          }
          if (!Starts[Slot].isValid())
            Starts[Slot] = ThisIndex;
        } else {
          if (Starts[Slot].isValid()) {
            VNInfo *VNI = Intervals[Slot]->getValNumInfo(0);
            Intervals[Slot]->addSegment(
                LiveInterval::Segment(Starts[Slot], ThisIndex, VNI));
            Starts[Slot] = SlotIndex(); // Invalidate the start index
            DefinitelyInUse[Slot] = false;
          }
        }
      }
    }

    // Finish up started segments
    for (unsigned i = 0; i < NumSlots; ++i) {
      if (!Starts[i].isValid())
        continue;

      SlotIndex EndIdx = Indexes->getMBBEndIdx(&MBB);
      VNInfo *VNI = Intervals[i]->getValNumInfo(0);
      Intervals[i]->addSegment(LiveInterval::Segment(Starts[i], EndIdx, VNI));
    }
  }
}

bool StackColoring::removeAllMarkers() {
  unsigned Count = 0;
  for (MachineInstr *MI : Markers) {
    MI->eraseFromParent();
    Count++;
  }
  Markers.clear();

  DEBUG(dbgs()<<"Removed "<<Count<<" markers.\n");
  return Count;
}

void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) {
  unsigned FixedInstr = 0;
  unsigned FixedMemOp = 0;
  unsigned FixedDbg = 0;

  // Remap debug information that refers to stack slots.
  for (auto &VI : MF->getVariableDbgInfo()) {
    if (!VI.Var)
      continue;
    if (SlotRemap.count(VI.Slot)) {
      DEBUG(dbgs() << "Remapping debug info for ["
                   << cast<DILocalVariable>(VI.Var)->getName() << "].\n");
      VI.Slot = SlotRemap[VI.Slot];
      FixedDbg++;
    }
  }

  // Keep a list of *allocas* which need to be remapped.
  DenseMap<const AllocaInst*, const AllocaInst*> Allocas;

  // Keep a list of allocas which have been affected by the remap.
  SmallPtrSet<const AllocaInst*, 32> MergedAllocas;

  for (const std::pair<int, int> &SI : SlotRemap) {
    const AllocaInst *From = MFI->getObjectAllocation(SI.first);
    const AllocaInst *To = MFI->getObjectAllocation(SI.second);
    assert(To && From && "Invalid allocation object");
    Allocas[From] = To;

    // AA might be used later for instruction scheduling, and we need it to be
    // able to deduce the correct aliasing relationships between pointers
// derived from the alloca being remapped and the target of that remapping.
|
|
|
|
// The only safe way, without directly informing AA about the remapping
|
|
|
|
// somehow, is to directly update the IR to reflect the change being made
|
|
|
|
// here.
|
|
|
|
Instruction *Inst = const_cast<AllocaInst *>(To);
|
|
|
|
if (From->getType() != To->getType()) {
|
|
|
|
BitCastInst *Cast = new BitCastInst(Inst, From->getType());
|
|
|
|
Cast->insertAfter(Inst);
|
|
|
|
Inst = Cast;
|
|
|
|
}
|
|
|
|
|
2017-08-01 05:32:15 +02:00
|
|
|
// We keep both slots to maintain AliasAnalysis metadata later.
|
|
|
|
MergedAllocas.insert(From);
|
|
|
|
MergedAllocas.insert(To);
|
|
|
|
|
2014-01-20 20:49:14 +01:00
|
|
|
// Allow the stack protector to adjust its value map to account for the
|
|
|
|
// upcoming replacement.
|
|
|
|
SP->adjustForColoring(From, To);
|
|
|
|
|
2016-01-15 01:46:17 +01:00
|
|
|
// The new alloca might not be valid in a llvm.dbg.declare for this
|
|
|
|
// variable, so undef out the use to make the verifier happy.
|
|
|
|
AllocaInst *FromAI = const_cast<AllocaInst *>(From);
|
|
|
|
if (FromAI->isUsedByMetadata())
|
|
|
|
ValueAsMetadata::handleRAUW(FromAI, UndefValue::get(FromAI->getType()));
|
|
|
|
for (auto &Use : FromAI->uses()) {
|
|
|
|
if (BitCastInst *BCI = dyn_cast<BitCastInst>(Use.get()))
|
|
|
|
if (BCI->isUsedByMetadata())
|
|
|
|
ValueAsMetadata::handleRAUW(BCI, UndefValue::get(BCI->getType()));
|
|
|
|
}
|
|
|
|
|
Update IR when merging slots in stack coloring
The way that stack coloring updated MMOs when merging stack slots, while
correct, is suboptimal, and is incompatible with the use of AA during
instruction scheduling. The solution, which involves the use of const_cast (and
more importantly, updating the IR from within an MI-level pass), obviously
requires some explanation:

When the stack coloring pass was originally committed, the code in
ScheduleDAGInstrs::buildSchedGraph tracked possible alias sets by using
GetUnderlyingObject, and all load/store and store/store memory control
dependencies were added between SUs at the object level (where only one
object, that returned by GetUnderlyingObject, was used to identify the object
associated with each MMO). When stack coloring merged stack slots, it would
replace MMOs derived from the remapped alloca with the alloca with which the
remapped alloca was being replaced. Because ScheduleDAGInstrs only used single
objects, and tracked alias sets at the object level, this was a fine solution.

In r169744, (Andy and) I updated the code in ScheduleDAGInstrs to use
GetUnderlyingObjects, and track alias sets using, potentially, multiple
underlying objects for each MMO. This was done, primarily, to provide the
ability to look through PHIs, and provide better scheduling for
induction-variable-dependent loads and stores inside loops. At this point, the
MMO-updating code in stack coloring became suboptimal, because it would clear
the MMOs for (i.e. completely pessimize) all instructions for which r169744
might help in scheduling. Updating the IR directly is the simplest fix for this
(and the one with, by far, the least compile-time impact), but others are
possible (we could give each MMO a small vector of potential values, or make
use of a remapping table, constructed from MFI, inside ScheduleDAGInstrs).

Unfortunately, replacing all MMO values derived from the remapped alloca with
the base replacement alloca fundamentally breaks our ability to use AA during
instruction scheduling (which is critical to performance on some targets). The
reason is that the original MMO might have had an offset (either constant or
dynamic) from the base remapped alloca, and that offset is not present in the
updated MMO. One possible way around this would be to use
GetPointerBaseWithConstantOffset, and update not only the MMO's value, but also
its offset based on the original offset. Unfortunately, this solution would
only handle constant offsets, and for safety (because AA is not completely
restricted to deducing relationships with constant offsets), we would need to
clear all MMOs without constant offsets over the entire function. This would be
an even worse pessimization than the current single-object restriction. Any
other solution would involve passing around a vector of remapped allocas, and
teaching AA to use it, introducing additional complexity and overhead into AA.

Instead, when remapping an alloca, we replace all IR uses of that alloca as
well (optionally inserting a bitcast as necessary). This is even more efficient
than the old MMO-updating code in the stack coloring pass (because it removes
the need to call GetUnderlyingObject on all MMO values), removes the
single-object pessimization in the default configuration, and enables the
correct use of AA during instruction scheduling (all without any additional
overhead).

LLVM now no longer miscompiles itself on x86_64 when using -enable-misched
-enable-aa-sched-mi -misched-bottomup=0 -misched-topdown=0 -misched=shuffle!

Fixed PR18497.

Because the alloca replacement is now done at the IR level, unless the MMO
directly refers to the remapped alloca, the change cannot be seen at the MI
level. As a result, there is no good way to fix test/CodeGen/X86/pr14090.ll.
llvm-svn: 199658
2014-01-20 15:03:16 +01:00
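In rough terms, the IR-level remapping described in the message above can be
sketched as follows. This is an illustrative sketch only, not the pass's exact
code: the helper name is hypothetical, and it assumes the replacement value is
simply the alloca the slot is merged into. The surrounding pass code also
undefs metadata uses of the old alloca first (as shown above) before calling
replaceAllUsesWith below, and it does not delete the original alloca because
MMOs and other non-IR references may still point at it.

static void remapAllocaUsesSketch(const AllocaInst *From, const AllocaInst *To) {
  // Use the replacement alloca directly when the pointer types already match.
  Instruction *Inst = const_cast<AllocaInst *>(To);
  if (From->getType() != To->getType()) {
    // Otherwise bitcast the replacement to the old alloca's pointer type and
    // place the cast immediately after the replacement alloca.
    BitCastInst *Cast = new BitCastInst(Inst, From->getType());
    Cast->insertAfter(Inst);
    Inst = Cast;
  }
  // Redirect every IR use of the old alloca to the replacement (or the cast).
  const_cast<AllocaInst *>(From)->replaceAllUsesWith(Inst);
}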
|
|
|
// Note that this will not replace uses in MMOs (which we'll update below),
|
|
|
|
// or anywhere else (which is why we won't delete the original
|
|
|
|
// instruction).
|
2016-01-15 01:46:17 +01:00
|
|
|
FromAI->replaceAllUsesWith(Inst);
|
2012-09-06 11:17:37 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Remap all instructions to the new stack slots.
|
2014-03-09 16:44:45 +01:00
|
|
|
for (MachineBasicBlock &BB : *MF)
|
|
|
|
for (MachineInstr &I : BB) {
|
2012-09-10 10:44:15 +02:00
|
|
|
// Skip lifetime markers. We'll remove them soon.
|
2014-03-09 16:44:45 +01:00
|
|
|
if (I.getOpcode() == TargetOpcode::LIFETIME_START ||
|
|
|
|
I.getOpcode() == TargetOpcode::LIFETIME_END)
|
2012-09-10 10:44:15 +02:00
|
|
|
continue;
|
|
|
|
|
2012-09-06 11:17:37 +02:00
|
|
|
// Update the MachineMemOperand to use the new alloca.
|
2014-03-09 16:44:45 +01:00
|
|
|
for (MachineMemOperand *MMO : I.memoperands()) {
|
2014-01-20 15:03:16 +01:00
|
|
|
// We've replaced IR-level uses of the remapped allocas, so we only
|
|
|
|
// need to replace direct uses here.
|
2014-04-15 09:22:52 +02:00
|
|
|
const AllocaInst *AI = dyn_cast_or_null<AllocaInst>(MMO->getValue());
|
|
|
|
if (!AI)
|
2013-05-14 03:42:44 +02:00
|
|
|
continue;
|
|
|
|
|
2012-10-18 21:53:48 +02:00
|
|
|
if (!Allocas.count(AI))
|
2012-09-06 11:17:37 +02:00
|
|
|
continue;
|
|
|
|
|
2012-10-18 21:53:48 +02:00
|
|
|
MMO->setValue(Allocas[AI]);
|
2012-09-06 11:17:37 +02:00
|
|
|
FixedMemOp++;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update all of the machine instruction operands.
|
2014-03-09 16:44:45 +01:00
|
|
|
for (MachineOperand &MO : I.operands()) {
|
2012-09-06 11:17:37 +02:00
|
|
|
if (!MO.isFI())
|
|
|
|
continue;
|
|
|
|
int FromSlot = MO.getIndex();
|
|
|
|
|
|
|
|
// Don't touch arguments.
|
|
|
|
if (FromSlot<0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// Only look at mapped slots.
|
|
|
|
if (!SlotRemap.count(FromSlot))
|
|
|
|
continue;
|
|
|
|
|
2012-09-10 10:51:46 +02:00
|
|
|
// In a debug build, check that the instruction that we are modifying is
|
|
|
|
// inside the expected live range. If the instruction is not inside
|
2012-09-10 10:44:15 +02:00
|
|
|
// the calculated range then it means that the alloca usage moved
|
2012-09-13 17:46:30 +02:00
|
|
|
// outside of the lifetime markers, or that the user has a bug.
|
2012-09-13 14:38:37 +02:00
|
|
|
// NOTE: Alloca address calculations which happen outside the lifetime
|
2018-03-31 00:22:31 +02:00
|
|
|
// zone are okay, despite the fact that we don't have a good way
|
2012-09-13 14:38:37 +02:00
|
|
|
// for validating all of the usages of the calculation.
|
2012-09-10 10:44:15 +02:00
|
|
|
#ifndef NDEBUG
|
2014-03-09 16:44:45 +01:00
|
|
|
bool TouchesMemory = I.mayLoad() || I.mayStore();
|
2012-09-13 17:46:30 +02:00
|
|
|
// If we *don't* protect the user from escaped allocas, don't bother
|
|
|
|
// validating the instructions.
|
2014-03-09 16:44:45 +01:00
|
|
|
if (!I.isDebugValue() && TouchesMemory && ProtectFromEscapedAllocas) {
|
2016-02-27 07:40:41 +01:00
|
|
|
SlotIndex Index = Indexes->getInstructionIndex(I);
|
2014-03-09 16:44:45 +01:00
|
|
|
const LiveInterval *Interval = &*Intervals[FromSlot];
|
2012-09-11 14:34:27 +02:00
|
|
|
assert(Interval->find(Index) != Interval->end() &&
|
2013-03-25 22:26:36 +01:00
|
|
|
"Found instruction usage outside of live range.");
|
2012-09-11 14:34:27 +02:00
|
|
|
}
|
2012-09-10 10:44:15 +02:00
|
|
|
#endif
|
|
|
|
|
2012-09-06 11:17:37 +02:00
|
|
|
// Fix the machine instructions.
|
|
|
|
int ToSlot = SlotRemap[FromSlot];
|
|
|
|
MO.setIndex(ToSlot);
|
|
|
|
FixedInstr++;
|
|
|
|
}
|
2017-08-01 05:32:15 +02:00
|
|
|
|
|
|
|
// We adjust AliasAnalysis information for merged stack slots.
|
2017-08-02 20:16:32 +02:00
|
|
|
MachineSDNode::mmo_iterator NewMemOps =
|
2017-08-01 05:32:15 +02:00
|
|
|
MF->allocateMemRefsArray(I.getNumMemOperands());
|
|
|
|
unsigned MemOpIdx = 0;
|
|
|
|
bool ReplaceMemOps = false;
|
|
|
|
for (MachineMemOperand *MMO : I.memoperands()) {
|
|
|
|
// If this memory location can be a slot remapped here,
|
|
|
|
// we remove AA information.
|
|
|
|
bool MayHaveConflictingAAMD = false;
|
|
|
|
if (MMO->getAAInfo()) {
|
|
|
|
if (const Value *MMOV = MMO->getValue()) {
|
|
|
|
SmallVector<Value *, 4> Objs;
|
|
|
|
getUnderlyingObjectsForCodeGen(MMOV, Objs, MF->getDataLayout());
|
|
|
|
|
|
|
|
if (Objs.empty())
|
|
|
|
MayHaveConflictingAAMD = true;
|
|
|
|
else
|
|
|
|
for (Value *V : Objs) {
|
|
|
|
// If this memory location comes from a known stack slot
|
|
|
|
// that is not remapped, we continue checking.
|
|
|
|
// Otherwise, we need to invalidate AA information.
|
|
|
|
const AllocaInst *AI = dyn_cast_or_null<AllocaInst>(V);
|
|
|
|
if (AI && MergedAllocas.count(AI)) {
|
|
|
|
MayHaveConflictingAAMD = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (MayHaveConflictingAAMD) {
|
2017-08-02 20:16:32 +02:00
|
|
|
NewMemOps[MemOpIdx++] = MF->getMachineMemOperand(MMO, AAMDNodes());
|
2017-08-01 05:32:15 +02:00
|
|
|
ReplaceMemOps = true;
|
|
|
|
}
|
|
|
|
else
|
2017-08-02 20:16:32 +02:00
|
|
|
NewMemOps[MemOpIdx++] = MMO;
|
2017-08-01 05:32:15 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// If any memory operand is updated, set memory references of
|
|
|
|
// this instruction.
|
|
|
|
if (ReplaceMemOps)
|
2017-08-02 20:16:32 +02:00
|
|
|
I.setMemRefs(std::make_pair(NewMemOps, I.getNumMemOperands()));
|
2012-09-06 11:17:37 +02:00
|
|
|
}
|
|
|
|
|
2016-01-08 18:24:47 +01:00
|
|
|
// Update the location of C++ catch objects for the MSVC personality routine.
|
2016-01-08 09:03:55 +01:00
|
|
|
if (WinEHFuncInfo *EHInfo = MF->getWinEHFuncInfo())
|
|
|
|
for (WinEHTryBlockMapEntry &TBME : EHInfo->TryBlockMap)
|
|
|
|
for (WinEHHandlerType &H : TBME.HandlerArray)
|
2017-09-29 23:55:49 +02:00
|
|
|
if (H.CatchObj.FrameIndex != std::numeric_limits<int>::max() &&
|
2016-01-08 18:24:47 +01:00
|
|
|
SlotRemap.count(H.CatchObj.FrameIndex))
|
2016-01-08 09:03:55 +01:00
|
|
|
H.CatchObj.FrameIndex = SlotRemap[H.CatchObj.FrameIndex];
|
|
|
|
|
2012-09-06 11:17:37 +02:00
|
|
|
DEBUG(dbgs()<<"Fixed "<<FixedMemOp<<" machine memory operands.\n");
|
|
|
|
DEBUG(dbgs()<<"Fixed "<<FixedDbg<<" debug locations.\n");
|
|
|
|
DEBUG(dbgs()<<"Fixed "<<FixedInstr<<" machine instructions.\n");
|
|
|
|
}
|
|
|
|
|
2012-09-12 06:57:37 +02:00
|
|
|
void StackColoring::removeInvalidSlotRanges() {
|
2014-03-09 16:44:45 +01:00
|
|
|
for (MachineBasicBlock &BB : *MF)
|
|
|
|
for (MachineInstr &I : BB) {
|
|
|
|
if (I.getOpcode() == TargetOpcode::LIFETIME_START ||
|
|
|
|
I.getOpcode() == TargetOpcode::LIFETIME_END || I.isDebugValue())
|
2012-09-12 06:57:37 +02:00
|
|
|
continue;
|
|
|
|
|
2012-09-13 14:38:37 +02:00
|
|
|
// Some intervals are suspicious! In some cases we find address
|
|
|
|
// calculations outside of the lifetime zone, but no actual memory
|
|
|
|
// read or write. Memory accesses outside of the lifetime zone are a clear
|
|
|
|
// violation, but address calculations are okay. This can happen when
|
|
|
|
// GEPs are hoisted outside of the lifetime zone.
|
2012-09-13 16:51:00 +02:00
|
|
|
// So, in here we only check instructions which can read or write memory.
|
2014-03-09 16:44:45 +01:00
|
|
|
if (!I.mayLoad() && !I.mayStore())
|
2012-09-13 14:38:37 +02:00
|
|
|
continue;
|
|
|
|
|
2012-09-12 06:57:37 +02:00
|
|
|
// Check all of the machine operands.
|
2014-03-09 16:44:45 +01:00
|
|
|
for (const MachineOperand &MO : I.operands()) {
|
2012-09-12 06:57:37 +02:00
|
|
|
if (!MO.isFI())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
int Slot = MO.getIndex();
|
|
|
|
|
|
|
|
if (Slot<0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (Intervals[Slot]->empty())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// Check that the used slot is inside the calculated lifetime range.
|
|
|
|
// If it is not, warn about it and invalidate the range.
|
2014-03-09 16:44:45 +01:00
|
|
|
LiveInterval *Interval = &*Intervals[Slot];
|
2016-02-27 07:40:41 +01:00
|
|
|
SlotIndex Index = Indexes->getInstructionIndex(I);
|
2012-09-12 06:57:37 +02:00
|
|
|
if (Interval->find(Index) == Interval->end()) {
|
2014-03-09 16:44:45 +01:00
|
|
|
Interval->clear();
|
2012-09-12 06:57:37 +02:00
|
|
|
DEBUG(dbgs()<<"Invalidating range #"<<Slot<<"\n");
|
2012-09-12 13:06:26 +02:00
|
|
|
EscapedAllocas++;
|
2012-09-12 06:57:37 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-09-06 11:17:37 +02:00
|
|
|
void StackColoring::expungeSlotMap(DenseMap<int, int> &SlotRemap,
|
|
|
|
unsigned NumSlots) {
|
|
|
|
// Expunge slot remap map.
|
|
|
|
for (unsigned i=0; i < NumSlots; ++i) {
|
|
|
|
// If we are remapping slot i, follow the chain to its final target.
|
|
|
|
if (SlotRemap.count(i)) {
|
|
|
|
int Target = SlotRemap[i];
|
|
|
|
// As long as our target is mapped to something else, follow it.
|
|
|
|
while (SlotRemap.count(Target)) {
|
|
|
|
Target = SlotRemap[Target];
|
|
|
|
SlotRemap[i] = Target;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
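// A small worked example of the flattening above (illustrative only): given
// the chained remaps {0 -> 1, 1 -> 2, 2 -> 3}, expungeSlotMap rewrites every
// entry to its final target, yielding {0 -> 3, 1 -> 3, 2 -> 3}, so that
// remapInstructions never has to follow a chain of remapped slots.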
|
|
|
|
|
|
|
|
bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
|
|
|
|
DEBUG(dbgs() << "********** Stack Coloring **********\n"
|
2017-12-15 23:22:58 +01:00
|
|
|
<< "********** Function: " << Func.getName() << '\n');
|
2012-09-06 11:17:37 +02:00
|
|
|
MF = &Func;
|
2016-07-28 20:40:00 +02:00
|
|
|
MFI = &MF->getFrameInfo();
|
2012-09-06 11:17:37 +02:00
|
|
|
Indexes = &getAnalysis<SlotIndexes>();
|
2014-01-20 20:49:14 +01:00
|
|
|
SP = &getAnalysis<StackProtector>();
|
2012-09-06 11:17:37 +02:00
|
|
|
BlockLiveness.clear();
|
|
|
|
BasicBlocks.clear();
|
|
|
|
BasicBlockNumbering.clear();
|
|
|
|
Markers.clear();
|
|
|
|
Intervals.clear();
|
2017-06-12 16:56:02 +02:00
|
|
|
LiveStarts.clear();
|
2012-09-06 11:17:37 +02:00
|
|
|
VNInfoAllocator.Reset();
|
|
|
|
|
|
|
|
unsigned NumSlots = MFI->getObjectIndexEnd();
|
|
|
|
|
|
|
|
// If there are no stack slots then there are no markers to remove.
|
|
|
|
if (!NumSlots)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
SmallVector<int, 8> SortedSlots;
|
|
|
|
SortedSlots.reserve(NumSlots);
|
|
|
|
Intervals.reserve(NumSlots);
|
2017-06-12 16:56:02 +02:00
|
|
|
LiveStarts.resize(NumSlots);
|
2012-09-06 11:17:37 +02:00
|
|
|
|
|
|
|
unsigned NumMarkers = collectMarkers(NumSlots);
|
|
|
|
|
|
|
|
unsigned TotalSize = 0;
|
|
|
|
DEBUG(dbgs()<<"Found "<<NumMarkers<<" markers and "<<NumSlots<<" slots\n");
|
|
|
|
DEBUG(dbgs()<<"Slot structure:\n");
|
|
|
|
|
|
|
|
for (int i=0; i < MFI->getObjectIndexEnd(); ++i) {
|
|
|
|
DEBUG(dbgs()<<"Slot #"<<i<<" - "<<MFI->getObjectSize(i)<<" bytes.\n");
|
|
|
|
TotalSize += MFI->getObjectSize(i);
|
|
|
|
}
|
|
|
|
|
|
|
|
DEBUG(dbgs()<<"Total Stack size: "<<TotalSize<<" bytes\n\n");
|
|
|
|
|
|
|
|
// Don't continue if there are not enough lifetime markers, or the
|
2012-09-13 14:38:37 +02:00
|
|
|
// stack is too small, or we are told not to optimize the slots.
|
2016-05-28 00:56:49 +02:00
|
|
|
if (NumMarkers < 2 || TotalSize < 16 || DisableColoring ||
|
2017-12-15 23:22:58 +01:00
|
|
|
skipFunction(Func.getFunction())) {
|
2012-09-06 11:17:37 +02:00
|
|
|
DEBUG(dbgs()<<"Will not try to merge slots.\n");
|
|
|
|
return removeAllMarkers();
|
|
|
|
}
|
|
|
|
|
|
|
|
for (unsigned i=0; i < NumSlots; ++i) {
|
2014-03-09 16:44:45 +01:00
|
|
|
std::unique_ptr<LiveInterval> LI(new LiveInterval(i, 0));
|
2012-09-06 11:17:37 +02:00
|
|
|
LI->getNextValue(Indexes->getZeroIndex(), VNInfoAllocator);
|
2014-03-09 16:44:45 +01:00
|
|
|
Intervals.push_back(std::move(LI));
|
2012-09-06 11:17:37 +02:00
|
|
|
SortedSlots.push_back(i);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Calculate the liveness of each block.
|
|
|
|
calculateLocalLiveness();
|
2016-05-24 15:23:44 +02:00
|
|
|
DEBUG(dbgs() << "Dataflow iterations: " << NumIterations << "\n");
|
|
|
|
DEBUG(dump());
|
2012-09-06 11:17:37 +02:00
|
|
|
|
|
|
|
// Propagate the liveness information.
|
|
|
|
calculateLiveIntervals(NumSlots);
|
2016-05-24 15:23:44 +02:00
|
|
|
DEBUG(dumpIntervals());
|
2012-09-06 11:17:37 +02:00
|
|
|
|
2012-09-12 13:06:26 +02:00
|
|
|
// Search for allocas which are used outside of the declared lifetime
|
|
|
|
// markers.
|
2012-09-13 17:46:30 +02:00
|
|
|
if (ProtectFromEscapedAllocas)
|
2012-09-12 13:06:26 +02:00
|
|
|
removeInvalidSlotRanges();
|
2012-09-12 06:57:37 +02:00
|
|
|
|
2012-09-06 11:17:37 +02:00
|
|
|
// Maps old slots to new slots.
|
|
|
|
DenseMap<int, int> SlotRemap;
|
|
|
|
unsigned RemovedSlots = 0;
|
|
|
|
unsigned ReducedSize = 0;
|
|
|
|
|
|
|
|
// Do not bother looking at empty intervals.
|
|
|
|
for (unsigned I = 0; I < NumSlots; ++I) {
|
|
|
|
if (Intervals[SortedSlots[I]]->empty())
|
|
|
|
SortedSlots[I] = -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
// This is a simple greedy algorithm for merging allocas. First, sort the
|
|
|
|
// slots, placing the largest slots first. Next, perform an n^2 scan and look
|
|
|
|
// for disjoint slots. When you find disjoint slots, merge the smaller one
|
|
|
|
// into the bigger one and update the live interval. Remove the small alloca
|
|
|
|
// and continue.
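// For instance (illustrative only): given three pairwise-disjoint slots of
// 64, 32 and 16 bytes, the scan first merges the 32-byte slot into the
// 64-byte slot, then the 16-byte slot into the same (now combined) slot,
// leaving one stack object and saving 48 bytes.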
|
|
|
|
|
|
|
|
// Sort the slots according to their size. Place unused slots at the end.
|
2012-11-15 20:33:30 +01:00
|
|
|
// Use stable sort to guarantee deterministic code generation.
|
|
|
|
std::stable_sort(SortedSlots.begin(), SortedSlots.end(),
|
2014-03-01 12:47:00 +01:00
|
|
|
[this](int LHS, int RHS) {
|
|
|
|
// We use -1 to denote an uninteresting slot. Place these slots at the end.
|
|
|
|
if (LHS == -1) return false;
|
|
|
|
if (RHS == -1) return true;
|
|
|
|
// Sort according to size.
|
|
|
|
return MFI->getObjectSize(LHS) > MFI->getObjectSize(RHS);
|
|
|
|
});
|
2012-09-06 11:17:37 +02:00
|
|
|
|
2017-06-12 16:56:02 +02:00
|
|
|
for (auto &s : LiveStarts)
|
2018-04-06 20:08:42 +02:00
|
|
|
llvm::sort(s.begin(), s.end());
|
2017-06-12 16:56:02 +02:00
|
|
|
|
2013-03-25 22:26:36 +01:00
|
|
|
bool Changed = true;
|
|
|
|
while (Changed) {
|
|
|
|
Changed = false;
|
2012-09-06 11:17:37 +02:00
|
|
|
for (unsigned I = 0; I < NumSlots; ++I) {
|
|
|
|
if (SortedSlots[I] == -1)
|
|
|
|
continue;
|
|
|
|
|
2012-09-10 14:47:38 +02:00
|
|
|
for (unsigned J=I+1; J < NumSlots; ++J) {
|
2012-09-06 11:17:37 +02:00
|
|
|
if (SortedSlots[J] == -1)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
int FirstSlot = SortedSlots[I];
|
|
|
|
int SecondSlot = SortedSlots[J];
|
2014-03-09 16:44:45 +01:00
|
|
|
LiveInterval *First = &*Intervals[FirstSlot];
|
|
|
|
LiveInterval *Second = &*Intervals[SecondSlot];
|
2017-06-12 16:56:02 +02:00
|
|
|
auto &FirstS = LiveStarts[FirstSlot];
|
|
|
|
auto &SecondS = LiveStarts[SecondSlot];
|
2017-09-29 23:55:49 +02:00
|
|
|
assert(!First->empty() && !Second->empty() && "Found an empty range");
|
2012-09-06 11:17:37 +02:00
|
|
|
|
2017-06-12 16:56:02 +02:00
|
|
|
// Merge disjoint slots. This is a little bit tricky - see the
|
|
|
|
// Implementation Notes section for an explanation.
|
|
|
|
if (!First->isLiveAtIndexes(SecondS) &&
|
|
|
|
!Second->isLiveAtIndexes(FirstS)) {
|
2013-03-25 22:26:36 +01:00
|
|
|
Changed = true;
|
2013-10-10 23:28:43 +02:00
|
|
|
First->MergeSegmentsInAsValue(*Second, First->getValNumInfo(0));
|
2017-06-12 16:56:02 +02:00
|
|
|
|
|
|
|
int OldSize = FirstS.size();
|
|
|
|
FirstS.append(SecondS.begin(), SecondS.end());
|
|
|
|
auto Mid = FirstS.begin() + OldSize;
|
|
|
|
std::inplace_merge(FirstS.begin(), Mid, FirstS.end());
|
|
|
|
|
2012-09-06 11:17:37 +02:00
|
|
|
SlotRemap[SecondSlot] = FirstSlot;
|
|
|
|
SortedSlots[J] = -1;
|
2012-09-10 15:17:58 +02:00
|
|
|
DEBUG(dbgs()<<"Merging #"<<FirstSlot<<" and slots #"<<
|
|
|
|
SecondSlot<<" together.\n");
|
2012-09-06 11:17:37 +02:00
|
|
|
unsigned MaxAlignment = std::max(MFI->getObjectAlignment(FirstSlot),
|
|
|
|
MFI->getObjectAlignment(SecondSlot));
|
|
|
|
|
|
|
|
assert(MFI->getObjectSize(FirstSlot) >=
|
|
|
|
MFI->getObjectSize(SecondSlot) &&
|
|
|
|
"Merging a small object into a larger one");
|
|
|
|
|
|
|
|
RemovedSlots+=1;
|
|
|
|
ReducedSize += MFI->getObjectSize(SecondSlot);
|
|
|
|
MFI->setObjectAlignment(FirstSlot, MaxAlignment);
|
|
|
|
MFI->RemoveStackObject(SecondSlot);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}// While changed.
|
|
|
|
|
|
|
|
// Record statistics.
|
|
|
|
StackSpaceSaved += ReducedSize;
|
|
|
|
StackSlotMerged += RemovedSlots;
|
|
|
|
DEBUG(dbgs()<<"Merge "<<RemovedSlots<<" slots. Saved "<<
|
|
|
|
ReducedSize<<" bytes\n");
|
|
|
|
|
|
|
|
// Scan the entire function and update all machine operands that use frame
|
|
|
|
// indices to use the remapped frame index.
|
|
|
|
expungeSlotMap(SlotRemap, NumSlots);
|
|
|
|
remapInstructions(SlotRemap);
|
|
|
|
|
|
|
|
return removeAllMarkers();
|
|
|
|
}
|