//===- StackColoring.cpp --------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements the stack-coloring optimization that looks for
// lifetime marker machine instructions (LIFETIME_START and LIFETIME_END),
// which represent the possible lifetimes of stack slots. It attempts to
// merge disjoint stack slots and reduce the used stack space.
// NOTE: This pass is not StackSlotColoring, which optimizes spill slots.
//
// TODO: In the future we plan to improve stack coloring in the following ways:
// 1. Allow merging multiple small slots into a single larger slot at different
//    offsets.
// 2. Merge this pass with StackSlotColoring and allow merging of allocas with
//    spill slots.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/BitVector.h"
|
2017-09-29 23:55:49 +02:00
|
|
|
#include "llvm/ADT/DenseMap.h"
|
2012-09-06 11:17:37 +02:00
|
|
|
#include "llvm/ADT/DepthFirstIterator.h"
|
|
|
|
#include "llvm/ADT/SmallPtrSet.h"
|
2017-09-29 23:55:49 +02:00
|
|
|
#include "llvm/ADT/SmallVector.h"
|
2012-09-06 11:17:37 +02:00
|
|
|
#include "llvm/ADT/Statistic.h"
|
2012-12-03 17:50:05 +01:00
|
|
|
#include "llvm/Analysis/ValueTracking.h"
|
2012-09-06 11:17:37 +02:00
|
|
|
#include "llvm/CodeGen/LiveInterval.h"
|
2012-12-03 17:50:05 +01:00
|
|
|
#include "llvm/CodeGen/MachineBasicBlock.h"
|
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
2017-09-29 23:55:49 +02:00
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
2012-09-06 11:17:37 +02:00
|
|
|
#include "llvm/CodeGen/MachineFunctionPass.h"
|
2017-09-29 23:55:49 +02:00
|
|
|
#include "llvm/CodeGen/MachineInstr.h"
|
2012-12-03 17:50:05 +01:00
|
|
|
#include "llvm/CodeGen/MachineMemOperand.h"
|
2017-09-29 23:55:49 +02:00
|
|
|
#include "llvm/CodeGen/MachineOperand.h"
|
2016-01-15 01:46:17 +01:00
|
|
|
#include "llvm/CodeGen/Passes.h"
|
2017-08-01 05:32:15 +02:00
|
|
|
#include "llvm/CodeGen/SelectionDAGNodes.h"
|
2012-09-06 11:17:37 +02:00
|
|
|
#include "llvm/CodeGen/SlotIndexes.h"
|
2017-11-17 02:07:10 +01:00
|
|
|
#include "llvm/CodeGen/TargetOpcodes.h"
|
2016-01-08 09:03:55 +01:00
|
|
|
#include "llvm/CodeGen/WinEHFuncInfo.h"
|
2018-04-30 16:59:11 +02:00
|
|
|
#include "llvm/Config/llvm-config.h"
|
2017-09-29 23:55:49 +02:00
|
|
|
#include "llvm/IR/Constants.h"
|
|
|
|
#include "llvm/IR/DebugInfoMetadata.h"
|
2013-01-02 12:36:10 +01:00
|
|
|
#include "llvm/IR/Function.h"
|
|
|
|
#include "llvm/IR/Instructions.h"
|
2017-09-29 23:55:49 +02:00
|
|
|
#include "llvm/IR/Metadata.h"
|
|
|
|
#include "llvm/IR/Use.h"
|
|
|
|
#include "llvm/IR/Value.h"
|
Sink all InitializePasses.h includes
This file lists every pass in LLVM, and is included by Pass.h, which is
very popular. Every time we add, remove, or rename a pass in LLVM, it
caused lots of recompilation.
I found this fact by looking at this table, which is sorted by the
number of times a file was changed over the last 100,000 git commits
multiplied by the number of object files that depend on it in the
current checkout:
recompiles touches affected_files header
342380 95 3604 llvm/include/llvm/ADT/STLExtras.h
314730 234 1345 llvm/include/llvm/InitializePasses.h
307036 118 2602 llvm/include/llvm/ADT/APInt.h
213049 59 3611 llvm/include/llvm/Support/MathExtras.h
170422 47 3626 llvm/include/llvm/Support/Compiler.h
162225 45 3605 llvm/include/llvm/ADT/Optional.h
158319 63 2513 llvm/include/llvm/ADT/Triple.h
140322 39 3598 llvm/include/llvm/ADT/StringRef.h
137647 59 2333 llvm/include/llvm/Support/Error.h
131619 73 1803 llvm/include/llvm/Support/FileSystem.h
Before this change, touching InitializePasses.h would cause 1345 files
to recompile. After this change, touching it only causes 550 compiles in
an incremental rebuild.
Reviewers: bkramer, asbirlea, bollu, jdoerfert
Differential Revision: https://reviews.llvm.org/D70211
2019-11-13 22:15:01 +01:00
|
|
|
#include "llvm/InitializePasses.h"
|
2017-09-29 23:55:49 +02:00
|
|
|
#include "llvm/Pass.h"
|
|
|
|
#include "llvm/Support/Casting.h"
|
2012-09-06 11:17:37 +02:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
2017-09-29 23:55:49 +02:00
|
|
|
#include "llvm/Support/Compiler.h"
|
2012-09-06 11:17:37 +02:00
|
|
|
#include "llvm/Support/Debug.h"
|
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2017-09-29 23:55:49 +02:00
|
|
|
#include <algorithm>
|
|
|
|
#include <cassert>
|
|
|
|
#include <limits>
|
|
|
|
#include <memory>
|
|
|
|
#include <utility>
|
2012-09-06 11:17:37 +02:00
|
|
|
|
|
|
|
using namespace llvm;

#define DEBUG_TYPE "stack-coloring"

static cl::opt<bool>
DisableColoring("no-stack-coloring",
        cl::init(false), cl::Hidden,
        cl::desc("Disable stack coloring"));

/// The user may write code that uses allocas outside of the declared lifetime
/// zone. This can happen when the user returns a reference to a local
/// data-structure. We can detect these cases and decide not to optimize the
/// code. If this flag is enabled, we try to save the user. This option
/// is treated as overriding LifetimeStartOnFirstUse below.
static cl::opt<bool>
ProtectFromEscapedAllocas("protect-from-escaped-allocas",
                          cl::init(false), cl::Hidden,
                          cl::desc("Do not optimize lifetime zones that "
                                   "are broken"));

/// Enable enhanced dataflow scheme for lifetime analysis (treat first
/// use of stack slot as start of slot lifetime, as opposed to looking
/// for LIFETIME_START marker). See "Implementation notes" below for
/// more info.
static cl::opt<bool>
LifetimeStartOnFirstUse("stackcoloring-lifetime-start-on-first-use",
        cl::init(true), cl::Hidden,
        cl::desc("Treat stack lifetimes as starting on first use, not on START marker."));
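
// These are ordinary cl::opt flags, so they can be toggled on the command line
// of any tool that runs this pass; an illustrative invocation (input file name
// is hypothetical) would be:
//
//     llc -stackcoloring-lifetime-start-on-first-use=false test.ll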

STATISTIC(NumMarkerSeen,  "Number of lifetime markers found.");
STATISTIC(StackSpaceSaved, "Number of bytes saved due to merging slots.");
STATISTIC(StackSlotMerged, "Number of stack slots merged.");
STATISTIC(EscapedAllocas, "Number of allocas that escaped the lifetime region");

//===----------------------------------------------------------------------===//
//                           StackColoring Pass
//===----------------------------------------------------------------------===//
//
// Stack Coloring reduces stack usage by merging stack slots when they
// can't be used together. For example, consider the following C program:
//
//     void bar(char *, int);
//     void foo(bool var) {
//         A: {
//             char z[4096];
//             bar(z, 0);
//         }
//
//         char *p;
//         char x[4096];
//         char y[4096];
//         if (var) {
//             p = x;
//         } else {
//             bar(y, 1);
//             p = y + 1024;
//         }
//     B:
//         bar(p, 2);
//     }
//
// Naively-compiled, this program would use 12k of stack space. However, the
// stack slot corresponding to `z` is always destroyed before either of the
// stack slots for `x` or `y` are used, and then `x` is only used if `var`
// is true, while `y` is only used if `var` is false. So at no time are two
// of the stack slots in use together, and therefore we can merge them,
// compiling the function using only a single 4k alloca:
//
//     void foo(bool var) { // equivalent
//         char x[4096];
//         char *p;
//         bar(x, 0);
//         if (var) {
//             p = x;
//         } else {
//             bar(x, 1);
//             p = x + 1024;
//         }
//         bar(p, 2);
//     }
//
// This is an important optimization if we want stack space to be under
// control in large functions, both open-coded ones and ones created by
// inlining.
//
// Implementation Notes:
// ---------------------
//
// An important part of the above reasoning is that `z` can't be accessed
// while the latter two calls to `bar` are running. This is justified because
// `z`'s lifetime is over after we exit from block `A:`, so any further
// accesses to it would be UB. The way we represent this information
// in LLVM is by having frontends delimit blocks with `lifetime.start`
// and `lifetime.end` intrinsics.
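//
// As an illustration, for block `A:` in `foo` above a frontend would typically
// emit IR along these lines (exact pointer types and attributes vary between
// LLVM versions):
//
//     %z = alloca [4096 x i8], align 16
//     ...
//     %p = getelementptr inbounds [4096 x i8], [4096 x i8]* %z, i64 0, i64 0
//     call void @llvm.lifetime.start.p0i8(i64 4096, i8* %p)
//     call void @bar(i8* %p, i32 0)
//     call void @llvm.lifetime.end.p0i8(i64 4096, i8* %p)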
//
// The effect of these intrinsics seems to be as follows (maybe I should
// specify this in the reference?):
//
// L1) at start, each stack-slot is marked as *out-of-scope*, unless no
//     lifetime intrinsic refers to that stack slot, in which case
//     it is marked as *in-scope*.
// L2) on a `lifetime.start`, a stack slot is marked as *in-scope* and
//     the stack slot is overwritten with `undef`.
// L3) on a `lifetime.end`, a stack slot is marked as *out-of-scope*.
// L4) on function exit, all stack slots are marked as *out-of-scope*.
// L5) `lifetime.end` is a no-op when called on a slot that is already
//     *out-of-scope*.
// L6) memory accesses to *out-of-scope* stack slots are UB.
// L7) when a stack-slot is marked as *out-of-scope*, all pointers to it
//     are invalidated, unless the slot is "degenerate". This is used to
//     justify not marking slots as in-use until the pointer to them is
//     used, but feels a bit hacky in the presence of things like LICM. See
//     the "Degenerate Slots" section for more details.
//
// Now, let's ground stack coloring on these rules. We'll define a slot
// as *in-use* at a (dynamic) point in execution if it either can be
// written to at that point, or if it has a live and non-undef content
// at that point.
//
// Obviously, slots that are never *in-use* together can be merged, and
// in our example `foo`, the slots for `x`, `y` and `z` are never
// in-use together (of course, sometimes slots that *are* in-use together
// might still be mergeable, but we don't care about that here).
//
// In this implementation, we successively merge pairs of slots that are
// not *in-use* together. We could be smarter - for example, we could merge
// a single large slot with 2 small slots, or we could construct the
// interference graph and run a "smart" graph coloring algorithm, but with
// that aside, how do we find out whether a pair of slots might be *in-use*
// together?
//
// From our rules, we see that *out-of-scope* slots are never *in-use*,
// and from (L7) we see that "non-degenerate" slots remain non-*in-use*
// until their address is taken. Therefore, we can approximate slot activity
// using dataflow.
//
// A subtle point: naively, we might try to figure out which pairs of
// stack-slots interfere by propagating `S in-use` through the CFG for every
// stack-slot `S`, and having `S` and `T` interfere if there is a CFG point in
// which they are both *in-use*.
//
// That is sound, but overly conservative in some cases: in our (artificial)
// example `foo`, either `x` or `y` might be in use at the label `B:`, but
// as `x` is only in use if we came in from the `var` edge and `y` only
// if we came from the `!var` edge, they still can't be in use together.
// See PR32488 for an important real-life case.
//
// If we wanted to find all points of interference precisely, we could
// propagate `S in-use` and `S&T in-use` predicates through the CFG. That
// would be precise, but requires propagating `O(n^2)` dataflow facts.
//
// However, we aren't interested in the *set* of points of interference
// between 2 stack slots, only *whether* there *is* such a point. So we
// can rely on a little trick: for `S` and `T` to be in-use together,
// one of them needs to become in-use while the other is in-use (or
// they might both become in use simultaneously). We can check this
// by also keeping track of the points at which a stack slot might *start*
// being in-use.
//
// Exact first use:
// ----------------
//
// Consider the following motivating example:
//
//     int foo() {
//       char b1[1024], b2[1024];
//       if (...) {
//         char b3[1024];
//         <uses of b1, b3>;
//         return x;
//       } else {
//         char b4[1024], b5[1024];
//         <uses of b2, b4, b5>;
//         return y;
//       }
//     }
//
// In the code above, "b3" and "b4" are declared in distinct lexical
// scopes, meaning that it is easy to prove that they can share the
// same stack slot. Variables "b1" and "b2" are declared in the same
// scope, meaning that from a lexical point of view, their lifetimes
// overlap. From a control flow point of view, however, the two
// variables are accessed in disjoint regions of the CFG, thus it
// should be possible for them to share the same stack slot. An ideal
// stack allocation for the function above would look like:
//
//     slot 0: b1, b2
//     slot 1: b3, b4
//     slot 2: b5
//
// Achieving this allocation is tricky, however, due to the way
// lifetime markers are inserted. Here is a simplified view of the
// control flow graph for the code above:
//
//                +------  block 0 -------+
//               0| LIFETIME_START b1, b2 |
//               1| <test 'if' condition> |
//                +-----------------------+
//                   ./              \.
//   +------  block 1 -------+   +------  block 2 -------+
//  2| LIFETIME_START b3     |  5| LIFETIME_START b4, b5 |
//  3| <uses of b1, b3>      |  6| <uses of b2, b4, b5>  |
//  4| LIFETIME_END b3       |  7| LIFETIME_END b4, b5   |
//   +-----------------------+   +-----------------------+
//                   \.              /.
//                +------  block 3 -------+
//               8| <cleanupcode>         |
//               9| LIFETIME_END b1, b2   |
//              10| return                |
//                +-----------------------+
//
// If we create live intervals for the variables above strictly based
// on the lifetime markers, we'll get the set of intervals on the
// left. If we ignore the lifetime start markers and instead treat a
// variable's lifetime as beginning with the first reference to the
// var, then we get the intervals on the right.
//
//            LIFETIME_START      First Use
//     b1:    [0,9]               [3,4] [8,9]
//     b2:    [0,9]               [6,9]
//     b3:    [2,4]               [3,4]
//     b4:    [5,7]               [6,7]
//     b5:    [5,7]               [6,7]
//
// For the intervals on the left, the best we can do is overlap two
// variables (b3 and b4, for example); this gives us a stack size of
// 4*1024 bytes, not ideal. When treating first-use as the start of a
// lifetime, we can additionally overlap b1 and b5, giving us a 3*1024
// byte stack (better).
//
// Degenerate Slots:
// -----------------
//
// Relying entirely on first-use of stack slots is problematic,
// however, due to the fact that optimizations can sometimes migrate
// uses of a variable outside of its lifetime start/end region. Here
// is an example:
//
//     int bar() {
//       char b1[1024], b2[1024];
//       if (...) {
//         <uses of b2>
//         return y;
//       } else {
//         <uses of b1>
//         while (...) {
//           char b3[1024];
//           <uses of b3>
//         }
//       }
//     }
//
// Before optimization, the control flow graph for the code above
// might look like the following:
//
//                +------  block 0 -------+
//               0| LIFETIME_START b1, b2 |
//               1| <test 'if' condition> |
//                +-----------------------+
//                   ./              \.
//   +------  block 1 -------+    +------- block 2 -------+
//  2| <uses of b2>          |   3| <uses of b1>          |
//   +-----------------------+    +-----------------------+
//              |                            |
//              |     +------- block 3 -------+ <-\.
//              |    4| <while condition>     |    |
//              |     +-----------------------+    |
//              |              /     |             |
//              |             /      +------- block 4 -------+
//               \           /      5| LIFETIME_START b3     |  |
//                \         /       6| <uses of b3>          |  |
//                 \       /        7| LIFETIME_END b3       |  |
//                  \     |          +------------------------+ |
//                   \    |               \                    /
//                +------ block 5 -----+   \-------------------
//               8| <cleanupcode>      |
//               9| LIFETIME_END b1, b2 |
//              10| return              |
//                +---------------------+
//
// During optimization, however, it can happen that an instruction
// computing an address in "b3" (for example, a loop-invariant GEP) is
// hoisted up out of the loop from block 4 to block 2.  [Note that
// this is not an actual load from the stack, only an instruction that
// computes the address to be loaded]. If this happens, there is now a
// path leading from the first use of b3 to the return instruction
// that does not encounter the b3 LIFETIME_END, hence b3's lifetime is
// now larger than if we were computing live intervals strictly based
// on lifetime markers. In the example above, this lengthened lifetime
// would mean that it would appear illegal to overlap b3 with b2.
//
// To deal with such cases, the code in ::collectMarkers() below
// tries to identify "degenerate" slots -- those slots where on a single
// forward pass through the CFG we encounter a first reference to slot
// K before we hit the slot K lifetime start marker. For such slots,
// we fall back on using the lifetime start marker as the beginning of
// the variable's lifetime.  NB: with this implementation, slots can
// appear degenerate in cases where there is unstructured control flow:
//
//    if (q) goto mid;
//    if (x > 9) {
//         int b[100];
//         memcpy(&b[0], ...);
//  mid:   b[k] = ...;
//         abc(&b);
//    }
//
// If in the RPO ordering chosen to walk the CFG we happen to visit the b[k]
// store before visiting the memcpy block (which will contain the lifetime
// start for "b"), then it will appear that 'b' has a degenerate lifetime.
//
// Handle Windows Exception with LifetimeStartOnFirstUse:
// ------------------------------------------------------
//
// There was a bug when using LifetimeStartOnFirstUse in win32:
//
//     class Type1 {
//       ...
//       ~Type1() { write memory; }
//     };
//     ...
//     try {
//       Type1 V;
//       ...
//     } catch (Type2 X) {
//       ...
//     }
//
// For the variable X in catch(X), we put the pointer pX = &(&X) into
// ConservativeSlots to prevent using LifetimeStartOnFirstUse, because pX may
// be merged with object V, whose destructor may run after pX has been
// implicitly written. All of this happens inside the C++ EH runtime libs
// (through CxxThrowException), so it cannot be observed at the IR level.
//
// The load of pX, which has no corresponding store in the IR, is usually the
// first load MI in the EH pad, something like:
//
//     bb.x.catch.i (landing-pad, ehfunclet-entry):
//     ; predecessors: %bb...
//       successors: %bb...
//       %n:gr32 = MOV32rm %stack.pX ...
//       ...
//
// The Type2** slot %stack.pX will only be written by the EH runtime libs, so
// we check the StoreSlots to screen such slots out.

namespace {

/// StackColoring - A machine pass for merging disjoint stack allocations,
/// marked by the LIFETIME_START and LIFETIME_END pseudo instructions.
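/// After instruction selection these markers show up as target-independent
/// pseudo instructions operating on frame indexes, e.g. (illustrative MIR):
///
///     LIFETIME_START %stack.0.x
///     ...
///     LIFETIME_END %stack.0.x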
class StackColoring : public MachineFunctionPass {
  MachineFrameInfo *MFI;
  MachineFunction *MF;

  /// A class representing liveness information for a single basic block.
  /// Each bit in the BitVector represents the liveness property
  /// for a different stack slot.
  struct BlockLifetimeInfo {
    /// Which slots BEGIN in each basic block.
    BitVector Begin;

    /// Which slots END in each basic block.
    BitVector End;

    /// Which slots are marked as LIVE_IN, coming into each basic block.
    BitVector LiveIn;

    /// Which slots are marked as LIVE_OUT, coming out of each basic block.
    BitVector LiveOut;
  };

  /// Maps active slots (per bit) for each basic block.
  using LivenessMap = DenseMap<const MachineBasicBlock *, BlockLifetimeInfo>;
  LivenessMap BlockLiveness;

  /// Maps basic blocks to a serial number.
  DenseMap<const MachineBasicBlock *, int> BasicBlocks;

  /// Maps serial numbers to basic blocks.
  SmallVector<const MachineBasicBlock *, 8> BasicBlockNumbering;

  /// Maps slots to their use interval. Outside of this interval, a slot's
  /// value is either dead or `undef` and it will not be written to.
  SmallVector<std::unique_ptr<LiveInterval>, 16> Intervals;

  /// Maps slots to the points where they can become in-use.
  SmallVector<SmallVector<SlotIndex, 4>, 16> LiveStarts;

  /// VNInfo is used for the construction of LiveIntervals.
  VNInfo::Allocator VNInfoAllocator;

  /// SlotIndex analysis object.
  SlotIndexes *Indexes;

  /// The list of lifetime markers found. These markers are to be removed
  /// once the coloring is done.
  SmallVector<MachineInstr*, 8> Markers;

  /// Record the FI slots for which we have seen some sort of
  /// lifetime marker (either start or end).
  BitVector InterestingSlots;

  /// FI slots that need to be handled conservatively (for these
  /// slots lifetime-start-on-first-use is disabled).
  BitVector ConservativeSlots;

  /// Record the FI slots referenced by a 'may write to memory' instruction.
  BitVector StoreSlots;

  /// Number of iterations taken during data flow analysis.
  unsigned NumIterations;

public:
  static char ID;

  StackColoring() : MachineFunctionPass(ID) {
    initializeStackColoringPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnMachineFunction(MachineFunction &Func) override;

private:
  /// Used in collectMarkers
  using BlockBitVecMap = DenseMap<const MachineBasicBlock *, BitVector>;

  /// Debug.
  void dump() const;
  void dumpIntervals() const;
  void dumpBB(MachineBasicBlock *MBB) const;
  void dumpBV(const char *tag, const BitVector &BV) const;

  /// Removes all of the lifetime marker instructions from the function.
  /// \returns true if any markers were removed.
  bool removeAllMarkers();

  /// Scan the machine function and find all of the lifetime markers.
  /// Record the findings in the BEGIN and END vectors.
  /// \returns the number of markers found.
  unsigned collectMarkers(unsigned NumSlot);

  /// Perform the dataflow calculation and calculate the lifetime for each of
  /// the slots, based on the BEGIN/END vectors. Set the LifetimeLIVE_IN and
  /// LifetimeLIVE_OUT maps that represent which stack slots are live coming
  /// into and out of blocks.
  void calculateLocalLiveness();

  /// Returns TRUE if we're using the first-use-begins-lifetime method for
  /// this slot (if FALSE, then the start marker is treated as start of lifetime).
  bool applyFirstUse(int Slot) {
    if (!LifetimeStartOnFirstUse || ProtectFromEscapedAllocas)
      return false;
    if (ConservativeSlots.test(Slot))
      return false;
    return true;
  }

  /// Examines the specified instruction and returns TRUE if the instruction
  /// represents the start or end of an interesting lifetime. The slot or slots
  /// starting or ending are added to the vector "slots" and "isStart" is set
  /// accordingly.
  /// \returns True if inst contains a lifetime start or end
  bool isLifetimeStartOrEnd(const MachineInstr &MI,
                            SmallVector<int, 4> &slots,
                            bool &isStart);

  /// Construct the LiveIntervals for the slots.
  void calculateLiveIntervals(unsigned NumSlots);

  /// Go over the machine function and change instructions which use stack
  /// slots to use the joint slots.
  void remapInstructions(DenseMap<int, int> &SlotRemap);

  /// The input program may contain instructions which are not inside lifetime
  /// markers. This can happen due to a bug in the compiler or due to a bug in
  /// user code (for example, returning a reference to a local variable).
  /// This procedure checks all of the instructions in the function and
  /// invalidates lifetime ranges which do not contain all of the instructions
  /// which access that frame slot.
  void removeInvalidSlotRanges();

  /// Map entries which point to other entries to their destination.
  /// A->B->C becomes A->C.
  void expungeSlotMap(DenseMap<int, int> &SlotRemap, unsigned NumSlots);
};

} // end anonymous namespace

char StackColoring::ID = 0;

char &llvm::StackColoringID = StackColoring::ID;

INITIALIZE_PASS_BEGIN(StackColoring, DEBUG_TYPE,
                      "Merge disjoint stack slots", false, false)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_END(StackColoring, DEBUG_TYPE,
                    "Merge disjoint stack slots", false, false)

void StackColoring::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void StackColoring::dumpBV(const char *tag,
                                            const BitVector &BV) const {
  dbgs() << tag << " : { ";
  for (unsigned I = 0, E = BV.size(); I != E; ++I)
    dbgs() << BV.test(I) << " ";
  dbgs() << "}\n";
}

LLVM_DUMP_METHOD void StackColoring::dumpBB(MachineBasicBlock *MBB) const {
  LivenessMap::const_iterator BI = BlockLiveness.find(MBB);
  assert(BI != BlockLiveness.end() && "Block not found");
  const BlockLifetimeInfo &BlockInfo = BI->second;

  dumpBV("BEGIN", BlockInfo.Begin);
  dumpBV("END", BlockInfo.End);
  dumpBV("LIVE_IN", BlockInfo.LiveIn);
  dumpBV("LIVE_OUT", BlockInfo.LiveOut);
}

LLVM_DUMP_METHOD void StackColoring::dump() const {
  for (MachineBasicBlock *MBB : depth_first(MF)) {
    dbgs() << "Inspecting block #" << MBB->getNumber() << " ["
           << MBB->getName() << "]\n";
    dumpBB(MBB);
  }
}

LLVM_DUMP_METHOD void StackColoring::dumpIntervals() const {
  for (unsigned I = 0, E = Intervals.size(); I != E; ++I) {
    dbgs() << "Interval[" << I << "]:\n";
    Intervals[I]->dump();
  }
}
#endif

/// Return the frame index referenced by the given LIFETIME_START or
/// LIFETIME_END marker, or -1 if the marker does not refer to a normal
/// (non-fixed) stack slot.
static inline int getStartOrEndSlot(const MachineInstr &MI)
{
  assert((MI.getOpcode() == TargetOpcode::LIFETIME_START ||
          MI.getOpcode() == TargetOpcode::LIFETIME_END) &&
         "Expected LIFETIME_START or LIFETIME_END op");
  const MachineOperand &MO = MI.getOperand(0);
  int Slot = MO.getIndex();
  if (Slot >= 0)
    return Slot;
  return -1;
}

// At the moment the only way to end a variable lifetime is with
// a LIFETIME_END op (which can't also contain a start). If things
// change and the IR allows for a single inst that both begins
// and ends lifetime(s), this interface will need to be reworked.
bool StackColoring::isLifetimeStartOrEnd(const MachineInstr &MI,
                                         SmallVector<int, 4> &slots,
                                         bool &isStart) {
  if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
      MI.getOpcode() == TargetOpcode::LIFETIME_END) {
    int Slot = getStartOrEndSlot(MI);
    if (Slot < 0)
      return false;
    if (!InterestingSlots.test(Slot))
      return false;
    slots.push_back(Slot);
    if (MI.getOpcode() == TargetOpcode::LIFETIME_END) {
      isStart = false;
      return true;
    }
    if (!applyFirstUse(Slot)) {
      isStart = true;
      return true;
    }
  } else if (LifetimeStartOnFirstUse && !ProtectFromEscapedAllocas) {
    if (!MI.isDebugInstr()) {
      bool found = false;
      for (const MachineOperand &MO : MI.operands()) {
        if (!MO.isFI())
          continue;
        int Slot = MO.getIndex();
        if (Slot < 0)
          continue;
        if (InterestingSlots.test(Slot) && applyFirstUse(Slot)) {
          slots.push_back(Slot);
          found = true;
        }
      }
      if (found) {
        isStart = true;
        return true;
      }
    }
  }
  return false;
}

unsigned StackColoring::collectMarkers(unsigned NumSlot) {
  unsigned MarkersFound = 0;
  BlockBitVecMap SeenStartMap;
  InterestingSlots.clear();
  InterestingSlots.resize(NumSlot);
  ConservativeSlots.clear();
  ConservativeSlots.resize(NumSlot);
  StoreSlots.clear();
  StoreSlots.resize(NumSlot);

  // number of start and end lifetime ops for each slot
  SmallVector<int, 8> NumStartLifetimes(NumSlot, 0);
  SmallVector<int, 8> NumEndLifetimes(NumSlot, 0);
  SmallVector<int, 8> NumLoadInCatchPad(NumSlot, 0);

  // Step 1: collect markers and populate the "InterestingSlots"
  // and "ConservativeSlots" sets.
  for (MachineBasicBlock *MBB : depth_first(MF)) {
    // Compute the set of slots for which we've seen a START marker but have
    // not yet seen an END marker at this point in the walk (e.g. on entry
    // to this bb).
    BitVector BetweenStartEnd;
    BetweenStartEnd.resize(NumSlot);
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      BlockBitVecMap::const_iterator I = SeenStartMap.find(Pred);
      if (I != SeenStartMap.end()) {
        BetweenStartEnd |= I->second;
      }
    }

    // Walk the instructions in the block to look for start/end ops.
    for (MachineInstr &MI : *MBB) {
      if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
          MI.getOpcode() == TargetOpcode::LIFETIME_END) {
        int Slot = getStartOrEndSlot(MI);
        if (Slot < 0)
          continue;
        InterestingSlots.set(Slot);
        if (MI.getOpcode() == TargetOpcode::LIFETIME_START) {
          BetweenStartEnd.set(Slot);
          NumStartLifetimes[Slot] += 1;
        } else {
          BetweenStartEnd.reset(Slot);
          NumEndLifetimes[Slot] += 1;
        }
        const AllocaInst *Allocation = MFI->getObjectAllocation(Slot);
        if (Allocation) {
          LLVM_DEBUG(dbgs() << "Found a lifetime ");
          LLVM_DEBUG(dbgs() << (MI.getOpcode() == TargetOpcode::LIFETIME_START
                                    ? "start"
                                    : "end"));
          LLVM_DEBUG(dbgs() << " marker for slot #" << Slot);
          LLVM_DEBUG(dbgs()
                     << " with allocation: " << Allocation->getName() << "\n");
        }
        Markers.push_back(&MI);
        MarkersFound += 1;
      } else {
        for (const MachineOperand &MO : MI.operands()) {
          if (!MO.isFI())
            continue;
          int Slot = MO.getIndex();
          if (Slot < 0)
            continue;
          if (!BetweenStartEnd.test(Slot)) {
            ConservativeSlots.set(Slot);
          }
          // Here we check the StoreSlots to screen the catch-object slot out.
          // For more information, please refer to "Handle Windows Exception
          // with LifetimeStartOnFirstUse" at the head of this file.
          if (MI.mayStore())
            StoreSlots.set(Slot);
          if (MF->getWinEHFuncInfo() && MBB->isEHPad() && MI.mayLoad())
            NumLoadInCatchPad[Slot] += 1;
        }
      }
    }
    BitVector &SeenStart = SeenStartMap[MBB];
    SeenStart |= BetweenStartEnd;
  }
  if (!MarkersFound) {
    return 0;
  }

  // 1) PR27903: slots with multiple start or end lifetime ops are not
  //    safe to enable for "lifetime-start-on-first-use".
  // 2) Also not safe for the catch object X in catch(X) on Windows.
  for (unsigned slot = 0; slot < NumSlot; ++slot) {
    if (NumStartLifetimes[slot] > 1 || NumEndLifetimes[slot] > 1 ||
        (NumLoadInCatchPad[slot] > 1 && !StoreSlots.test(slot)))
      ConservativeSlots.set(slot);
  }
  LLVM_DEBUG(dumpBV("Conservative slots", ConservativeSlots));

  // Step 2: compute begin/end sets for each block

  // NOTE: We use a depth-first iteration to ensure that we obtain a
  // deterministic numbering.
  for (MachineBasicBlock *MBB : depth_first(MF)) {
    // Assign a serial number to this basic block.
    BasicBlocks[MBB] = BasicBlockNumbering.size();
    BasicBlockNumbering.push_back(MBB);

    // Keep a reference to avoid repeated lookups.
    BlockLifetimeInfo &BlockInfo = BlockLiveness[MBB];

    BlockInfo.Begin.resize(NumSlot);
    BlockInfo.End.resize(NumSlot);

    SmallVector<int, 4> slots;
    for (MachineInstr &MI : *MBB) {
      bool isStart = false;
      slots.clear();
      if (isLifetimeStartOrEnd(MI, slots, isStart)) {
        if (!isStart) {
          assert(slots.size() == 1 && "unexpected: MI ends multiple slots");
          int Slot = slots[0];
          if (BlockInfo.Begin.test(Slot)) {
            BlockInfo.Begin.reset(Slot);
          }
          BlockInfo.End.set(Slot);
        } else {
          for (auto Slot : slots) {
            LLVM_DEBUG(dbgs() << "Found a use of slot #" << Slot);
            LLVM_DEBUG(dbgs()
                       << " at " << printMBBReference(*MBB) << " index ");
            LLVM_DEBUG(Indexes->getInstructionIndex(MI).print(dbgs()));
            const AllocaInst *Allocation = MFI->getObjectAllocation(Slot);
            if (Allocation) {
              LLVM_DEBUG(dbgs()
                         << " with allocation: " << Allocation->getName());
            }
            LLVM_DEBUG(dbgs() << "\n");
            if (BlockInfo.End.test(Slot)) {
              BlockInfo.End.reset(Slot);
            }
            BlockInfo.Begin.set(Slot);
          }
        }
      }
    }
  }

  // Update statistics.
  NumMarkerSeen += MarkersFound;
  return MarkersFound;
}

void StackColoring::calculateLocalLiveness() {
  unsigned NumIters = 0;
  bool changed = true;
  while (changed) {
    changed = false;
    ++NumIters;

    for (const MachineBasicBlock *BB : BasicBlockNumbering) {
      // Use an iterator to avoid repeated lookups.
      LivenessMap::iterator BI = BlockLiveness.find(BB);
      assert(BI != BlockLiveness.end() && "Block not found");
      BlockLifetimeInfo &BlockInfo = BI->second;

      // Compute LiveIn by unioning together the LiveOut sets of all preds.
      BitVector LocalLiveIn;
      for (MachineBasicBlock *Pred : BB->predecessors()) {
        LivenessMap::const_iterator I = BlockLiveness.find(Pred);
        // PR37130: transformations prior to stack coloring can
        // sometimes leave behind statically unreachable blocks; these
        // can be safely skipped here.
        if (I != BlockLiveness.end())
          LocalLiveIn |= I->second.LiveOut;
      }

      // Compute LiveOut by subtracting out lifetimes that end in this
      // block, then adding in lifetimes that begin in this block. If
      // we have both BEGIN and END markers in the same basic block
      // then we know that the BEGIN marker comes after the END,
      // because we already handle the case where the BEGIN comes
      // before the END when collecting the markers (and building the
      // BEGIN/END vectors).
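      // In other words: LiveOut = (LiveIn - End) | Begin.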
      BitVector LocalLiveOut = LocalLiveIn;
      LocalLiveOut.reset(BlockInfo.End);
      LocalLiveOut |= BlockInfo.Begin;

      // Update block LiveIn set, noting whether it has changed.
      if (LocalLiveIn.test(BlockInfo.LiveIn)) {
        changed = true;
        BlockInfo.LiveIn |= LocalLiveIn;
      }

      // Update block LiveOut set, noting whether it has changed.
      if (LocalLiveOut.test(BlockInfo.LiveOut)) {
        changed = true;
        BlockInfo.LiveOut |= LocalLiveOut;
      }
    }
  } // while changed.

  NumIterations = NumIters;
}

void StackColoring::calculateLiveIntervals(unsigned NumSlots) {
  SmallVector<SlotIndex, 16> Starts;
  SmallVector<bool, 16> DefinitelyInUse;

  // For each block, find which slots are active within this block
  // and update the live intervals.
  for (const MachineBasicBlock &MBB : *MF) {
    Starts.clear();
    Starts.resize(NumSlots);
    DefinitelyInUse.clear();
    DefinitelyInUse.resize(NumSlots);

    // Start the interval of the slots that we previously found to be 'in-use'.
    BlockLifetimeInfo &MBBLiveness = BlockLiveness[&MBB];
    for (int pos = MBBLiveness.LiveIn.find_first(); pos != -1;
         pos = MBBLiveness.LiveIn.find_next(pos)) {
      Starts[pos] = Indexes->getMBBStartIdx(&MBB);
    }

    // Create the interval for the basic blocks containing lifetime begin/end.
    for (const MachineInstr &MI : MBB) {
      SmallVector<int, 4> slots;
      bool IsStart = false;
      if (!isLifetimeStartOrEnd(MI, slots, IsStart))
        continue;
      SlotIndex ThisIndex = Indexes->getInstructionIndex(MI);
      for (auto Slot : slots) {
        if (IsStart) {
          // If a slot is already definitely in use, we don't have to emit
          // a new start marker because there is already a pre-existing
          // one.
          if (!DefinitelyInUse[Slot]) {
            LiveStarts[Slot].push_back(ThisIndex);
            DefinitelyInUse[Slot] = true;
          }
          if (!Starts[Slot].isValid())
            Starts[Slot] = ThisIndex;
        } else {
          if (Starts[Slot].isValid()) {
            VNInfo *VNI = Intervals[Slot]->getValNumInfo(0);
            Intervals[Slot]->addSegment(
                LiveInterval::Segment(Starts[Slot], ThisIndex, VNI));
            Starts[Slot] = SlotIndex(); // Invalidate the start index
            DefinitelyInUse[Slot] = false;
          }
        }
      }
    }

    // Finish up started segments
    for (unsigned i = 0; i < NumSlots; ++i) {
      if (!Starts[i].isValid())
        continue;

      SlotIndex EndIdx = Indexes->getMBBEndIdx(&MBB);
      VNInfo *VNI = Intervals[i]->getValNumInfo(0);
      Intervals[i]->addSegment(LiveInterval::Segment(Starts[i], EndIdx, VNI));
    }
  }
}

bool StackColoring::removeAllMarkers() {
  unsigned Count = 0;
  for (MachineInstr *MI : Markers) {
    MI->eraseFromParent();
    Count++;
  }
  Markers.clear();

  LLVM_DEBUG(dbgs() << "Removed " << Count << " markers.\n");
  return Count;
}
|
|
|
|
|
|
|
|
void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) {
|
|
|
|
unsigned FixedInstr = 0;
|
|
|
|
unsigned FixedMemOp = 0;
|
|
|
|
unsigned FixedDbg = 0;
|
|
|
|
|
|
|
|
// Remap debug information that refers to stack slots.
|
2016-12-01 00:48:50 +01:00
|
|
|
for (auto &VI : MF->getVariableDbgInfo()) {
|
2014-03-09 16:44:39 +01:00
|
|
|
if (!VI.Var)
|
|
|
|
continue;
|
|
|
|
if (SlotRemap.count(VI.Slot)) {
|
2018-05-14 14:53:11 +02:00
|
|
|
LLVM_DEBUG(dbgs() << "Remapping debug info for ["
|
|
|
|
<< cast<DILocalVariable>(VI.Var)->getName() << "].\n");
|
2014-03-09 16:44:39 +01:00
|
|
|
VI.Slot = SlotRemap[VI.Slot];
|
2012-09-06 11:17:37 +02:00
|
|
|
FixedDbg++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Keep a list of *allocas* which need to be remapped.
|
2012-10-18 21:53:48 +02:00
|
|
|
DenseMap<const AllocaInst*, const AllocaInst*> Allocas;
|
2017-08-01 05:32:15 +02:00
|
|
|
|
|
|
|
// Keep a list of allocas which has been affected by the remap.
|
|
|
|
SmallPtrSet<const AllocaInst*, 32> MergedAllocas;
|
|
|
|
|
2014-03-09 16:44:45 +01:00
|
|
|
for (const std::pair<int, int> &SI : SlotRemap) {
|
|
|
|
const AllocaInst *From = MFI->getObjectAllocation(SI.first);
|
|
|
|
const AllocaInst *To = MFI->getObjectAllocation(SI.second);
|
2012-09-06 11:17:37 +02:00
|
|
|
assert(To && From && "Invalid allocation object");
|
|
|
|
Allocas[From] = To;
|
Update IR when merging slots in stack coloring
The way that stack coloring updated MMOs when merging stack slots, while
correct, is suboptimal, and is incompatible with the use of AA during
instruction scheduling. The solution, which involves the use of const_cast (and
more importantly, updating the IR from within an MI-level pass), obviously
requires some explanation:
When the stack coloring pass was originally committed, the code in
ScheduleDAGInstrs::buildSchedGraph tracked possible alias sets by using
GetUnderlyingObject, and all load/store and store/store memory control
dependencies where added between SUs at the object level (where only one
object, that returned by GetUnderlyingObject, was used to identify the object
associated with each MMO). When stack coloring merged stack slots, it would
replace MMOs derived from the remapped alloca with the alloca with which the
remapped alloca was being replaced. Because ScheduleDAGInstrs only used single
objects, and tracked alias sets at the object level, this was a fine solution.
In r169744, (Andy and) I updated the code in ScheduleDAGInstrs to use
GetUnderlyingObjects, and track alias sets using, potentially, multiple
underlying objects for each MMO. This was done, primarily, to provide the
ability to look through PHIs, and provide better scheduling for
induction-variable-dependent loads and stores inside loops. At this point, the
MMO-updating code in stack coloring became suboptimal, because it would clear
the MMOs for (i.e. completely pessimize) all instructions for which r169744
might help in scheduling. Updating the IR directly is the simplest fix for this
(and the one with, by far, the least compile-time impact), but others are
possible (we could give each MMO a small vector of potential values, or make
use of a remapping table, constructed from MFI, inside ScheduleDAGInstrs).
Unfortunately, replacing all MMO values derived from the remapped alloca with
the base replacement alloca fundamentally breaks our ability to use AA during
instruction scheduling (which is critical to performance on some targets). The
reason is that the original MMO might have had an offset (either constant or
dynamic) from the base remapped alloca, and that offset is not present in the
updated MMO. One possible way around this would be to use
GetPointerBaseWithConstantOffset, and update not only the MMO's value, but also
its offset based on the original offset. Unfortunately, this solution would
only handle constant offsets, and for safety (because AA is not completely
restricted to deducing relationships with constant offsets), we would need to
clear all MMOs without constant offsets over the entire function. This would be
an even worse pessimization than the current single-object restriction. Any
other solution would involve passing around a vector of remapped allocas, and
teaching AA to use it, introducing additional complexity and overhead into AA.
Instead, when remapping an alloca, we replace all IR uses of that alloca as
well (optionally inserting a bitcast as necessary). This is even more efficient
that the old MMO-updating code in the stack coloring pass (because it removes
the need to call GetUnderlyingObject on all MMO values), removes the
single-object pessimization in the default configuration, and enables the
correct use of AA during instruction scheduling (all without any additional
overhead).
LLVM now no longer miscompiles itself on x86_64 when using -enable-misched
-enable-aa-sched-mi -misched-bottomup=0 -misched-topdown=0 -misched=shuffle!
Fixed PR18497.
Because the alloca replacement is now done at the IR level, unless the MMO
directly refers to the remapped alloca, the change cannot be seen at the MI
level. As a result, there is no good way to fix test/CodeGen/X86/pr14090.ll.
llvm-svn: 199658
2014-01-20 15:03:16 +01:00
|
|
|
|
2020-05-19 18:59:24 +02:00
|
|
|
// If From is before wo, its possible that there is a use of From between
|
|
|
|
// them.
|
|
|
|
if (From->comesBefore(To))
|
|
|
|
const_cast<AllocaInst*>(To)->moveBefore(const_cast<AllocaInst*>(From));
|
|
|
|
|
Update IR when merging slots in stack coloring
The way that stack coloring updated MMOs when merging stack slots, while
correct, is suboptimal, and is incompatible with the use of AA during
instruction scheduling. The solution, which involves the use of const_cast (and
more importantly, updating the IR from within an MI-level pass), obviously
requires some explanation:
When the stack coloring pass was originally committed, the code in
ScheduleDAGInstrs::buildSchedGraph tracked possible alias sets by using
GetUnderlyingObject, and all load/store and store/store memory control
dependencies where added between SUs at the object level (where only one
object, that returned by GetUnderlyingObject, was used to identify the object
associated with each MMO). When stack coloring merged stack slots, it would
replace MMOs derived from the remapped alloca with the alloca with which the
remapped alloca was being replaced. Because ScheduleDAGInstrs only used single
objects, and tracked alias sets at the object level, this was a fine solution.
In r169744, (Andy and) I updated the code in ScheduleDAGInstrs to use
GetUnderlyingObjects, and track alias sets using, potentially, multiple
underlying objects for each MMO. This was done, primarily, to provide the
ability to look through PHIs, and provide better scheduling for
induction-variable-dependent loads and stores inside loops. At this point, the
MMO-updating code in stack coloring became suboptimal, because it would clear
the MMOs for (i.e. completely pessimize) all instructions for which r169744
might help in scheduling. Updating the IR directly is the simplest fix for this
(and the one with, by far, the least compile-time impact), but others are
possible (we could give each MMO a small vector of potential values, or make
use of a remapping table, constructed from MFI, inside ScheduleDAGInstrs).
Unfortunately, replacing all MMO values derived from the remapped alloca with
the base replacement alloca fundamentally breaks our ability to use AA during
instruction scheduling (which is critical to performance on some targets). The
reason is that the original MMO might have had an offset (either constant or
dynamic) from the base remapped alloca, and that offset is not present in the
updated MMO. One possible way around this would be to use
GetPointerBaseWithConstantOffset, and update not only the MMO's value, but also
its offset based on the original offset. Unfortunately, this solution would
only handle constant offsets, and for safety (because AA is not completely
restricted to deducing relationships with constant offsets), we would need to
clear all MMOs without constant offsets over the entire function. This would be
an even worse pessimization than the current single-object restriction. Any
other solution would involve passing around a vector of remapped allocas, and
teaching AA to use it, introducing additional complexity and overhead into AA.
Instead, when remapping an alloca, we replace all IR uses of that alloca as
well (optionally inserting a bitcast as necessary). This is even more efficient
that the old MMO-updating code in the stack coloring pass (because it removes
the need to call GetUnderlyingObject on all MMO values), removes the
single-object pessimization in the default configuration, and enables the
correct use of AA during instruction scheduling (all without any additional
overhead).
LLVM now no longer miscompiles itself on x86_64 when using -enable-misched
-enable-aa-sched-mi -misched-bottomup=0 -misched-topdown=0 -misched=shuffle!
Fixed PR18497.
Because the alloca replacement is now done at the IR level, unless the MMO
directly refers to the remapped alloca, the change cannot be seen at the MI
level. As a result, there is no good way to fix test/CodeGen/X86/pr14090.ll.
llvm-svn: 199658
2014-01-20 15:03:16 +01:00
|
|
|
    // AA might be used later for instruction scheduling, and we need it to be
    // able to deduce the correct aliasing relationships between pointers
    // derived from the alloca being remapped and the target of that remapping.
    // The only safe way, without directly informing AA about the remapping
    // somehow, is to directly update the IR to reflect the change being made
    // here.
    Instruction *Inst = const_cast<AllocaInst *>(To);
    if (From->getType() != To->getType()) {
      BitCastInst *Cast = new BitCastInst(Inst, From->getType());
      Cast->insertAfter(Inst);
      Inst = Cast;
    }

    // We keep both slots to maintain AliasAnalysis metadata later.
    MergedAllocas.insert(From);
    MergedAllocas.insert(To);

    // Transfer the stack protector layout tag, but make sure that SSPLK_AddrOf
    // does not overwrite SSPLK_SmallArray or SSPLK_LargeArray, and make sure
    // that SSPLK_SmallArray does not overwrite SSPLK_LargeArray.
    MachineFrameInfo::SSPLayoutKind FromKind =
        MFI->getObjectSSPLayout(SI.first);
    MachineFrameInfo::SSPLayoutKind ToKind = MFI->getObjectSSPLayout(SI.second);
    if (FromKind != MachineFrameInfo::SSPLK_None &&
        (ToKind == MachineFrameInfo::SSPLK_None ||
         (ToKind != MachineFrameInfo::SSPLK_LargeArray &&
          FromKind != MachineFrameInfo::SSPLK_AddrOf)))
      MFI->setObjectSSPLayout(SI.second, FromKind);
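    // In other words, the surviving slot keeps the more protective of the two
    // tags, with the kinds ordered roughly
    // SSPLK_None < SSPLK_AddrOf < SSPLK_SmallArray < SSPLK_LargeArray.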

    // The new alloca might not be valid in a llvm.dbg.declare for this
    // variable, so undef out the use to make the verifier happy.
    AllocaInst *FromAI = const_cast<AllocaInst *>(From);
    if (FromAI->isUsedByMetadata())
      ValueAsMetadata::handleRAUW(FromAI, UndefValue::get(FromAI->getType()));
    for (auto &Use : FromAI->uses()) {
      if (BitCastInst *BCI = dyn_cast<BitCastInst>(Use.get()))
        if (BCI->isUsedByMetadata())
          ValueAsMetadata::handleRAUW(BCI, UndefValue::get(BCI->getType()));
    }
    // Note that this will not replace uses in MMOs (which we'll update below),
    // or anywhere else (which is why we won't delete the original
    // instruction).
    FromAI->replaceAllUsesWith(Inst);
  }

  // Remap all instructions to the new stack slots.
  std::vector<std::vector<MachineMemOperand *>> SSRefs(
      MFI->getObjectIndexEnd());
  for (MachineBasicBlock &BB : *MF)
    for (MachineInstr &I : BB) {
      // Skip lifetime markers. We'll remove them soon.
      if (I.getOpcode() == TargetOpcode::LIFETIME_START ||
          I.getOpcode() == TargetOpcode::LIFETIME_END)
        continue;

      // Update the MachineMemOperand to use the new alloca.
      for (MachineMemOperand *MMO : I.memoperands()) {
        // We've replaced IR-level uses of the remapped allocas, so we only
        // need to replace direct uses here.
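        // (Allocas maps each alloca that is going away to the alloca that now
        // backs its slot; indirect uses were already rewritten by the RAUW
        // above.)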
        const AllocaInst *AI = dyn_cast_or_null<AllocaInst>(MMO->getValue());
        if (!AI)
          continue;

        if (!Allocas.count(AI))
          continue;

        MMO->setValue(Allocas[AI]);
        FixedMemOp++;
      }

      // Update all of the machine instruction operands.
      for (MachineOperand &MO : I.operands()) {
        if (!MO.isFI())
          continue;
        int FromSlot = MO.getIndex();

        // Don't touch arguments.
        if (FromSlot < 0)
          continue;

        // Only look at mapped slots.
        if (!SlotRemap.count(FromSlot))
          continue;

        // In a debug build, check that the instruction that we are modifying
        // is inside the expected live range. If the instruction is not inside
        // the calculated range then it means that the alloca usage moved
        // outside of the lifetime markers, or that the user has a bug.
        // NOTE: Alloca address calculations which happen outside the lifetime
        // zone are okay, despite the fact that we don't have a good way
        // for validating all of the usages of the calculation.
#ifndef NDEBUG
        bool TouchesMemory = I.mayLoadOrStore();
        // If we *don't* protect the user from escaped allocas, don't bother
        // validating the instructions.
        if (!I.isDebugInstr() && TouchesMemory && ProtectFromEscapedAllocas) {
          SlotIndex Index = Indexes->getInstructionIndex(I);
          const LiveInterval *Interval = &*Intervals[FromSlot];
          assert(Interval->find(Index) != Interval->end() &&
                 "Found instruction usage outside of live range.");
        }
#endif

        // Fix the machine instructions.
        int ToSlot = SlotRemap[FromSlot];
        MO.setIndex(ToSlot);
        FixedInstr++;
      }

      // We adjust AliasAnalysis information for merged stack slots.
      SmallVector<MachineMemOperand *, 2> NewMMOs;
      bool ReplaceMemOps = false;
      for (MachineMemOperand *MMO : I.memoperands()) {
        // Collect MachineMemOperands which reference
        // FixedStackPseudoSourceValues with old frame indices.
        if (const auto *FSV = dyn_cast_or_null<FixedStackPseudoSourceValue>(
                MMO->getPseudoValue())) {
          int FI = FSV->getFrameIndex();
          auto To = SlotRemap.find(FI);
          if (To != SlotRemap.end())
            SSRefs[FI].push_back(MMO);
        }

        // If this memory location can be a slot remapped here,
        // we remove AA information.
        bool MayHaveConflictingAAMD = false;
        if (MMO->getAAInfo()) {
          if (const Value *MMOV = MMO->getValue()) {
            SmallVector<Value *, 4> Objs;
            getUnderlyingObjectsForCodeGen(MMOV, Objs);
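
            // getUnderlyingObjectsForCodeGen leaves Objs empty when it cannot
            // identify every underlying object; that case is handled
            // conservatively below by dropping the AA metadata.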
            if (Objs.empty())
              MayHaveConflictingAAMD = true;
            else
              for (Value *V : Objs) {
                // If this memory location comes from a known stack slot
                // that is not remapped, we continue checking.
                // Otherwise, we need to invalidate AA information.
                const AllocaInst *AI = dyn_cast_or_null<AllocaInst>(V);
                if (AI && MergedAllocas.count(AI)) {
                  MayHaveConflictingAAMD = true;
                  break;
                }
              }
          }
        }

        if (MayHaveConflictingAAMD) {
          NewMMOs.push_back(MF->getMachineMemOperand(MMO, AAMDNodes()));
          ReplaceMemOps = true;
        } else {
          NewMMOs.push_back(MMO);
        }
      }

      // If any memory operand is updated, set memory references of
      // this instruction.
      if (ReplaceMemOps)
        I.setMemRefs(*MF, NewMMOs);
    }

  // Rewrite MachineMemOperands that reference old frame indices.
  for (auto E : enumerate(SSRefs))
    if (!E.value().empty()) {
      const PseudoSourceValue *NewSV =
          MF->getPSVManager().getFixedStack(SlotRemap.find(E.index())->second);
      for (MachineMemOperand *Ref : E.value())
        Ref->setValue(NewSV);
    }

  // Update the location of C++ catch objects for the MSVC personality routine.
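  // (A frame index of std::numeric_limits<int>::max() marks a handler without
  // a catch object, so such entries are left alone.)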
  if (WinEHFuncInfo *EHInfo = MF->getWinEHFuncInfo())
    for (WinEHTryBlockMapEntry &TBME : EHInfo->TryBlockMap)
      for (WinEHHandlerType &H : TBME.HandlerArray)
        if (H.CatchObj.FrameIndex != std::numeric_limits<int>::max() &&
            SlotRemap.count(H.CatchObj.FrameIndex))
          H.CatchObj.FrameIndex = SlotRemap[H.CatchObj.FrameIndex];

  LLVM_DEBUG(dbgs() << "Fixed " << FixedMemOp << " machine memory operands.\n");
  LLVM_DEBUG(dbgs() << "Fixed " << FixedDbg << " debug locations.\n");
  LLVM_DEBUG(dbgs() << "Fixed " << FixedInstr << " machine instructions.\n");
}

void StackColoring::removeInvalidSlotRanges() {
  for (MachineBasicBlock &BB : *MF)
    for (MachineInstr &I : BB) {
      if (I.getOpcode() == TargetOpcode::LIFETIME_START ||
          I.getOpcode() == TargetOpcode::LIFETIME_END || I.isDebugInstr())
        continue;

      // Some intervals are suspicious! In some cases we find address
      // calculations outside of the lifetime zone, but no actual memory
      // reads or writes. Memory accesses outside of the lifetime zone are a
      // clear violation, but address calculations are okay. This can happen
      // when GEPs are hoisted outside of the lifetime zone.
      // So, in here we only check instructions which can read or write memory.
      if (!I.mayLoad() && !I.mayStore())
        continue;

      // Check all of the machine operands.
      for (const MachineOperand &MO : I.operands()) {
        if (!MO.isFI())
          continue;

        int Slot = MO.getIndex();

        if (Slot < 0)
          continue;

        if (Intervals[Slot]->empty())
          continue;

        // Check that the used slot is inside the calculated lifetime range.
        // If it is not, warn about it and invalidate the range.
        LiveInterval *Interval = &*Intervals[Slot];
        SlotIndex Index = Indexes->getInstructionIndex(I);
        if (Interval->find(Index) == Interval->end()) {
          Interval->clear();
          LLVM_DEBUG(dbgs() << "Invalidating range #" << Slot << "\n");
          EscapedAllocas++;
        }
      }
    }
}

void StackColoring::expungeSlotMap(DenseMap<int, int> &SlotRemap,
                                   unsigned NumSlots) {
  // Expunge slot remap map.
  for (unsigned i = 0; i < NumSlots; ++i) {
    // If we are remapping slot i, resolve the slot it should finally map to.
    if (SlotRemap.count(i)) {
      int Target = SlotRemap[i];
      // As long as our target is mapped to something else, follow it.
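      // For example, if SlotRemap is {0 -> 1, 1 -> 3}, the entry for slot 0
      // ends up mapping directly to slot 3.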
      while (SlotRemap.count(Target)) {
        Target = SlotRemap[Target];
        SlotRemap[i] = Target;
      }
    }
  }
}

bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
  LLVM_DEBUG(dbgs() << "********** Stack Coloring **********\n"
                    << "********** Function: " << Func.getName() << '\n');
  MF = &Func;
  MFI = &MF->getFrameInfo();
  Indexes = &getAnalysis<SlotIndexes>();
  BlockLiveness.clear();
  BasicBlocks.clear();
  BasicBlockNumbering.clear();
  Markers.clear();
  Intervals.clear();
  LiveStarts.clear();
  VNInfoAllocator.Reset();

  unsigned NumSlots = MFI->getObjectIndexEnd();

  // If there are no stack slots then there are no markers to remove.
  if (!NumSlots)
    return false;

  SmallVector<int, 8> SortedSlots;
  SortedSlots.reserve(NumSlots);
  Intervals.reserve(NumSlots);
  LiveStarts.resize(NumSlots);

  unsigned NumMarkers = collectMarkers(NumSlots);

  unsigned TotalSize = 0;
  LLVM_DEBUG(dbgs() << "Found " << NumMarkers << " markers and " << NumSlots
                    << " slots\n");
  LLVM_DEBUG(dbgs() << "Slot structure:\n");

  for (int i = 0; i < MFI->getObjectIndexEnd(); ++i) {
    LLVM_DEBUG(dbgs() << "Slot #" << i << " - " << MFI->getObjectSize(i)
                      << " bytes.\n");
    TotalSize += MFI->getObjectSize(i);
  }

  LLVM_DEBUG(dbgs() << "Total Stack size: " << TotalSize << " bytes\n\n");

  // Don't continue because there are not enough lifetime markers, or the
  // stack is too small, or we are told not to optimize the slots.
  if (NumMarkers < 2 || TotalSize < 16 || DisableColoring ||
      skipFunction(Func.getFunction())) {
    LLVM_DEBUG(dbgs() << "Will not try to merge slots.\n");
    return removeAllMarkers();
  }

  for (unsigned i = 0; i < NumSlots; ++i) {
    std::unique_ptr<LiveInterval> LI(new LiveInterval(i, 0));
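    // Each slot gets its own LiveInterval, keyed by its frame index; the
    // weight argument is unused by this pass.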
    LI->getNextValue(Indexes->getZeroIndex(), VNInfoAllocator);
    Intervals.push_back(std::move(LI));
    SortedSlots.push_back(i);
  }

  // Calculate the liveness of each block.
  calculateLocalLiveness();
  LLVM_DEBUG(dbgs() << "Dataflow iterations: " << NumIterations << "\n");
  LLVM_DEBUG(dump());

  // Propagate the liveness information.
  calculateLiveIntervals(NumSlots);
  LLVM_DEBUG(dumpIntervals());

  // Search for allocas which are used outside of the declared lifetime
  // markers.
  if (ProtectFromEscapedAllocas)
    removeInvalidSlotRanges();

  // Maps old slots to new slots.
  DenseMap<int, int> SlotRemap;
  unsigned RemovedSlots = 0;
  unsigned ReducedSize = 0;

  // Do not bother looking at empty intervals.
  for (unsigned I = 0; I < NumSlots; ++I) {
    if (Intervals[SortedSlots[I]]->empty())
      SortedSlots[I] = -1;
  }

  // This is a simple greedy algorithm for merging allocas. First, sort the
  // slots, placing the largest slots first. Next, perform an n^2 scan and look
  // for disjoint slots. When you find disjoint slots, merge the smaller one
  // into the bigger one and update the live interval. Remove the small alloca
  // and continue.
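  // For example, given slots of 64, 32 and 16 bytes whose live ranges are
  // pairwise disjoint, the scan merges the 32- and 16-byte slots into the
  // 64-byte slot, leaving a single 64-byte slot.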

  // Sort the slots according to their size. Place unused slots at the end.
  // Use stable sort to guarantee deterministic code generation.
  llvm::stable_sort(SortedSlots, [this](int LHS, int RHS) {
    // We use -1 to denote an uninteresting slot. Place these slots at the end.
    if (LHS == -1)
      return false;
    if (RHS == -1)
      return true;
    // Sort according to size.
    return MFI->getObjectSize(LHS) > MFI->getObjectSize(RHS);
  });

  for (auto &s : LiveStarts)
    llvm::sort(s);

  bool Changed = true;
  while (Changed) {
    Changed = false;
    for (unsigned I = 0; I < NumSlots; ++I) {
      if (SortedSlots[I] == -1)
        continue;

      for (unsigned J = I + 1; J < NumSlots; ++J) {
        if (SortedSlots[J] == -1)
          continue;

        int FirstSlot = SortedSlots[I];
        int SecondSlot = SortedSlots[J];
        LiveInterval *First = &*Intervals[FirstSlot];
        LiveInterval *Second = &*Intervals[SecondSlot];
        auto &FirstS = LiveStarts[FirstSlot];
        auto &SecondS = LiveStarts[SecondSlot];
        assert(!First->empty() && !Second->empty() && "Found an empty range");

        // Merge disjoint slots. This is a little bit tricky - see the
        // Implementation Notes section for an explanation.
        if (!First->isLiveAtIndexes(SecondS) &&
            !Second->isLiveAtIndexes(FirstS)) {
          Changed = true;
          First->MergeSegmentsInAsValue(*Second, First->getValNumInfo(0));

          int OldSize = FirstS.size();
          FirstS.append(SecondS.begin(), SecondS.end());
          auto Mid = FirstS.begin() + OldSize;
          std::inplace_merge(FirstS.begin(), Mid, FirstS.end());
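          // Appending SecondS and merging in place keeps FirstS sorted, which
          // the isLiveAtIndexes checks above rely on.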

          SlotRemap[SecondSlot] = FirstSlot;
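          // Any chain of remappings recorded here is flattened later by
          // expungeSlotMap before the operands are rewritten.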
          SortedSlots[J] = -1;
          LLVM_DEBUG(dbgs() << "Merging #" << FirstSlot << " and slot #"
                            << SecondSlot << " together.\n");
          Align MaxAlignment = std::max(MFI->getObjectAlign(FirstSlot),
                                        MFI->getObjectAlign(SecondSlot));

          assert(MFI->getObjectSize(FirstSlot) >=
                     MFI->getObjectSize(SecondSlot) &&
                 "Merging a small object into a larger one");

          RemovedSlots += 1;
          ReducedSize += MFI->getObjectSize(SecondSlot);
          MFI->setObjectAlignment(FirstSlot, MaxAlignment);
          MFI->RemoveStackObject(SecondSlot);
        }
      }
    }
  } // While changed.

  // Record statistics.
  StackSpaceSaved += ReducedSize;
  StackSlotMerged += RemovedSlots;
  LLVM_DEBUG(dbgs() << "Merged " << RemovedSlots << " slots. Saved "
                    << ReducedSize << " bytes\n");

  // Scan the entire function and update all machine operands that use frame
  // indices to use the remapped frame index.
  expungeSlotMap(SlotRemap, NumSlots);
  remapInstructions(SlotRemap);

  return removeAllMarkers();
}