2017-10-05 02:33:50 +02:00
|
|
|
//===- X86OptimizeLEAs.cpp - optimize usage of LEA instructions -----------===//
|
2015-12-04 11:53:15 +01:00
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file defines the pass that performs some optimizations with LEA
|
2016-05-19 12:18:29 +02:00
|
|
|
// instructions in order to improve performance and code size.
|
2016-01-13 12:30:44 +01:00
|
|
|
// Currently, it does two things:
|
|
|
|
// 1) If there are two LEA instructions calculating addresses which only differ
|
|
|
|
// by displacement inside a basic block, one of them is removed.
|
|
|
|
// 2) Address calculations in load and store instructions are replaced by
|
2015-12-04 11:53:15 +01:00
|
|
|
// existing LEA def registers where possible.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2017-10-05 02:33:50 +02:00
|
|
|
#include "MCTargetDesc/X86BaseInfo.h"
|
2015-12-04 11:53:15 +01:00
|
|
|
#include "X86.h"
|
|
|
|
#include "X86InstrInfo.h"
|
|
|
|
#include "X86Subtarget.h"
|
2017-10-05 02:33:50 +02:00
|
|
|
#include "llvm/ADT/DenseMap.h"
|
|
|
|
#include "llvm/ADT/DenseMapInfo.h"
|
|
|
|
#include "llvm/ADT/Hashing.h"
|
|
|
|
#include "llvm/ADT/SmallVector.h"
|
2015-12-04 11:53:15 +01:00
|
|
|
#include "llvm/ADT/Statistic.h"
|
2017-10-05 02:33:50 +02:00
|
|
|
#include "llvm/CodeGen/MachineBasicBlock.h"
|
[X86] Improvement in CodeGen instruction selection for LEAs.
Summary:
1/ Operand folding during complex pattern matching for LEAs has been extended, such that it promotes Scale to
accommodate similar operand appearing in the DAG e.g.
T1 = A + B
T2 = T1 + 10
T3 = T2 + A
For above DAG rooted at T3, X86AddressMode will now look like
Base = B , Index = A , Scale = 2 , Disp = 10
2/ During OptimizeLEAPass down the pipeline factorization is now performed over LEAs so that if there is an opportunity
then complex LEAs (having 3 operands) could be factored out e.g.
leal 1(%rax,%rcx,1), %rdx
leal 1(%rax,%rcx,2), %rcx
will be factored as following
leal 1(%rax,%rcx,1), %rdx
leal (%rdx,%rcx) , %edx
3/ Aggressive operand folding for AM based selection for LEAs is sensitive to loops, thus avoiding creation of any complex LEAs within a loop.
4/ Simplify LEA converts (lea (BASE,1,INDEX,0) --> add (BASE, INDEX) which offers better through put.
PR32755 will be taken care of by this pathc.
Previous patch revisions : r313343 , r314886
Reviewers: lsaba, RKSimon, craig.topper, qcolombet, jmolloy, jbhateja
Reviewed By: lsaba, RKSimon, jbhateja
Subscribers: jmolloy, spatel, igorb, llvm-commits
Differential Revision: https://reviews.llvm.org/D35014
llvm-svn: 319543
2017-12-01 15:07:38 +01:00
|
|
|
#include "llvm/CodeGen/MachineDominators.h"
|
2017-10-05 02:33:50 +02:00
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
2015-12-04 11:53:15 +01:00
|
|
|
#include "llvm/CodeGen/MachineFunctionPass.h"
|
2017-10-05 02:33:50 +02:00
|
|
|
#include "llvm/CodeGen/MachineInstr.h"
|
2015-12-04 11:53:15 +01:00
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
2016-02-20 11:58:28 +01:00
|
|
|
#include "llvm/CodeGen/MachineOperand.h"
|
2015-12-04 11:53:15 +01:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2017-11-17 02:07:10 +01:00
|
|
|
#include "llvm/CodeGen/TargetOpcodes.h"
|
|
|
|
#include "llvm/CodeGen/TargetRegisterInfo.h"
|
2017-06-06 13:49:48 +02:00
|
|
|
#include "llvm/IR/DebugInfoMetadata.h"
|
2017-10-05 02:33:50 +02:00
|
|
|
#include "llvm/IR/DebugLoc.h"
|
2015-12-04 11:53:15 +01:00
|
|
|
#include "llvm/IR/Function.h"
|
2017-10-05 02:33:50 +02:00
|
|
|
#include "llvm/MC/MCInstrDesc.h"
|
|
|
|
#include "llvm/Support/CommandLine.h"
|
2015-12-04 11:53:15 +01:00
|
|
|
#include "llvm/Support/Debug.h"
|
2017-10-05 02:33:50 +02:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
|
|
|
#include "llvm/Support/MathExtras.h"
|
2015-12-04 11:53:15 +01:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2017-10-05 02:33:50 +02:00
|
|
|
#include <cassert>
|
|
|
|
#include <cstdint>
|
|
|
|
#include <iterator>
|
2015-12-04 11:53:15 +01:00
|
|
|
|
|
|
|
using namespace llvm;

#define DEBUG_TYPE "x86-optimize-LEAs"

// Command-line escape hatch so the pass can be disabled for debugging or
// benchmarking without rebuilding the compiler.
static cl::opt<bool>
    DisableX86LEAOpt("disable-x86-lea-opt", cl::Hidden,
                     cl::desc("X86: Disable LEA optimizations."),
                     cl::init(false));

// Pass statistics, reported under -stats.
STATISTIC(NumSubstLEAs, "Number of LEA instruction substitutions");
STATISTIC(NumFactoredLEAs, "Number of LEAs factorized");
STATISTIC(NumRedundantLEAs, "Number of redundant LEA instructions removed");
|
2015-12-04 11:53:15 +01:00
|
|
|
|
2016-02-04 09:57:03 +01:00
|
|
|
/// \brief Returns true if two machine operands are identical and they are not
/// physical registers.
static inline bool isIdenticalOp(const MachineOperand &MO1,
                                 const MachineOperand &MO2);

/// \brief Returns true if the virtual registers in \p MO1 and \p MO2 are
/// defined by instructions with the same opcode and identical non-def
/// operands. Despite taking operands, this compares the *defining*
/// instructions, not the operands themselves.
static bool isIdenticalMI(MachineRegisterInfo *MRI, const MachineOperand &MO1,
                          const MachineOperand &MO2);

/// \brief Returns true if two address displacement operands are of the same
/// type and use the same symbol/index/address regardless of the offset.
static bool isSimilarDispOp(const MachineOperand &MO1,
                            const MachineOperand &MO2);

/// \brief Returns true if the instruction is LEA.
static inline bool isLEA(const MachineInstr &MI);

/// \brief Returns true if the register operand \p Opr is defined by a
/// copy-like instruction (COPY, SUBREG_TO_REG, etc.).
static bool isDefCopyLike(MachineRegisterInfo *MRI, const MachineOperand &Opr);
|
|
|
|
|
2016-08-06 13:13:10 +02:00
|
|
|
namespace {

/// A key based on instruction's memory operands, used to detect LEAs (and
/// loads/stores) that address the same or similar memory.
///
/// NOTE(review): for DenseMap empty/tombstone keys the operand pointers are
/// sentinel values and must never be dereferenced; DenseMapInfo::isEqual
/// filters those out before operator== is reached.
class MemOpKey {
public:
  MemOpKey(const MachineOperand *Base, const MachineOperand *Scale,
           const MachineOperand *Index, const MachineOperand *Segment,
           const MachineOperand *Disp, bool DispCheck = false)
      : Disp(Disp), DeepCheck(DispCheck) {
    Operands[0] = Base;
    Operands[1] = Scale;
    Operands[2] = Index;
    Operands[3] = Segment;
  }

  /// Checks operands of MemOpKey are identical, if Base or Index
  /// operand definitions are of kind SUBREG_TO_REG then compare
  /// operands of defining MI.
  ///
  /// NOTE(review): assumes Operands[0] (Base) is still embedded in a live
  /// MachineInstr — getParent() is dereferenced unconditionally. Also note
  /// the displacement is compared with isIdenticalOp here (exact match),
  /// unlike the shallow path below which uses isSimilarDispOp.
  bool performDeepCheck(const MemOpKey &Other) const {
    // const_cast only to obtain a non-const MachineRegisterInfo handle; the
    // instruction itself is not modified.
    MachineInstr *MI = const_cast<MachineInstr *>(Operands[0]->getParent());
    MachineRegisterInfo *MRI = MI->getRegInfo();

    for (int i = 0; i < 4; i++) {
      // Copy-like defs are compared by their defining instruction's
      // operands; everything else is compared operand-to-operand.
      bool CopyLike = isDefCopyLike(MRI, *Operands[i]);
      if (CopyLike && !isIdenticalMI(MRI, *Operands[i], *Other.Operands[i]))
        return false;
      else if (!CopyLike && !isIdenticalOp(*Operands[i], *Other.Operands[i]))
        return false;
    }
    return isIdenticalOp(*Disp, *Other.Disp);
  }

  bool operator==(const MemOpKey &Other) const {
    if (DeepCheck)
      return performDeepCheck(Other);

    // Addresses' bases, scales, indices and segments must be identical.
    for (int i = 0; i < 4; ++i)
      if (!isIdenticalOp(*Operands[i], *Other.Operands[i]))
        return false;

    // Addresses' displacements don't have to be exactly the same. It only
    // matters that they use the same symbol/index/address. Immediates' or
    // offsets' differences will be taken care of during instruction
    // substitution.
    return isSimilarDispOp(*Disp, *Other.Disp);
  }

  // Address' base, scale, index and segment operands.
  const MachineOperand *Operands[4];

  // Address' displacement operand.
  const MachineOperand *Disp;

  // If true, checks that address' base, index, segment and
  // displacement are identical; in addition, if base/index
  // are defined by a copy-like instruction, then further
  // compare the operands of the defining instruction.
  bool DeepCheck;
};

} // end anonymous namespace
|
2016-02-04 09:57:03 +01:00
|
|
|
|
|
|
|
/// Provide DenseMapInfo for MemOpKey.
|
|
|
|
namespace llvm {
|
2017-10-05 02:33:50 +02:00
|
|
|
|
2016-02-04 09:57:03 +01:00
|
|
|
template <> struct DenseMapInfo<MemOpKey> {
|
2017-10-05 02:33:50 +02:00
|
|
|
using PtrInfo = DenseMapInfo<const MachineOperand *>;
|
2016-02-04 09:57:03 +01:00
|
|
|
|
|
|
|
static inline MemOpKey getEmptyKey() {
|
|
|
|
return MemOpKey(PtrInfo::getEmptyKey(), PtrInfo::getEmptyKey(),
|
|
|
|
PtrInfo::getEmptyKey(), PtrInfo::getEmptyKey(),
|
|
|
|
PtrInfo::getEmptyKey());
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline MemOpKey getTombstoneKey() {
|
|
|
|
return MemOpKey(PtrInfo::getTombstoneKey(), PtrInfo::getTombstoneKey(),
|
|
|
|
PtrInfo::getTombstoneKey(), PtrInfo::getTombstoneKey(),
|
|
|
|
PtrInfo::getTombstoneKey());
|
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned getHashValue(const MemOpKey &Val) {
|
|
|
|
// Checking any field of MemOpKey is enough to determine if the key is
|
|
|
|
// empty or tombstone.
|
[X86] Improvement in CodeGen instruction selection for LEAs.
Summary:
1/ Operand folding during complex pattern matching for LEAs has been extended, such that it promotes Scale to
accommodate similar operand appearing in the DAG e.g.
T1 = A + B
T2 = T1 + 10
T3 = T2 + A
For above DAG rooted at T3, X86AddressMode will now look like
Base = B , Index = A , Scale = 2 , Disp = 10
2/ During OptimizeLEAPass down the pipeline factorization is now performed over LEAs so that if there is an opportunity
then complex LEAs (having 3 operands) could be factored out e.g.
leal 1(%rax,%rcx,1), %rdx
leal 1(%rax,%rcx,2), %rcx
will be factored as following
leal 1(%rax,%rcx,1), %rdx
leal (%rdx,%rcx) , %edx
3/ Aggressive operand folding for AM based selection for LEAs is sensitive to loops, thus avoiding creation of any complex LEAs within a loop.
4/ Simplify LEA converts (lea (BASE,1,INDEX,0) --> add (BASE, INDEX) which offers better through put.
PR32755 will be taken care of by this pathc.
Previous patch revisions : r313343 , r314886
Reviewers: lsaba, RKSimon, craig.topper, qcolombet, jmolloy, jbhateja
Reviewed By: lsaba, RKSimon, jbhateja
Subscribers: jmolloy, spatel, igorb, llvm-commits
Differential Revision: https://reviews.llvm.org/D35014
llvm-svn: 319543
2017-12-01 15:07:38 +01:00
|
|
|
hash_code Hash(0);
|
2016-02-04 09:57:03 +01:00
|
|
|
assert(Val.Disp != PtrInfo::getEmptyKey() && "Cannot hash the empty key");
|
|
|
|
assert(Val.Disp != PtrInfo::getTombstoneKey() &&
|
|
|
|
"Cannot hash the tombstone key");
|
|
|
|
|
[X86] Improvement in CodeGen instruction selection for LEAs.
Summary:
1/ Operand folding during complex pattern matching for LEAs has been extended, such that it promotes Scale to
accommodate similar operand appearing in the DAG e.g.
T1 = A + B
T2 = T1 + 10
T3 = T2 + A
For above DAG rooted at T3, X86AddressMode will now look like
Base = B , Index = A , Scale = 2 , Disp = 10
2/ During OptimizeLEAPass down the pipeline factorization is now performed over LEAs so that if there is an opportunity
then complex LEAs (having 3 operands) could be factored out e.g.
leal 1(%rax,%rcx,1), %rdx
leal 1(%rax,%rcx,2), %rcx
will be factored as following
leal 1(%rax,%rcx,1), %rdx
leal (%rdx,%rcx) , %edx
3/ Aggressive operand folding for AM based selection for LEAs is sensitive to loops, thus avoiding creation of any complex LEAs within a loop.
4/ Simplify LEA converts (lea (BASE,1,INDEX,0) --> add (BASE, INDEX) which offers better through put.
PR32755 will be taken care of by this pathc.
Previous patch revisions : r313343 , r314886
Reviewers: lsaba, RKSimon, craig.topper, qcolombet, jmolloy, jbhateja
Reviewed By: lsaba, RKSimon, jbhateja
Subscribers: jmolloy, spatel, igorb, llvm-commits
Differential Revision: https://reviews.llvm.org/D35014
llvm-svn: 319543
2017-12-01 15:07:38 +01:00
|
|
|
auto getMIHash = [](MachineInstr *MI) -> hash_code {
|
|
|
|
hash_code h(0);
|
|
|
|
for (unsigned i = 1, e = MI->getNumOperands(); i < e; i++)
|
|
|
|
h = hash_combine(h, MI->getOperand(i));
|
|
|
|
return h;
|
|
|
|
};
|
|
|
|
|
|
|
|
const MachineOperand &Base = *Val.Operands[0];
|
|
|
|
const MachineOperand &Index = *Val.Operands[2];
|
|
|
|
MachineInstr *MI = const_cast<MachineInstr *>(Base.getParent());
|
|
|
|
MachineRegisterInfo *MRI = MI->getRegInfo();
|
|
|
|
|
|
|
|
if (isDefCopyLike(MRI, Base))
|
|
|
|
Hash = getMIHash(MRI->getVRegDef(Base.getReg()));
|
|
|
|
else
|
|
|
|
Hash = hash_combine(Hash, Base);
|
|
|
|
|
|
|
|
if (isDefCopyLike(MRI, Index))
|
|
|
|
Hash = getMIHash(MRI->getVRegDef(Index.getReg()));
|
|
|
|
else
|
|
|
|
Hash = hash_combine(Hash, Index);
|
|
|
|
|
|
|
|
Hash = hash_combine(Hash, *Val.Operands[1], *Val.Operands[3]);
|
2016-02-04 09:57:03 +01:00
|
|
|
|
|
|
|
// If the address displacement is an immediate, it should not affect the
|
|
|
|
// hash so that memory operands which differ only be immediate displacement
|
2016-02-20 11:58:28 +01:00
|
|
|
// would have the same hash. If the address displacement is something else,
|
|
|
|
// we should reflect symbol/index/address in the hash.
|
|
|
|
switch (Val.Disp->getType()) {
|
|
|
|
case MachineOperand::MO_Immediate:
|
|
|
|
break;
|
|
|
|
case MachineOperand::MO_ConstantPoolIndex:
|
|
|
|
case MachineOperand::MO_JumpTableIndex:
|
|
|
|
Hash = hash_combine(Hash, Val.Disp->getIndex());
|
|
|
|
break;
|
|
|
|
case MachineOperand::MO_ExternalSymbol:
|
|
|
|
Hash = hash_combine(Hash, Val.Disp->getSymbolName());
|
|
|
|
break;
|
|
|
|
case MachineOperand::MO_GlobalAddress:
|
2016-02-04 09:57:03 +01:00
|
|
|
Hash = hash_combine(Hash, Val.Disp->getGlobal());
|
2016-02-20 11:58:28 +01:00
|
|
|
break;
|
|
|
|
case MachineOperand::MO_BlockAddress:
|
|
|
|
Hash = hash_combine(Hash, Val.Disp->getBlockAddress());
|
|
|
|
break;
|
|
|
|
case MachineOperand::MO_MCSymbol:
|
|
|
|
Hash = hash_combine(Hash, Val.Disp->getMCSymbol());
|
|
|
|
break;
|
2016-04-26 14:18:12 +02:00
|
|
|
case MachineOperand::MO_MachineBasicBlock:
|
|
|
|
Hash = hash_combine(Hash, Val.Disp->getMBB());
|
|
|
|
break;
|
2016-02-20 11:58:28 +01:00
|
|
|
default:
|
|
|
|
llvm_unreachable("Invalid address displacement operand");
|
|
|
|
}
|
2016-02-04 09:57:03 +01:00
|
|
|
|
|
|
|
return (unsigned)Hash;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool isEqual(const MemOpKey &LHS, const MemOpKey &RHS) {
|
|
|
|
// Checking any field of MemOpKey is enough to determine if the key is
|
|
|
|
// empty or tombstone.
|
|
|
|
if (RHS.Disp == PtrInfo::getEmptyKey())
|
|
|
|
return LHS.Disp == PtrInfo::getEmptyKey();
|
|
|
|
if (RHS.Disp == PtrInfo::getTombstoneKey())
|
|
|
|
return LHS.Disp == PtrInfo::getTombstoneKey();
|
|
|
|
return LHS == RHS;
|
|
|
|
}
|
|
|
|
};
|
2017-10-05 02:33:50 +02:00
|
|
|
|
|
|
|
} // end namespace llvm
|
2016-02-04 09:57:03 +01:00
|
|
|
|
2016-08-06 13:13:10 +02:00
|
|
|
/// \brief Returns a hash table key based on memory operands of \p MI. The
|
|
|
|
/// number of the first memory operand of \p MI is specified through \p N.
|
2016-02-04 09:57:03 +01:00
|
|
|
static inline MemOpKey getMemOpKey(const MachineInstr &MI, unsigned N) {
|
|
|
|
assert((isLEA(MI) || MI.mayLoadOrStore()) &&
|
|
|
|
"The instruction must be a LEA, a load or a store");
|
|
|
|
return MemOpKey(&MI.getOperand(N + X86::AddrBaseReg),
|
|
|
|
&MI.getOperand(N + X86::AddrScaleAmt),
|
|
|
|
&MI.getOperand(N + X86::AddrIndexReg),
|
|
|
|
&MI.getOperand(N + X86::AddrSegmentReg),
|
|
|
|
&MI.getOperand(N + X86::AddrDisp));
|
|
|
|
}
|
|
|
|
|
[X86] Improvement in CodeGen instruction selection for LEAs.
Summary:
1/ Operand folding during complex pattern matching for LEAs has been extended, such that it promotes Scale to
accommodate similar operand appearing in the DAG e.g.
T1 = A + B
T2 = T1 + 10
T3 = T2 + A
For above DAG rooted at T3, X86AddressMode will now look like
Base = B , Index = A , Scale = 2 , Disp = 10
2/ During OptimizeLEAPass down the pipeline factorization is now performed over LEAs so that if there is an opportunity
then complex LEAs (having 3 operands) could be factored out e.g.
leal 1(%rax,%rcx,1), %rdx
leal 1(%rax,%rcx,2), %rcx
will be factored as following
leal 1(%rax,%rcx,1), %rdx
leal (%rdx,%rcx) , %edx
3/ Aggressive operand folding for AM based selection for LEAs is sensitive to loops, thus avoiding creation of any complex LEAs within a loop.
4/ Simplify LEA converts (lea (BASE,1,INDEX,0) --> add (BASE, INDEX) which offers better through put.
PR32755 will be taken care of by this pathc.
Previous patch revisions : r313343 , r314886
Reviewers: lsaba, RKSimon, craig.topper, qcolombet, jmolloy, jbhateja
Reviewed By: lsaba, RKSimon, jbhateja
Subscribers: jmolloy, spatel, igorb, llvm-commits
Differential Revision: https://reviews.llvm.org/D35014
llvm-svn: 319543
2017-12-01 15:07:38 +01:00
|
|
|
/// \brief Builds a deep-check (CSE) key from the memory operands of \p MI.
/// The scale is replaced by a shared dummy immediate of 1 so that keys
/// differing only by scale can match; DispCheck=true requests the deep
/// comparison path in MemOpKey::operator==.
static inline MemOpKey getMemOpCSEKey(const MachineInstr &MI, unsigned N) {
  // Shared placeholder scale operand; note it has no parent instruction.
  static MachineOperand DummyScale = MachineOperand::CreateImm(1);
  assert((isLEA(MI) || MI.mayLoadOrStore()) &&
         "The instruction must be a LEA, a load or a store");
  const MachineOperand *Base = &MI.getOperand(N + X86::AddrBaseReg);
  const MachineOperand *Index = &MI.getOperand(N + X86::AddrIndexReg);
  const MachineOperand *Segment = &MI.getOperand(N + X86::AddrSegmentReg);
  const MachineOperand *Disp = &MI.getOperand(N + X86::AddrDisp);
  return MemOpKey(Base, &DummyScale, Index, Segment, Disp, /*DispCheck=*/true);
}
|
|
|
|
|
2016-02-04 09:57:03 +01:00
|
|
|
static inline bool isIdenticalOp(const MachineOperand &MO1,
|
|
|
|
const MachineOperand &MO2) {
|
|
|
|
return MO1.isIdenticalTo(MO2) &&
|
|
|
|
(!MO1.isReg() ||
|
|
|
|
!TargetRegisterInfo::isPhysicalRegister(MO1.getReg()));
|
|
|
|
}
|
|
|
|
|
[X86] Improvement in CodeGen instruction selection for LEAs.
Summary:
1/ Operand folding during complex pattern matching for LEAs has been extended, such that it promotes Scale to
accommodate similar operand appearing in the DAG e.g.
T1 = A + B
T2 = T1 + 10
T3 = T2 + A
For above DAG rooted at T3, X86AddressMode will now look like
Base = B , Index = A , Scale = 2 , Disp = 10
2/ During OptimizeLEAPass down the pipeline factorization is now performed over LEAs so that if there is an opportunity
then complex LEAs (having 3 operands) could be factored out e.g.
leal 1(%rax,%rcx,1), %rdx
leal 1(%rax,%rcx,2), %rcx
will be factored as following
leal 1(%rax,%rcx,1), %rdx
leal (%rdx,%rcx) , %edx
3/ Aggressive operand folding for AM based selection for LEAs is sensitive to loops, thus avoiding creation of any complex LEAs within a loop.
4/ Simplify LEA converts (lea (BASE,1,INDEX,0) --> add (BASE, INDEX) which offers better through put.
PR32755 will be taken care of by this pathc.
Previous patch revisions : r313343 , r314886
Reviewers: lsaba, RKSimon, craig.topper, qcolombet, jmolloy, jbhateja
Reviewed By: lsaba, RKSimon, jbhateja
Subscribers: jmolloy, spatel, igorb, llvm-commits
Differential Revision: https://reviews.llvm.org/D35014
llvm-svn: 319543
2017-12-01 15:07:38 +01:00
|
|
|
static bool isIdenticalMI(MachineRegisterInfo *MRI, const MachineOperand &MO1,
                          const MachineOperand &MO2) {
  // Only register operands can have defining instructions.
  if (!MO1.isReg() || !MO2.isReg())
    return false;

  // Compare the two defining instructions: same opcode, same operand count.
  // NOTE(review): assumes both registers are virtual — getVRegDef is only
  // meaningful for virtual registers; callers guard MO1 via isDefCopyLike.
  MachineInstr *Def1 = MRI->getVRegDef(MO1.getReg());
  MachineInstr *Def2 = MRI->getVRegDef(MO2.getReg());
  if (!Def1 || !Def2)
    return false;
  if (Def1->getOpcode() != Def2->getOpcode())
    return false;
  if (Def1->getNumOperands() != Def2->getNumOperands())
    return false;

  // Operand 0 is the def register itself and is deliberately skipped; two
  // copies of the same value may define different registers.
  for (unsigned Idx = 1, End = Def1->getNumOperands(); Idx < End; ++Idx)
    if (!isIdenticalOp(Def1->getOperand(Idx), Def2->getOperand(Idx)))
      return false;
  return true;
}
|
|
|
|
|
2016-02-24 08:58:02 +01:00
|
|
|
#ifndef NDEBUG
|
|
|
|
static bool isValidDispOp(const MachineOperand &MO) {
|
|
|
|
return MO.isImm() || MO.isCPI() || MO.isJTI() || MO.isSymbol() ||
|
2016-04-26 14:18:12 +02:00
|
|
|
MO.isGlobal() || MO.isBlockAddress() || MO.isMCSymbol() || MO.isMBB();
|
2016-02-24 08:58:02 +01:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2016-02-20 11:58:28 +01:00
|
|
|
static bool isSimilarDispOp(const MachineOperand &MO1,
|
|
|
|
const MachineOperand &MO2) {
|
|
|
|
assert(isValidDispOp(MO1) && isValidDispOp(MO2) &&
|
|
|
|
"Address displacement operand is not valid");
|
|
|
|
return (MO1.isImm() && MO2.isImm()) ||
|
|
|
|
(MO1.isCPI() && MO2.isCPI() && MO1.getIndex() == MO2.getIndex()) ||
|
|
|
|
(MO1.isJTI() && MO2.isJTI() && MO1.getIndex() == MO2.getIndex()) ||
|
|
|
|
(MO1.isSymbol() && MO2.isSymbol() &&
|
|
|
|
MO1.getSymbolName() == MO2.getSymbolName()) ||
|
|
|
|
(MO1.isGlobal() && MO2.isGlobal() &&
|
|
|
|
MO1.getGlobal() == MO2.getGlobal()) ||
|
|
|
|
(MO1.isBlockAddress() && MO2.isBlockAddress() &&
|
|
|
|
MO1.getBlockAddress() == MO2.getBlockAddress()) ||
|
|
|
|
(MO1.isMCSymbol() && MO2.isMCSymbol() &&
|
2016-04-26 14:18:12 +02:00
|
|
|
MO1.getMCSymbol() == MO2.getMCSymbol()) ||
|
|
|
|
(MO1.isMBB() && MO2.isMBB() && MO1.getMBB() == MO2.getMBB());
|
2016-02-20 11:58:28 +01:00
|
|
|
}
|
|
|
|
|
2016-02-04 09:57:03 +01:00
|
|
|
static inline bool isLEA(const MachineInstr &MI) {
|
|
|
|
unsigned Opcode = MI.getOpcode();
|
|
|
|
return Opcode == X86::LEA16r || Opcode == X86::LEA32r ||
|
|
|
|
Opcode == X86::LEA64r || Opcode == X86::LEA64_32r;
|
|
|
|
}
|
|
|
|
|
[X86] Improvement in CodeGen instruction selection for LEAs.
Summary:
1/ Operand folding during complex pattern matching for LEAs has been extended, such that it promotes Scale to
accommodate similar operand appearing in the DAG e.g.
T1 = A + B
T2 = T1 + 10
T3 = T2 + A
For above DAG rooted at T3, X86AddressMode will now look like
Base = B , Index = A , Scale = 2 , Disp = 10
2/ During OptimizeLEAPass down the pipeline factorization is now performed over LEAs so that if there is an opportunity
then complex LEAs (having 3 operands) could be factored out e.g.
leal 1(%rax,%rcx,1), %rdx
leal 1(%rax,%rcx,2), %rcx
will be factored as following
leal 1(%rax,%rcx,1), %rdx
leal (%rdx,%rcx) , %edx
3/ Aggressive operand folding for AM based selection for LEAs is sensitive to loops, thus avoiding creation of any complex LEAs within a loop.
4/ Simplify LEA converts (lea (BASE,1,INDEX,0) --> add (BASE, INDEX) which offers better through put.
PR32755 will be taken care of by this pathc.
Previous patch revisions : r313343 , r314886
Reviewers: lsaba, RKSimon, craig.topper, qcolombet, jmolloy, jbhateja
Reviewed By: lsaba, RKSimon, jbhateja
Subscribers: jmolloy, spatel, igorb, llvm-commits
Differential Revision: https://reviews.llvm.org/D35014
llvm-svn: 319543
2017-12-01 15:07:38 +01:00
|
|
|
/// Returns true if the virtual register referenced by \p Opr is defined by a
/// copy-like instruction (COPY, PHI, subreg insert/extract, ...).
/// Non-register operands, operands whose parent instruction has been
/// unlinked from its basic block, and physical registers all yield false.
static bool isDefCopyLike(MachineRegisterInfo *MRI, const MachineOperand &Opr) {
  if (!Opr.isReg())
    return false;
  // An operand whose instruction is no longer attached to a basic block has
  // been erased; it cannot be queried for a defining instruction.
  if (!Opr.getParent()->getParent())
    return false;
  if (TargetRegisterInfo::isPhysicalRegister(Opr.getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(Opr.getReg());
  return Def && Def->isCopyLike();
}
|
|
|
|
|
2015-12-04 11:53:15 +01:00
|
|
|
namespace {
|
2017-10-05 02:33:50 +02:00
|
|
|
|
[X86] Improvement in CodeGen instruction selection for LEAs.
Summary:
1/ Operand folding during complex pattern matching for LEAs has been extended, such that it promotes Scale to
accommodate similar operand appearing in the DAG e.g.
T1 = A + B
T2 = T1 + 10
T3 = T2 + A
For above DAG rooted at T3, X86AddressMode will now look like
Base = B , Index = A , Scale = 2 , Disp = 10
2/ During OptimizeLEAPass down the pipeline factorization is now performed over LEAs so that if there is an opportunity
then complex LEAs (having 3 operands) could be factored out e.g.
leal 1(%rax,%rcx,1), %rdx
leal 1(%rax,%rcx,2), %rcx
will be factored as following
leal 1(%rax,%rcx,1), %rdx
leal (%rdx,%rcx) , %edx
3/ Aggressive operand folding for AM based selection for LEAs is sensitive to loops, thus avoiding creation of any complex LEAs within a loop.
4/ Simplify LEA converts (lea (BASE,1,INDEX,0) --> add (BASE, INDEX) which offers better through put.
PR32755 will be taken care of by this pathc.
Previous patch revisions : r313343 , r314886
Reviewers: lsaba, RKSimon, craig.topper, qcolombet, jmolloy, jbhateja
Reviewed By: lsaba, RKSimon, jbhateja
Subscribers: jmolloy, spatel, igorb, llvm-commits
Differential Revision: https://reviews.llvm.org/D35014
llvm-svn: 319543
2017-12-01 15:07:38 +01:00
|
|
|
/// This class captures the functions and attributes
|
|
|
|
/// needed to factorize LEA within and across basic
|
|
|
|
/// blocks.LEA instruction with same BASE,OFFSET and
|
|
|
|
/// INDEX are the candidates for factorization.
|
|
|
|
class FactorizeLEAOpt {
|
|
|
|
public:
|
|
|
|
using LEAListT = std::list<MachineInstr *>;
|
|
|
|
using LEAMapT = DenseMap<MemOpKey, LEAListT>;
|
|
|
|
using ValueT = DenseMap<MemOpKey, unsigned>;
|
|
|
|
using ScopeEntryT = std::pair<MachineBasicBlock *, ValueT>;
|
|
|
|
using ScopeStackT = std::vector<ScopeEntryT>;
|
|
|
|
|
|
|
|
FactorizeLEAOpt() = default;
|
|
|
|
FactorizeLEAOpt(const FactorizeLEAOpt &) = delete;
|
|
|
|
FactorizeLEAOpt &operator=(const FactorizeLEAOpt &) = delete;
|
|
|
|
|
|
|
|
void performCleanup() {
|
|
|
|
for (auto LEA : removedLEAs)
|
|
|
|
LEA->eraseFromParent();
|
|
|
|
LEAs.clear();
|
|
|
|
Stack.clear();
|
|
|
|
removedLEAs.clear();
|
|
|
|
}
|
|
|
|
|
|
|
|
LEAMapT &getLEAMap() { return LEAs; }
|
|
|
|
ScopeEntryT *getTopScope() { return &Stack.back(); }
|
|
|
|
|
|
|
|
void addForLazyRemoval(MachineInstr *Instr) { removedLEAs.insert(Instr); }
|
|
|
|
|
|
|
|
bool checkIfScheduledForRemoval(MachineInstr *Instr) {
|
|
|
|
return removedLEAs.find(Instr) != removedLEAs.end();
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Push the ScopeEntry for the BasicBlock over Stack.
|
|
|
|
/// Also traverses over list of instruction and update
|
|
|
|
/// LEAs Map and ScopeEntry for each LEA instruction
|
|
|
|
/// found using insertLEA().
|
|
|
|
void collectDataForBasicBlock(MachineBasicBlock *MBB);
|
|
|
|
|
|
|
|
/// Stores the size of MachineInstr list corrosponding
|
|
|
|
/// to key K from LEAs MAP into the ScopeEntry of
|
|
|
|
/// the basic block, then insert the LEA at the beginning
|
|
|
|
/// of the list.
|
|
|
|
void insertLEA(MachineInstr *MI);
|
|
|
|
|
|
|
|
/// Pops out ScopeEntry of top most BasicBlock from the stack
|
|
|
|
/// and remove the LEA instructions contained in the scope
|
|
|
|
/// from the LEAs Map.
|
|
|
|
void removeDataForBasicBlock();
|
|
|
|
|
|
|
|
/// If LEA contains Physical Registers then its not a candidate
|
|
|
|
/// for factorizations since physical registers may violate SSA
|
|
|
|
/// semantics of MI.
|
|
|
|
bool containsPhyReg(MachineInstr *MI, unsigned RecLevel);
|
|
|
|
|
|
|
|
private:
|
|
|
|
ScopeStackT Stack;
|
|
|
|
LEAMapT LEAs;
|
|
|
|
std::set<MachineInstr *> removedLEAs;
|
|
|
|
};
|
|
|
|
|
|
|
|
/// Open a new scope for \p MBB on the stack and record every LEA the block
/// contains in the LEAs map via insertLEA().
void FactorizeLEAOpt::collectDataForBasicBlock(MachineBasicBlock *MBB) {
  // The scope starts with an empty snapshot map; insertLEA() fills it in.
  Stack.push_back(ScopeEntryT(MBB, ValueT()));

  for (MachineInstr &MI : *MBB)
    if (isLEA(MI))
      insertLEA(&MI);
}
|
|
|
|
|
|
|
|
void FactorizeLEAOpt::removeDataForBasicBlock() {
|
|
|
|
ScopeEntryT &SE = Stack.back();
|
|
|
|
for (auto MapEntry : SE.second) {
|
|
|
|
LEAMapT::iterator Itr = LEAs.find(MapEntry.first);
|
|
|
|
assert((Itr != LEAs.end()) &&
|
|
|
|
"LEAs map must have a node corresponding to ScopeEntry's Key.");
|
|
|
|
|
|
|
|
while (((*Itr).second.size() > MapEntry.second))
|
|
|
|
(*Itr).second.pop_front();
|
|
|
|
// If list goes empty remove entry from LEAs Map.
|
|
|
|
if ((*Itr).second.empty())
|
|
|
|
LEAs.erase(Itr);
|
|
|
|
}
|
|
|
|
Stack.pop_back();
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns true if \p MI uses a physical register, looking through up to
/// \p RecLevel levels of copy-like defining instructions. LEAs that touch
/// physical registers are rejected as factorization candidates because
/// rewriting them may violate the SSA-like constraints on such registers.
bool FactorizeLEAOpt::containsPhyReg(MachineInstr *MI, unsigned RecLevel) {
  // RecLevel == 0 terminates the recursion through copy chains.
  if (!MI || !RecLevel)
    return false;

  MachineRegisterInfo *MRI = MI->getRegInfo();
  // Iterate by const reference; copying a MachineOperand per iteration is
  // wasteful and unnecessary for a read-only scan.
  for (const MachineOperand &Operand : MI->operands()) {
    if (!Operand.isReg())
      continue;
    if (TargetRegisterInfo::isPhysicalRegister(Operand.getReg()))
      return true;
    // Follow the virtual register's def if it is a copy-like instruction;
    // the MI != OperDefMI guard prevents self-recursion.
    MachineInstr *OperDefMI = MRI->getVRegDef(Operand.getReg());
    if (OperDefMI && (MI != OperDefMI) && OperDefMI->isCopyLike() &&
        containsPhyReg(OperDefMI, RecLevel - 1))
      return true;
  }
  return false;
}
|
|
|
|
|
|
|
|
/// Record the LEA \p MI in the LEAs map and, on the first occurrence of its
/// key in the current scope, remember the pre-insertion list size so the
/// scope can later be unwound to it.
void FactorizeLEAOpt::insertLEA(MachineInstr *MI) {
  if (containsPhyReg(MI, 2))
    return;

  // Factorization is beneficial only for complex LEAs: a non-zero
  // displacement together with both a base and an index register.
  MachineOperand &Base = MI->getOperand(1);
  MachineOperand &Index = MI->getOperand(3);
  MachineOperand &Offset = MI->getOperand(4);
  if ((Offset.isImm() && !Offset.getImm()) ||
      (!Base.isReg() || !Base.getReg()) || (!Index.isReg() || !Index.getReg()))
    return;

  MemOpKey Key = getMemOpCSEKey(*MI, 1);

  // Push MI at the front of the list for its key, capturing the list size
  // as it was before this insertion.
  unsigned SizeBefore;
  LEAMapT::iterator Itr = LEAs.find(Key);
  if (Itr == LEAs.end()) {
    SizeBefore = 0;
    LEAs[Key].push_front(MI);
  } else {
    SizeBefore = Itr->second.size();
    Itr->second.push_front(MI);
  }

  // Only the first insertion for this key inside the current scope records
  // the snapshot size; later insertions must not overwrite it.
  ScopeEntryT *TopScope = getTopScope();
  if (TopScope->second.find(Key) == TopScope->second.end())
    TopScope->second[Key] = SizeBefore;
}
|
|
|
|
|
2015-12-04 11:53:15 +01:00
|
|
|
/// Machine-function pass that removes redundant address calculations done
/// via LEA: it substitutes LEA-computed addresses into loads/stores, removes
/// LEAs that differ only by displacement, and factors out complex LEAs.
class OptimizeLEAPass : public MachineFunctionPass {
public:
  OptimizeLEAPass() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "X86 LEA Optimize"; }

  /// \brief Loop over all of the basic blocks, replacing address
  /// calculations in load and store instructions, if it's already
  /// been calculated by LEA. Also, remove redundant LEAs.
  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
    // Dominator tree drives the scoped (stack-based) LEA factorization walk.
    AU.addRequired<MachineDominatorTree>();
  }

private:
  // LEAs in a basic block, grouped by memory-operand key (base, index,
  // scale, segment — everything except the displacement).
  using MemOpMap = DenseMap<MemOpKey, SmallVector<MachineInstr *, 16>>;

  /// \brief Returns a distance between two instructions inside one basic block.
  /// Negative result means, that instructions occur in reverse order.
  int calcInstrDist(const MachineInstr &First, const MachineInstr &Last);

  /// \brief Choose the best \p LEA instruction from the \p List to replace
  /// address calculation in \p MI instruction. Return the address displacement
  /// and the distance between \p MI and the chosen \p BestLEA in
  /// \p AddrDispShift and \p Dist.
  bool chooseBestLEA(const SmallVectorImpl<MachineInstr *> &List,
                     const MachineInstr &MI, MachineInstr *&BestLEA,
                     int64_t &AddrDispShift, int &Dist);

  /// \brief Returns the difference between addresses' displacements of \p MI1
  /// and \p MI2. The numbers of the first memory operands for the instructions
  /// are specified through \p N1 and \p N2.
  int64_t getAddrDispShift(const MachineInstr &MI1, unsigned N1,
                           const MachineInstr &MI2, unsigned N2) const;

  /// \brief Returns true if the \p Last LEA instruction can be replaced by the
  /// \p First. The difference between displacements of the addresses calculated
  /// by these LEAs is returned in \p AddrDispShift. It'll be used for proper
  /// replacement of the \p Last LEA's uses with the \p First's def register.
  bool isReplaceable(const MachineInstr &First, const MachineInstr &Last,
                     int64_t &AddrDispShift) const;

  /// \brief Find all LEA instructions in the basic block. Also, assign position
  /// numbers to all instructions in the basic block to speed up calculation of
  /// distance between them.
  void findLEAs(const MachineBasicBlock &MBB, MemOpMap &LEAs);

  /// \brief Removes redundant address calculations.
  bool removeRedundantAddrCalc(MemOpMap &LEAs);

  /// Replace debug value MI with a new debug value instruction using register
  /// VReg with an appropriate offset and DIExpression to incorporate the
  /// address displacement AddrDispShift. Return new debug value instruction.
  MachineInstr *replaceDebugValue(MachineInstr &MI, unsigned VReg,
                                  int64_t AddrDispShift);

  /// \brief Removes LEAs which calculate similar addresses.
  bool removeRedundantLEAs(MemOpMap &LEAs);

  /// \brief Visit over basic blocks, collect LEAs in a scoped
  /// hash map (FactorizeLEAOpt::LEAs) and try to factor them out.
  bool FactorizeLEAsAllBasicBlocks(MachineFunction &MF);

  // Dominator-tree-driven per-block step of the factorization walk.
  bool FactorizeLEAsBasicBlock(MachineDomTreeNode *DN);

  /// \brief Factor out LEAs which share Base,Index,Offset and Segment.
  bool processBasicBlock(const MachineBasicBlock &MBB);

  /// \brief Try to replace LEA with a lower strength instruction
  /// to improves latency and throughput.
  bool strengthReduceLEAs(MemOpMap &LEAs, const MachineBasicBlock &MBB);

  // Position numbers assigned by findLEAs(); used by calcInstrDist().
  DenseMap<const MachineInstr *, unsigned> InstrPos;

  // Scoped state for LEA factorization across basic blocks.
  FactorizeLEAOpt FactorOpt;

  MachineDominatorTree *DT;
  MachineRegisterInfo *MRI;
  const X86InstrInfo *TII;
  const X86RegisterInfo *TRI;

  static char ID;
};
|
2017-10-05 02:33:50 +02:00
|
|
|
|
|
|
|
} // end anonymous namespace
|
|
|
|
|
2015-12-04 11:53:15 +01:00
|
|
|
char OptimizeLEAPass::ID = 0;

// Factory used by the X86 target to add this pass to the codegen pipeline.
FunctionPass *llvm::createX86OptimizeLEAs() { return new OptimizeLEAPass(); }
|
|
|
|
|
|
|
|
int OptimizeLEAPass::calcInstrDist(const MachineInstr &First,
|
|
|
|
const MachineInstr &Last) {
|
2016-01-11 12:52:29 +01:00
|
|
|
// Both instructions must be in the same basic block and they must be
|
|
|
|
// presented in InstrPos.
|
|
|
|
assert(Last.getParent() == First.getParent() &&
|
2015-12-04 11:53:15 +01:00
|
|
|
"Instructions are in different basic blocks");
|
2016-01-11 12:52:29 +01:00
|
|
|
assert(InstrPos.find(&First) != InstrPos.end() &&
|
|
|
|
InstrPos.find(&Last) != InstrPos.end() &&
|
|
|
|
"Instructions' positions are undefined");
|
2015-12-04 11:53:15 +01:00
|
|
|
|
2016-01-11 12:52:29 +01:00
|
|
|
return InstrPos[&Last] - InstrPos[&First];
|
2015-12-04 11:53:15 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Find the best LEA instruction in the List to replace address recalculation in
|
|
|
|
// MI. Such LEA must meet these requirements:
|
|
|
|
// 1) The address calculated by the LEA differs only by the displacement from
|
|
|
|
// the address used in MI.
|
|
|
|
// 2) The register class of the definition of the LEA is compatible with the
|
|
|
|
// register class of the address base register of MI.
|
|
|
|
// 3) Displacement of the new memory operand should fit in 1 byte if possible.
|
|
|
|
// 4) The LEA should be as close to MI as possible, and prior to it if
|
|
|
|
// possible.
|
|
|
|
// Find the best LEA instruction in the List to replace address recalculation in
// MI. Such LEA must meet these requirements:
// 1) The address calculated by the LEA differs only by the displacement from
//    the address used in MI.
// 2) The register class of the definition of the LEA is compatible with the
//    register class of the address base register of MI.
// 3) Displacement of the new memory operand should fit in 1 byte if possible.
// 4) The LEA should be as close to MI as possible, and prior to it if
//    possible.
// Returns true (with BestLEA/AddrDispShift/Dist filled in) if a suitable
// LEA was found.
bool OptimizeLEAPass::chooseBestLEA(const SmallVectorImpl<MachineInstr *> &List,
                                    const MachineInstr &MI,
                                    MachineInstr *&BestLEA,
                                    int64_t &AddrDispShift, int &Dist) {
  const MachineFunction *MF = MI.getParent()->getParent();
  const MCInstrDesc &Desc = MI.getDesc();
  // Index of MI's first memory operand (memory-operand number plus the
  // per-instruction operand bias).
  int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags) +
                X86II::getOperandBias(Desc);

  BestLEA = nullptr;

  // Loop over all LEA instructions.
  for (auto DefMI : List) {
    // Get new address displacement.
    int64_t AddrDispShiftTemp = getAddrDispShift(MI, MemOpNo, *DefMI, 1);

    // Make sure address displacement fits 4 bytes.
    if (!isInt<32>(AddrDispShiftTemp))
      continue;

    // Check that LEA def register can be used as MI address base. Some
    // instructions can use a limited set of registers as address base, for
    // example MOV8mr_NOREX. We could constrain the register class of the LEA
    // def to suit MI, however since this case is very rare and hard to
    // reproduce in a test it's just more reliable to skip the LEA.
    if (TII->getRegClass(Desc, MemOpNo + X86::AddrBaseReg, TRI, *MF) !=
        MRI->getRegClass(DefMI->getOperand(0).getReg()))
      continue;

    // Choose the closest LEA instruction from the list, prior to MI if
    // possible. Note that we took into account resulting address displacement
    // as well. Also note that the list is sorted by the order in which the LEAs
    // occur, so the break condition is pretty simple.
    int DistTemp = calcInstrDist(*DefMI, MI);
    assert(DistTemp != 0 &&
           "The distance between two different instructions cannot be zero");
    // DistTemp > 0 means this LEA precedes MI; any preceding LEA, or the
    // first candidate seen, may become the current best.
    if (DistTemp > 0 || BestLEA == nullptr) {
      // Do not update return LEA, if the current one provides a displacement
      // which fits in 1 byte, while the new candidate does not.
      if (BestLEA != nullptr && !isInt<8>(AddrDispShiftTemp) &&
          isInt<8>(AddrDispShift))
        continue;

      BestLEA = DefMI;
      AddrDispShift = AddrDispShiftTemp;
      Dist = DistTemp;
    }

    // FIXME: Maybe we should not always stop at the first LEA after MI.
    if (DistTemp < 0)
      break;
  }

  return BestLEA != nullptr;
}
|
|
|
|
|
2016-02-04 09:57:03 +01:00
|
|
|
// Get the difference between the addresses' displacements of the two
|
|
|
|
// instructions \p MI1 and \p MI2. The numbers of the first memory operands are
|
|
|
|
// passed through \p N1 and \p N2.
|
|
|
|
// Get the difference between the addresses' displacements of the two
// instructions \p MI1 and \p MI2. The numbers of the first memory operands are
// passed through \p N1 and \p N2.
int64_t OptimizeLEAPass::getAddrDispShift(const MachineInstr &MI1, unsigned N1,
                                          const MachineInstr &MI2,
                                          unsigned N2) const {
  const MachineOperand &Op1 = MI1.getOperand(N1 + X86::AddrDisp);
  const MachineOperand &Op2 = MI2.getOperand(N2 + X86::AddrDisp);

  assert(isSimilarDispOp(Op1, Op2) &&
         "Address displacement operands are not compatible");

  // After the assert above both operands are known to be of the same valid
  // kind and to reference the same symbol/index/address, so the shift is a
  // plain difference of their scalar parts.
  if (Op1.isJTI())
    return 0; // Jump-table indices carry no displacement to compare.
  if (Op1.isImm())
    return Op1.getImm() - Op2.getImm();
  return Op1.getOffset() - Op2.getOffset();
}
|
|
|
|
|
2016-01-13 12:30:44 +01:00
|
|
|
// Check that the Last LEA can be replaced by the First LEA. To be so,
|
|
|
|
// these requirements must be met:
|
|
|
|
// 1) Addresses calculated by LEAs differ only by displacement.
|
|
|
|
// 2) Def registers of LEAs belong to the same class.
|
|
|
|
// 3) All uses of the Last LEA def register are replaceable, thus the
|
|
|
|
// register is used only as address base.
|
|
|
|
// Check that the Last LEA can be replaced by the First LEA. To be so,
// these requirements must be met:
// 1) Addresses calculated by LEAs differ only by displacement.
// 2) Def registers of LEAs belong to the same class.
// 3) All uses of the Last LEA def register are replaceable, thus the
//    register is used only as address base.
// On success the displacement difference is returned in AddrDispShift.
bool OptimizeLEAPass::isReplaceable(const MachineInstr &First,
                                    const MachineInstr &Last,
                                    int64_t &AddrDispShift) const {
  assert(isLEA(First) && isLEA(Last) &&
         "The function works only with LEA instructions");

  // Make sure that LEA def registers belong to the same class. There may be
  // instructions (like MOV8mr_NOREX) which allow a limited set of registers to
  // be used as their operands, so we must be sure that replacing one LEA
  // with another won't lead to putting a wrong register in the instruction.
  if (MRI->getRegClass(First.getOperand(0).getReg()) !=
      MRI->getRegClass(Last.getOperand(0).getReg()))
    return false;

  // Get new address displacement.
  AddrDispShift = getAddrDispShift(Last, 1, First, 1);

  // Loop over all uses of the Last LEA to check that its def register is
  // used only as address base for memory accesses. If so, it can be
  // replaced, otherwise - no.
  for (auto &MO : MRI->use_nodbg_operands(Last.getOperand(0).getReg())) {
    MachineInstr &MI = *MO.getParent();

    // Get the number of the first memory operand.
    const MCInstrDesc &Desc = MI.getDesc();
    int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);

    // If the use instruction has no memory operand - the LEA is not
    // replaceable.
    if (MemOpNo < 0)
      return false;

    MemOpNo += X86II::getOperandBias(Desc);

    // If the address base of the use instruction is not the LEA def register -
    // the LEA is not replaceable.
    if (!isIdenticalOp(MI.getOperand(MemOpNo + X86::AddrBaseReg), MO))
      return false;

    // If the LEA def register is used as any other operand of the use
    // instruction - the LEA is not replaceable.
    for (unsigned i = 0; i < MI.getNumOperands(); i++)
      if (i != (unsigned)(MemOpNo + X86::AddrBaseReg) &&
          isIdenticalOp(MI.getOperand(i), MO))
        return false;

    // Check that the new address displacement will fit 4 bytes.
    if (MI.getOperand(MemOpNo + X86::AddrDisp).isImm() &&
        !isInt<32>(MI.getOperand(MemOpNo + X86::AddrDisp).getImm() +
                   AddrDispShift))
      return false;
  }

  return true;
}
|
|
|
|
|
2016-02-04 09:57:03 +01:00
|
|
|
void OptimizeLEAPass::findLEAs(const MachineBasicBlock &MBB, MemOpMap &LEAs) {
|
2016-01-11 12:52:29 +01:00
|
|
|
unsigned Pos = 0;
|
2015-12-04 11:53:15 +01:00
|
|
|
for (auto &MI : MBB) {
|
2016-01-11 12:52:29 +01:00
|
|
|
// Assign the position number to the instruction. Note that we are going to
|
|
|
|
// move some instructions during the optimization however there will never
|
|
|
|
// be a need to move two instructions before any selected instruction. So to
|
|
|
|
// avoid multiple positions' updates during moves we just increase position
|
|
|
|
// counter by two leaving a free space for instructions which will be moved.
|
|
|
|
InstrPos[&MI] = Pos += 2;
|
|
|
|
|
2015-12-04 11:53:15 +01:00
|
|
|
if (isLEA(MI))
|
2016-02-04 09:57:03 +01:00
|
|
|
LEAs[getMemOpKey(MI, 1)].push_back(const_cast<MachineInstr *>(&MI));
|
2015-12-04 11:53:15 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Try to find load and store instructions which recalculate addresses already
|
|
|
|
// calculated by some LEA and replace their memory operands with its def
|
|
|
|
// register.
|
2016-02-04 09:57:03 +01:00
|
|
|
// Try to find load and store instructions which recalculate addresses already
// calculated by some LEA and replace their memory operands with its def
// register. Returns true if any instruction was changed.
bool OptimizeLEAPass::removeRedundantAddrCalc(MemOpMap &LEAs) {
  bool Changed = false;

  if (LEAs.empty())
    return Changed;

  // All LEAs in the map live in the same basic block; take it from the
  // first one.
  MachineBasicBlock *MBB = (*LEAs.begin()->second.begin())->getParent();

  // Process all instructions in basic block.
  for (auto I = MBB->begin(), E = MBB->end(); I != E;) {
    MachineInstr &MI = *I++;

    // Instruction must be load or store.
    if (!MI.mayLoadOrStore())
      continue;

    // Get the number of the first memory operand.
    const MCInstrDesc &Desc = MI.getDesc();
    int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);

    // If instruction has no memory operand - skip it.
    if (MemOpNo < 0)
      continue;

    MemOpNo += X86II::getOperandBias(Desc);

    // Get the best LEA instruction to replace address calculation.
    MachineInstr *DefMI;
    int64_t AddrDispShift;
    int Dist;
    if (!chooseBestLEA(LEAs[getMemOpKey(MI, MemOpNo)], MI, DefMI, AddrDispShift,
                       Dist))
      continue;

    // If LEA occurs before current instruction, we can freely replace
    // the instruction. If LEA occurs after, we can lift LEA above the
    // instruction and this way to be able to replace it. Since LEA and the
    // instruction have similar memory operands (thus, the same def
    // instructions for these operands), we can always do that, without
    // worries of using registers before their defs.
    if (Dist < 0) {
      DefMI->removeFromParent();
      MBB->insert(MachineBasicBlock::iterator(&MI), DefMI);
      // Slot the moved LEA into the position gap reserved by findLEAs().
      InstrPos[DefMI] = InstrPos[&MI] - 1;

      // Make sure the instructions' position numbers are sane.
      assert(((InstrPos[DefMI] == 1 &&
               MachineBasicBlock::iterator(DefMI) == MBB->begin()) ||
              InstrPos[DefMI] >
                  InstrPos[&*std::prev(MachineBasicBlock::iterator(DefMI))]) &&
             "Instruction positioning is broken");
    }

    // Since we can possibly extend register lifetime, clear kill flags.
    MRI->clearKillFlags(DefMI->getOperand(0).getReg());

    ++NumSubstLEAs;
    DEBUG(dbgs() << "OptimizeLEAs: Candidate to replace: "; MI.dump(););

    // Change instruction operands: rewrite the full memory operand of MI to
    // be [LEA-def-reg * 1 + AddrDispShift] with no index or segment.
    MI.getOperand(MemOpNo + X86::AddrBaseReg)
        .ChangeToRegister(DefMI->getOperand(0).getReg(), false);
    MI.getOperand(MemOpNo + X86::AddrScaleAmt).ChangeToImmediate(1);
    MI.getOperand(MemOpNo + X86::AddrIndexReg)
        .ChangeToRegister(X86::NoRegister, false);
    MI.getOperand(MemOpNo + X86::AddrDisp).ChangeToImmediate(AddrDispShift);
    MI.getOperand(MemOpNo + X86::AddrSegmentReg)
        .ChangeToRegister(X86::NoRegister, false);

    DEBUG(dbgs() << "OptimizeLEAs: Replaced by: "; MI.dump(););

    Changed = true;
  }

  return Changed;
}
|
|
|
|
|
2017-04-28 10:44:30 +02:00
|
|
|
// Replace DBG_VALUE instruction MI with a new one that refers to register
// VReg, folding the address displacement AddrDispShift into the value's
// DIExpression. Returns the newly built DBG_VALUE.
MachineInstr *OptimizeLEAPass::replaceDebugValue(MachineInstr &MI,
                                                 unsigned VReg,
                                                 int64_t AddrDispShift) {
  DIExpression *Expr = const_cast<DIExpression *>(MI.getDebugExpression());

  // Only rewrite the expression when there is a non-zero displacement to
  // account for.
  if (AddrDispShift != 0)
    Expr = DIExpression::prepend(Expr, DIExpression::NoDeref, AddrDispShift,
                                 DIExpression::WithStackValue);

  // Replace DBG_VALUE instruction with modified version.
  MachineBasicBlock *MBB = MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  bool IsIndirect = MI.isIndirectDebugValue();
  const MDNode *Var = MI.getDebugVariable();
  if (IsIndirect)
    assert(MI.getOperand(1).getImm() == 0 && "DBG_VALUE with nonzero offset");
  // Erase the old DBG_VALUE and build the replacement at the same point.
  return BuildMI(*MBB, MBB->erase(&MI), DL, TII->get(TargetOpcode::DBG_VALUE),
                 IsIndirect, VReg, Var, Expr);
}
|
|
|
|
|
2016-01-13 12:30:44 +01:00
|
|
|
// Try to find similar LEAs in the list and replace one with another.
|
2016-02-04 09:57:03 +01:00
|
|
|
bool OptimizeLEAPass::removeRedundantLEAs(MemOpMap &LEAs) {
|
2016-01-13 12:30:44 +01:00
|
|
|
bool Changed = false;
|
|
|
|
|
2016-02-04 09:57:03 +01:00
|
|
|
// Loop over all entries in the table.
|
|
|
|
for (auto &E : LEAs) {
|
|
|
|
auto &List = E.second;
|
|
|
|
|
|
|
|
// Loop over all LEA pairs.
|
|
|
|
auto I1 = List.begin();
|
|
|
|
while (I1 != List.end()) {
|
|
|
|
MachineInstr &First = **I1;
|
|
|
|
auto I2 = std::next(I1);
|
|
|
|
while (I2 != List.end()) {
|
|
|
|
MachineInstr &Last = **I2;
|
|
|
|
int64_t AddrDispShift;
|
|
|
|
|
2016-11-17 20:03:05 +01:00
|
|
|
// LEAs should be in occurrence order in the list, so we can freely
|
2016-02-04 09:57:03 +01:00
|
|
|
// replace later LEAs with earlier ones.
|
|
|
|
assert(calcInstrDist(First, Last) > 0 &&
|
2016-11-17 20:03:05 +01:00
|
|
|
"LEAs must be in occurrence order in the list");
|
2016-02-04 09:57:03 +01:00
|
|
|
|
|
|
|
// Check that the Last LEA instruction can be replaced by the First.
|
|
|
|
if (!isReplaceable(First, Last, AddrDispShift)) {
|
|
|
|
++I2;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Loop over all uses of the Last LEA and update their operands. Note
|
|
|
|
// that the correctness of this has already been checked in the
|
|
|
|
// isReplaceable function.
|
2017-04-28 10:44:30 +02:00
|
|
|
unsigned FirstVReg = First.getOperand(0).getReg();
|
2017-03-21 12:36:21 +01:00
|
|
|
unsigned LastVReg = Last.getOperand(0).getReg();
|
2017-04-28 10:44:30 +02:00
|
|
|
for (auto UI = MRI->use_begin(LastVReg), UE = MRI->use_end();
|
2016-02-04 09:57:03 +01:00
|
|
|
UI != UE;) {
|
|
|
|
MachineOperand &MO = *UI++;
|
|
|
|
MachineInstr &MI = *MO.getParent();
|
|
|
|
|
2017-04-28 10:44:30 +02:00
|
|
|
if (MI.isDebugValue()) {
|
|
|
|
// Replace DBG_VALUE instruction with modified version using the
|
|
|
|
// register from the replacing LEA and the address displacement
|
|
|
|
// between the LEA instructions.
|
|
|
|
replaceDebugValue(MI, FirstVReg, AddrDispShift);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2016-02-04 09:57:03 +01:00
|
|
|
// Get the number of the first memory operand.
|
|
|
|
const MCInstrDesc &Desc = MI.getDesc();
|
|
|
|
int MemOpNo =
|
2016-04-28 07:58:46 +02:00
|
|
|
X86II::getMemoryOperandNo(Desc.TSFlags) +
|
2016-02-04 09:57:03 +01:00
|
|
|
X86II::getOperandBias(Desc);
|
|
|
|
|
|
|
|
// Update address base.
|
2017-04-28 10:44:30 +02:00
|
|
|
MO.setReg(FirstVReg);
|
2016-02-04 09:57:03 +01:00
|
|
|
|
|
|
|
// Update address disp.
|
2016-02-20 11:58:28 +01:00
|
|
|
MachineOperand &Op = MI.getOperand(MemOpNo + X86::AddrDisp);
|
|
|
|
if (Op.isImm())
|
|
|
|
Op.setImm(Op.getImm() + AddrDispShift);
|
|
|
|
else if (!Op.isJTI())
|
|
|
|
Op.setOffset(Op.getOffset() + AddrDispShift);
|
2016-02-04 09:57:03 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Since we can possibly extend register lifetime, clear kill flags.
|
2017-04-28 10:44:30 +02:00
|
|
|
MRI->clearKillFlags(FirstVReg);
|
2016-02-04 09:57:03 +01:00
|
|
|
|
|
|
|
++NumRedundantLEAs;
|
|
|
|
DEBUG(dbgs() << "OptimizeLEAs: Remove redundant LEA: "; Last.dump(););
|
|
|
|
|
|
|
|
// By this moment, all of the Last LEA's uses must be replaced. So we
|
|
|
|
// can freely remove it.
|
2017-03-21 12:36:21 +01:00
|
|
|
assert(MRI->use_empty(LastVReg) &&
|
2016-02-04 09:57:03 +01:00
|
|
|
"The LEA's def register must have no uses");
|
|
|
|
Last.eraseFromParent();
|
|
|
|
|
|
|
|
// Erase removed LEA from the list.
|
|
|
|
I2 = List.erase(I2);
|
|
|
|
|
[X86] Improvement in CodeGen instruction selection for LEAs.
Summary:
1/ Operand folding during complex pattern matching for LEAs has been extended, such that it promotes Scale to
accommodate similar operand appearing in the DAG e.g.
T1 = A + B
T2 = T1 + 10
T3 = T2 + A
For above DAG rooted at T3, X86AddressMode will now look like
Base = B , Index = A , Scale = 2 , Disp = 10
2/ During OptimizeLEAPass down the pipeline factorization is now performed over LEAs so that if there is an opportunity
then complex LEAs (having 3 operands) could be factored out e.g.
leal 1(%rax,%rcx,1), %rdx
leal 1(%rax,%rcx,2), %rcx
will be factored as following
leal 1(%rax,%rcx,1), %rdx
leal (%rdx,%rcx) , %edx
3/ Aggressive operand folding for AM based selection for LEAs is sensitive to loops, thus avoiding creation of any complex LEAs within a loop.
4/ Simplify LEA converts (lea (BASE,1,INDEX,0) --> add (BASE, INDEX) which offers better through put.
PR32755 will be taken care of by this pathc.
Previous patch revisions : r313343 , r314886
Reviewers: lsaba, RKSimon, craig.topper, qcolombet, jmolloy, jbhateja
Reviewed By: lsaba, RKSimon, jbhateja
Subscribers: jmolloy, spatel, igorb, llvm-commits
Differential Revision: https://reviews.llvm.org/D35014
llvm-svn: 319543
2017-12-01 15:07:38 +01:00
|
|
|
// If List becomes empty remove it from LEAs map.
|
|
|
|
if (List.empty())
|
|
|
|
LEAs.erase(E.first);
|
|
|
|
|
2016-02-04 09:57:03 +01:00
|
|
|
Changed = true;
|
2016-01-13 12:30:44 +01:00
|
|
|
}
|
2016-02-04 09:57:03 +01:00
|
|
|
++I1;
|
2016-01-13 12:30:44 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return Changed;
|
|
|
|
}
|
|
|
|
|
[X86] Improvement in CodeGen instruction selection for LEAs.
Summary:
1/ Operand folding during complex pattern matching for LEAs has been extended, such that it promotes Scale to
accommodate similar operand appearing in the DAG e.g.
T1 = A + B
T2 = T1 + 10
T3 = T2 + A
For above DAG rooted at T3, X86AddressMode will now look like
Base = B , Index = A , Scale = 2 , Disp = 10
2/ During OptimizeLEAPass down the pipeline factorization is now performed over LEAs so that if there is an opportunity
then complex LEAs (having 3 operands) could be factored out e.g.
leal 1(%rax,%rcx,1), %rdx
leal 1(%rax,%rcx,2), %rcx
will be factored as following
leal 1(%rax,%rcx,1), %rdx
leal (%rdx,%rcx) , %edx
3/ Aggressive operand folding for AM based selection for LEAs is sensitive to loops, thus avoiding creation of any complex LEAs within a loop.
4/ Simplify LEA converts (lea (BASE,1,INDEX,0) --> add (BASE, INDEX) which offers better throughput.
PR32755 will be taken care of by this patch.
Previous patch revisions : r313343 , r314886
Reviewers: lsaba, RKSimon, craig.topper, qcolombet, jmolloy, jbhateja
Reviewed By: lsaba, RKSimon, jbhateja
Subscribers: jmolloy, spatel, igorb, llvm-commits
Differential Revision: https://reviews.llvm.org/D35014
llvm-svn: 319543
2017-12-01 15:07:38 +01:00
|
|
|
static inline int getADDrrFromLEA(int LEAOpcode) {
|
|
|
|
switch (LEAOpcode) {
|
|
|
|
default:
|
|
|
|
llvm_unreachable("Unexpected LEA instruction");
|
|
|
|
case X86::LEA16r:
|
|
|
|
return X86::ADD16rr;
|
|
|
|
case X86::LEA32r:
|
|
|
|
return X86::ADD32rr;
|
|
|
|
case X86::LEA64_32r:
|
|
|
|
case X86::LEA64r:
|
|
|
|
return X86::ADD64rr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Strength-reduce LEAs of the simple form R = B + 1*I + 0 into an ADD
// instruction (ADDrr of the matching width), which has better throughput.
//
// \param LEAs  map from address key to the list of LEA instructions in \p BB.
// \param BB    the basic block being processed (cast to non-const to insert).
// \returns true if at least one LEA was converted.
bool OptimizeLEAPass::strengthReduceLEAs(MemOpMap &LEAs,
                                         const MachineBasicBlock &BB) {
  bool Changed = false;

  // Loop over all entries in the table.
  for (auto &E : LEAs) {
    auto &List = E.second;

    // Loop over all LEA pairs.
    auto I1 = List.begin();
    while (I1 != List.end()) {
      MachineInstrBuilder NewMI;
      MachineInstr &First = **I1;
      MachineOperand &Res = First.getOperand(0);
      MachineOperand &Base = First.getOperand(1);
      MachineOperand &Scale = First.getOperand(2);
      MachineOperand &Index = First.getOperand(3);
      MachineOperand &Offset = First.getOperand(4);

      const MCInstrDesc &ADDrr = TII->get(getADDrrFromLEA(First.getOpcode()));
      const DebugLoc DL = First.getDebugLoc();

      // ADD needs both a base and an index register.
      if (!Base.isReg() || !Index.isReg() || !Index.getReg()) {
        I1++;
        continue;
      }

      // Only rewrite purely virtual-register LEAs; physical registers may
      // carry constraints we cannot preserve here.
      if (TargetRegisterInfo::isPhysicalRegister(Res.getReg()) ||
          TargetRegisterInfo::isPhysicalRegister(Base.getReg()) ||
          TargetRegisterInfo::isPhysicalRegister(Index.getReg())) {
        I1++;
        continue;
      }

      // Check for register class compatibility between Result and
      // Index operands.
      const TargetRegisterClass *ResRC = MRI->getRegClass(Res.getReg());
      const TargetRegisterClass *IndexRC = MRI->getRegClass(Index.getReg());
      if (TRI->getRegSizeInBits(*ResRC) != TRI->getRegSizeInBits(*IndexRC)) {
        I1++;
        continue;
      }

      MachineBasicBlock &MBB = *(const_cast<MachineBasicBlock *>(&BB));
      // R = B + I: scale must be 1 and displacement must be 0.
      if (Scale.isImm() && Scale.getImm() == 1 && Offset.isImm() &&
          !Offset.getImm()) {
        NewMI = BuildMI(MBB, &First, DL, ADDrr)
                    .addDef(Res.getReg())
                    .addUse(Base.getReg())
                    .addUse(Index.getReg());
        Changed = NewMI.getInstr() != nullptr;
        First.eraseFromParent();
        I1 = List.erase(I1);
      } else
        I1++;
    }
  }

  // Drop map entries whose lists became empty.
  // NOTE(fix): previously this erase was performed inside the loops above,
  // immediately after "I1 = List.erase(I1)". When the list became empty,
  // "LEAs.erase(E.first)" destroyed the entry while the range-for iterator
  // over the DenseMap was still live and while the inner while loop kept
  // using the (now dangling) List reference — undefined behavior. Deferring
  // the cleanup until after iteration is safe: DenseMap::erase(iterator)
  // does not invalidate other iterators.
  for (auto I = LEAs.begin(), End = LEAs.end(); I != End;) {
    auto Cur = I++;
    if (Cur->second.empty())
      LEAs.erase(Cur);
  }

  return Changed;
}
|
|
|
|
|
|
|
|
bool OptimizeLEAPass::processBasicBlock(const MachineBasicBlock &MBB) {
|
|
|
|
bool cseDone = false;
|
|
|
|
|
|
|
|
// Legal scale value (1,2,4 & 8) vector.
|
|
|
|
auto LegalScale = [](int scale) {
|
|
|
|
return scale == 1 || scale == 2 || scale == 4 || scale == 8;
|
|
|
|
};
|
|
|
|
|
|
|
|
auto CompareFn = [](const MachineInstr *Arg1,
|
|
|
|
const MachineInstr *Arg2) -> bool {
|
|
|
|
return Arg1->getOperand(2).getImm() >= Arg2->getOperand(2).getImm();
|
|
|
|
};
|
|
|
|
|
|
|
|
// Loop over all entries in the table.
|
|
|
|
for (auto &E : FactorOpt.getLEAMap()) {
|
|
|
|
auto &List = E.second;
|
|
|
|
if (List.size() > 1)
|
|
|
|
List.sort(CompareFn);
|
|
|
|
|
|
|
|
// Loop over all LEA pairs.
|
|
|
|
for (auto Iter1 = List.begin(); Iter1 != List.end(); Iter1++) {
|
|
|
|
for (auto Iter2 = std::next(Iter1); Iter2 != List.end(); Iter2++) {
|
|
|
|
MachineInstr &LI1 = **Iter1;
|
|
|
|
MachineInstr &LI2 = **Iter2;
|
|
|
|
|
|
|
|
if (!DT->dominates(&LI2, &LI1))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
int Scale1 = LI1.getOperand(2).getImm();
|
|
|
|
int Scale2 = LI2.getOperand(2).getImm();
|
|
|
|
assert(LI2.getOperand(0).isReg() && "Result is a VirtualReg");
|
|
|
|
DebugLoc DL = LI1.getDebugLoc();
|
|
|
|
|
|
|
|
// Continue if instruction has already been factorized.
|
|
|
|
if (FactorOpt.checkIfScheduledForRemoval(&LI1))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
int Factor = Scale1 - Scale2;
|
|
|
|
if (Factor > 0 && LegalScale(Factor)) {
|
|
|
|
MachineOperand NewBase = LI2.getOperand(0);
|
|
|
|
MachineOperand NewIndex = LI1.getOperand(3);
|
|
|
|
|
|
|
|
const TargetRegisterClass *LI2ResRC =
|
|
|
|
MRI->getRegClass(LI2.getOperand(0).getReg());
|
|
|
|
const TargetRegisterClass *LI1BaseRC =
|
|
|
|
MRI->getRegClass(LI1.getOperand(1).getReg());
|
|
|
|
|
|
|
|
if (TRI->getRegSizeInBits(*LI1BaseRC) >
|
|
|
|
TRI->getRegSizeInBits(*LI2ResRC)) {
|
|
|
|
MachineInstr *LI1IndexDef =
|
|
|
|
MRI->getVRegDef(LI1.getOperand(3).getReg());
|
|
|
|
if (LI1IndexDef->getOpcode() != TargetOpcode::SUBREG_TO_REG)
|
|
|
|
continue;
|
|
|
|
MachineOperand &SubReg = LI1IndexDef->getOperand(2);
|
|
|
|
const TargetRegisterClass *SubRegRC =
|
|
|
|
MRI->getRegClass(SubReg.getReg());
|
|
|
|
if (TRI->getRegSizeInBits(*SubRegRC) !=
|
|
|
|
TRI->getRegSizeInBits(*LI2ResRC))
|
|
|
|
continue;
|
|
|
|
NewIndex = SubReg;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEBUG(dbgs() << "CSE LEAs: Candidate to replace: "; LI1.dump(););
|
|
|
|
MachineInstrBuilder NewMI =
|
|
|
|
BuildMI(*(const_cast<MachineBasicBlock *>(&MBB)), &LI1, DL,
|
|
|
|
TII->get(LI1.getOpcode()))
|
|
|
|
.addDef(LI1.getOperand(0).getReg()) // Dst = Dst of LI1.
|
|
|
|
.addUse(NewBase.getReg()) // Base = Dst to LI2.
|
|
|
|
.addImm(Factor) // Scale = Diff b/w scales.
|
|
|
|
.addUse(NewIndex.getReg()) // Index = Index of LI1.
|
|
|
|
.addImm(0) // Disp = 0
|
|
|
|
.addUse(
|
|
|
|
LI1.getOperand(5).getReg()); // Segment = Segmant of LI1.
|
|
|
|
|
|
|
|
cseDone = NewMI.getInstr() != nullptr;
|
|
|
|
|
|
|
|
/// To preserve the SSA nature we need to remove Def flag
|
|
|
|
/// from old result.
|
|
|
|
LI1.getOperand(0).setIsDef(false);
|
|
|
|
|
|
|
|
/// Lazy removal shall ensure that replaced LEA remains
|
|
|
|
/// till we finish processing all the basic block. This shall
|
|
|
|
/// provide opportunity for further factorization based on
|
|
|
|
/// the replaced LEA which will be legal since it has same
|
|
|
|
/// destination as newly formed LEA.
|
|
|
|
FactorOpt.addForLazyRemoval(&LI1);
|
|
|
|
|
|
|
|
NumFactoredLEAs++;
|
|
|
|
DEBUG(dbgs() << "CSE LEAs: Replaced by: "; NewMI->dump(););
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return cseDone;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Run the LEA factorization over the dominator subtree rooted at \p DN,
// using an explicit work list instead of recursion. For each node the
// block's LEA data is pushed into FactorOpt before processing and popped
// once the node's subtree is fully handled, so factorization can reuse
// LEAs from dominating blocks.
//
// \returns true if processBasicBlock changed any block in the subtree.
bool OptimizeLEAPass::FactorizeLEAsBasicBlock(MachineDomTreeNode *DN) {
  bool Changed = false;
  using StackT = SmallVector<MachineDomTreeNode *, 16>;
  using VisitedNodeMapT = SmallSet<MachineDomTreeNode *, 16>;

  // WorkList holds the current DFS path plus pending siblings; DomNodeMap
  // marks nodes whose children have already been pushed.
  StackT WorkList;
  VisitedNodeMapT DomNodeMap;

  WorkList.push_back(DN);
  while (!WorkList.empty()) {
    MachineDomTreeNode *MDN = WorkList.back();
    // NOTE(review): a node with children resurfaces as WorkList.back()
    // after its subtree is popped, so collectDataForBasicBlock and
    // processBasicBlock run a second time for it — confirm FactorOpt's
    // scope stack tolerates the repeated collect before the single
    // removeDataForBasicBlock below.
    FactorOpt.collectDataForBasicBlock(MDN->getBlock());
    Changed |= processBasicBlock(*MDN->getBlock());

    // First visit: mark the node and queue its dominator-tree children.
    if (DomNodeMap.find(MDN) == DomNodeMap.end()) {
      DomNodeMap.insert(MDN);
      for (auto Child : MDN->getChildren())
        WorkList.push_back(Child);
    }

    // If the top of the work list is still MDN's level, no children were
    // pushed (leaf, or all children already processed), meaning MDN's
    // subtree is complete: pop its data scope and drop it from the list.
    MachineDomTreeNode *TDM = WorkList.back();
    if (MDN->getLevel() == TDM->getLevel()) {
      FactorOpt.removeDataForBasicBlock();
      DomNodeMap.erase(MDN);
      WorkList.pop_back();
    }
  }
  return Changed;
}
|
|
|
|
|
|
|
|
// Run LEA factorization over the whole function by walking the dominator
// tree from its root, then let FactorOpt erase every LEA that was scheduled
// for lazy removal during the walk.
bool OptimizeLEAPass::FactorizeLEAsAllBasicBlocks(MachineFunction &MF) {
  const bool FactoredAny = FactorizeLEAsBasicBlock(DT->getRootNode());
  FactorOpt.performCleanup();
  return FactoredAny;
}
|
|
|
|
|
2015-12-04 11:53:15 +01:00
|
|
|
// Pass entry point: factorize LEAs across the dominator tree, then run the
// per-basic-block LEA optimizations (redundant-LEA removal, strength
// reduction, and — under -Os/-Oz — redundant address-calculation removal).
bool OptimizeLEAPass::runOnMachineFunction(MachineFunction &MF) {
  // Bail out when the optimization is disabled or the function opted out.
  if (DisableX86LEAOpt || skipFunction(*MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
  TRI = MF.getSubtarget<X86Subtarget>().getRegisterInfo();
  DT = &getAnalysis<MachineDominatorTree>();

  // Attempt factorizing LEAs.
  bool Changed = FactorizeLEAsAllBasicBlocks(MF);

  // Redundant address calculations are removed only for -Os/-Oz since only
  // a code size gain is expected from that part of the pass.
  const bool OptForSize = MF.getFunction()->optForSize();

  // Process all basic blocks.
  for (auto &BB : MF) {
    MemOpMap LEAs;
    InstrPos.clear();

    // Find all LEA instructions in basic block.
    findLEAs(BB, LEAs);

    // If current basic block has no LEAs, move on to the next one.
    if (LEAs.empty())
      continue;

    // Remove redundant LEA instructions.
    Changed |= removeRedundantLEAs(LEAs);

    // Strength reduce LEA instructions.
    Changed |= strengthReduceLEAs(LEAs, BB);

    // Remove redundant address calculations (code-size optimization only).
    if (OptForSize)
      Changed |= removeRedundantAddrCalc(LEAs);
  }

  return Changed;
}
|