Add post-indexed load / store transformations.

llvm-svn: 31498

parent e3c204efe1
commit 3db2b3aab9
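For context, a minimal illustration (not part of the commit) of the two addressing patterns these combines target; the ARM mnemonics in the comments are the textbook examples of pre- and post-indexed addressing:

    // Illustrative only: pointer-update patterns that indexed load / store
    // instructions can absorb.
    int load_pre(int *&p) {
      p += 1;        // base update first...
      return *p;     // ...then load: pre-indexed, e.g. ARM "ldr r0, [r1, #4]!"
    }                // (r1 is written back as p+4 by the same instruction)

    int load_post(int *&p) {
      int v = *p;    // load from the current base first...
      p += 1;        // ...then update: post-indexed, e.g. ARM "ldr r0, [r1], #4"
      return v;
    }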
@@ -47,6 +47,11 @@ namespace {
  static Statistic<> NodesCombined ("dagcombiner",
                                    "Number of dag nodes combined");
  static Statistic<> PreIndexedNodes ("pre_indexed_ops",
                                      "Number of pre-indexed nodes created");
  static Statistic<> PostIndexedNodes ("post_indexed_ops",
                                       "Number of post-indexed nodes created");

  static cl::opt<bool>
    CombinerAA("combiner-alias-analysis", cl::Hidden,
               cl::desc("Turn on alias analysis during testing"));
@@ -174,13 +179,16 @@ namespace {
      return true;
    }

    /// CombineToPreIndexedLoadStore - Try turning a load / store into a
    /// pre-indexed load / store when the base pointer is an add or subtract
    /// and it has other uses besides the load / store. When the
    /// transformation is done, the new indexed load / store effectively
    /// folded the add / subtract in and all of its other uses are redirected
    /// to the new load / store.
    /// CombineToPreIndexedLoadStore - Try turning a load / store into a
    /// pre-indexed load / store when the base pointer is an add or subtract
    /// and it has other uses besides the load / store. After the
    /// transformation, the new indexed load / store has effectively folded
    /// the add / subtract in and all of its other uses are redirected to the
    /// new load / store.
    bool CombineToPreIndexedLoadStore(SDNode *N) {
      if (!AfterLegalize)
        return false;

      bool isLoad = true;
      SDOperand Ptr;
      if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
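Concretely, the redirection the comment describes works because the new indexed node exposes the written-back base as an extra result value. The numbering below is inferred from the ReplaceAllUsesOfValueWith calls later in this diff; it is an illustrative sketch with assumed names, not an authoritative statement of the SelectionDAG API:

    // Result numbering of the indexed nodes, inferred from the replacement
    // calls later in this diff (illustrative sketch, assumed names):
    enum IndexedLoadResult  { LoadValue = 0, LoadWriteback = 1, LoadChain = 2 };
    enum IndexedStoreResult { StoreWriteback = 0, StoreChain = 1 };
    // Other users of the original ADD / SUB are redirected to the writeback
    // result, so the separate address computation drops out of the DAG.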
@@ -191,8 +199,7 @@ namespace {
      } else
        return false;

      if (AfterLegalize &&
          (Ptr.getOpcode() == ISD::ADD || Ptr.getOpcode() == ISD::SUB) &&
      if ((Ptr.getOpcode() == ISD::ADD || Ptr.getOpcode() == ISD::SUB) &&
          Ptr.Val->use_size() > 1) {
        SDOperand BasePtr;
        SDOperand Offset;
@@ -203,57 +210,56 @@ namespace {
        //    that would create a cycle.
        // 2) All uses are load / store ops that use it as base ptr and the
        //    offset is just an addressing mode immediate.
        // 3) If the would-be new base may not be dead at N. FIXME: The
        //    proper check is too expensive (in terms of compile time) to
        //    check. Just make sure other uses of the new base are not also
        //    themselves uses of loads / stores.
        // 3) If the would-be new base may not be dead at N.

        bool OffIsAMImm = Offset.getOpcode() == ISD::Constant &&
          TLI.isLegalAddressImmediate(
            cast<ConstantSDNode>(Offset)->getValue());
        bool OffIsAMImm = Offset.getOpcode() == ISD::Constant && TLI.
          isLegalAddressImmediate(cast<ConstantSDNode>(Offset)->getValue());

        // Check for #3.
        if (OffIsAMImm && BasePtr.Val->use_size() > 1) {
          for (SDNode::use_iterator I = BasePtr.Val->use_begin(),
                 E = BasePtr.Val->use_end(); I != E; ++I) {
            SDNode *Use = *I;
            if (Use == Ptr.Val)
              continue;
            if (Use->getOpcode() == ISD::ADD || Use->getOpcode() == ISD::SUB) {
              for (SDNode::use_iterator II = Use->use_begin(),
                     EE = Use->use_end(); II != EE; ++II) {
                SDNode *UseUse = *II;
                if (UseUse->getOpcode() == ISD::LOAD &&
                    cast<LoadSDNode>(UseUse)->getBasePtr().Val == Use)
                  return false;
                else if (UseUse->getOpcode() == ISD::STORE &&
                         cast<StoreSDNode>(UseUse)->getBasePtr().Val == Use)
                  return false;
              }
        for (SDNode::use_iterator I = BasePtr.Val->use_begin(),
               E = BasePtr.Val->use_end(); I != E; ++I) {
          SDNode *Use = *I;
          if (Use == Ptr.Val)
            continue;
          if (Use->getOpcode() == ISD::CopyToReg)
            return false;
          if (OffIsAMImm && (Use->getOpcode() == ISD::ADD ||
                             Use->getOpcode() == ISD::SUB)) {
            for (SDNode::use_iterator II = Use->use_begin(),
                   EE = Use->use_end(); II != EE; ++II) {
              SDNode *UseUse = *II;
              if (UseUse->getOpcode() == ISD::LOAD &&
                  cast<LoadSDNode>(UseUse)->getBasePtr().Val == Use)
                return false;
              else if (UseUse->getOpcode() == ISD::STORE &&
                       cast<StoreSDNode>(UseUse)->getBasePtr().Val == Use)
                return false;
            }
          }
        }

        // Now check for #1 and #2.
        unsigned NumRealUses = 0;
        for (SDNode::use_iterator I = Ptr.Val->use_begin(),
               E = Ptr.Val->use_end(); I != E; ++I) {
          SDNode *Use = *I;
          if (Use == N)
            continue;
          if (Use->isPredecessor(N))
            return false;
        if (OffIsAMImm) {
          for (SDNode::use_iterator I = Ptr.Val->use_begin(),
                 E = Ptr.Val->use_end(); I != E; ++I) {
            SDNode *Use = *I;
            if (Use == N)
              continue;
            if (Use->isPredecessor(N))
              return false;

          if (!OffIsAMImm) {
            NumRealUses++;
          } else if (Use->getOpcode() == ISD::LOAD) {
            if (cast<LoadSDNode>(Use)->getBasePtr().Val != Ptr.Val)
          if (!OffIsAMImm) {
            NumRealUses++;
          } else if (Use->getOpcode() == ISD::STORE) {
            if (cast<StoreSDNode>(Use)->getBasePtr().Val != Ptr.Val)
          } else if (Use->getOpcode() == ISD::LOAD) {
            if (cast<LoadSDNode>(Use)->getBasePtr().Val != Ptr.Val)
              NumRealUses++;
          } else if (Use->getOpcode() == ISD::STORE) {
            if (cast<StoreSDNode>(Use)->getBasePtr().Val != Ptr.Val)
              NumRealUses++;
          } else
            NumRealUses++;
          } else
            NumRealUses++;
        }
        }
        if (NumRealUses == 0)
          return false;
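Condition #1 above is the cycle guard: if the add / sub reaches N through operand edges (or vice versa), folding the two together would make the merged node its own ancestor. A standalone model of that reachability test, with assumed types rather than the SelectionDAG API (the patch itself relies on SDNode::isPredecessor):

    #include <set>
    #include <vector>

    // Standalone model with assumed types; the real check is
    // SDNode::isPredecessor.
    struct Node {
      std::vector<Node *> Operands;  // edges to the nodes this one depends on
    };

    // True if Candidate is a (transitive) predecessor of N, i.e. reachable
    // from N by walking operand edges.
    bool isPredecessor(const Node *Candidate, const Node *N) {
      std::set<const Node *> Visited;
      std::vector<const Node *> Work(1, N);
      while (!Work.empty()) {
        const Node *Cur = Work.back();
        Work.pop_back();
        if (!Visited.insert(Cur).second)
          continue;                        // already explored
        for (size_t i = 0; i != Cur->Operands.size(); ++i) {
          if (Cur->Operands[i] == Candidate)
            return true;
          Work.push_back(Cur->Operands[i]);
        }
      }
      return false;
    }

    // Folding AddSub into the memory op N is only safe when neither node
    // depends on the other; otherwise the merged node would feed itself.
    bool foldIsAcyclic(const Node *N, const Node *AddSub) {
      return !isPredecessor(AddSub, N) && !isPredecessor(N, AddSub);
    }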
@@ -261,6 +267,7 @@ namespace {
        SDOperand Result = isLoad
          ? DAG.getIndexedLoad(SDOperand(N,0), BasePtr, Offset, AM)
          : DAG.getIndexedStore(SDOperand(N,0), BasePtr, Offset, AM);
        ++PreIndexedNodes;
        ++NodesCombined;
        DEBUG(std::cerr << "\nReplacing.4 "; N->dump();
              std::cerr << "\nWith: "; Result.Val->dump(&DAG);
@@ -294,7 +301,149 @@ namespace {
          return true;
        }
      }
      return false;
    }

    /// CombineToPostIndexedLoadStore - Try to combine a load / store with an
    /// add / sub of the base pointer node into a post-indexed load / store.
    /// The transformation effectively folds the add / subtract into the new
    /// indexed load / store, and all of its uses are redirected to the
    /// new load / store.
    bool CombineToPostIndexedLoadStore(SDNode *N) {
      if (!AfterLegalize)
        return false;

      bool isLoad = true;
      SDOperand Ptr;
      MVT::ValueType VT;
      if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
        Ptr = LD->getBasePtr();
        VT = LD->getLoadedVT();
      } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
        Ptr = ST->getBasePtr();
        VT = ST->getStoredVT();
        isLoad = false;
      } else
        return false;

      if (Ptr.Val->use_size() > 1) {
        for (SDNode::use_iterator I = Ptr.Val->use_begin(),
               E = Ptr.Val->use_end(); I != E; ++I) {
          SDNode *Op = *I;
          if (Op == N ||
              (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB))
            continue;

          SDOperand BasePtr;
          SDOperand Offset;
          ISD::MemOpAddrMode AM = ISD::UNINDEXED;
          if (TLI.getPostIndexedAddressParts(Op, VT, BasePtr, Offset, AM, DAG) &&
              BasePtr.Val == Ptr.Val) {
            // Try turning it into a post-indexed load / store except when
            // 1) Op must be independent of N, i.e. Op is neither a predecessor
            //    nor a successor of N. Otherwise, if Op is folded that would
            //    create a cycle.
            // 2) All uses are load / store ops that use it as base ptr and the
            //    offset is just an addressing mode immediate.
            // 3) If the would-be new base may not be dead at N.

            bool OffIsAMImm = Offset.getOpcode() == ISD::Constant && TLI.
              isLegalAddressImmediate(cast<ConstantSDNode>(Offset)->getValue());

            // Check for #3.
            bool TryNext = false;
            for (SDNode::use_iterator II = BasePtr.Val->use_begin(),
                   EE = BasePtr.Val->use_end(); II != EE; ++II) {
              SDNode *Use = *II;
              if (Use == Ptr.Val)
                continue;
              if (Use->getOpcode() == ISD::CopyToReg) {
                TryNext = true;
                break;
              }

              if (OffIsAMImm && (Use->getOpcode() == ISD::ADD ||
                                 Use->getOpcode() == ISD::SUB)) {
                for (SDNode::use_iterator III = Use->use_begin(),
                       EEE = Use->use_end(); III != EEE; ++III) {
                  SDNode *UseUse = *III;
                  if (UseUse->getOpcode() == ISD::LOAD &&
                      cast<LoadSDNode>(UseUse)->getBasePtr().Val == Use) {
                    TryNext = true;
                    break;
                  } else if (UseUse->getOpcode() == ISD::STORE &&
                             cast<StoreSDNode>(UseUse)->getBasePtr().Val == Use) {
                    TryNext = true;
                    break;
                  }
                }
              }
            }
            if (TryNext)
              continue;

            if (OffIsAMImm) {
              // Check for #2.
              unsigned NumRealUses = 0;
              for (SDNode::use_iterator II = Op->use_begin(), EE = Op->use_end();
                   II != EE; ++II) {
                SDNode *Use = *II;
                if (Use->getOpcode() == ISD::LOAD) {
                  if (cast<LoadSDNode>(Use)->getBasePtr().Val != Op)
                    NumRealUses++;
                } else if (Use->getOpcode() == ISD::STORE) {
                  if (cast<StoreSDNode>(Use)->getBasePtr().Val != Op)
                    NumRealUses++;
                } else
                  NumRealUses++;
              }
              if (NumRealUses == 0)
                continue;
            }

            // Check for #1.
            if (!Op->isPredecessor(N) && !N->isPredecessor(Op)) {
              SDOperand Result = isLoad
                ? DAG.getIndexedLoad(SDOperand(N,0), BasePtr, Offset, AM)
                : DAG.getIndexedStore(SDOperand(N,0), BasePtr, Offset, AM);
              ++PostIndexedNodes;
              ++NodesCombined;
              DEBUG(std::cerr << "\nReplacing.5 "; N->dump();
                    std::cerr << "\nWith: "; Result.Val->dump(&DAG);
                    std::cerr << '\n');
              std::vector<SDNode*> NowDead;
              if (isLoad) {
                DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 0), Result.getValue(0),
                                              NowDead);
                DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 1), Result.getValue(2),
                                              NowDead);
              } else {
                DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 0), Result.getValue(1),
                                              NowDead);
              }

              // Nodes can end up on the worklist more than once. Make sure we do
              // not process a node that has been replaced.
              for (unsigned i = 0, e = NowDead.size(); i != e; ++i)
                removeFromWorkList(NowDead[i]);
              // Finally, since the node is now dead, remove it from the graph.
              DAG.DeleteNode(N);

              // Replace the uses of Op with uses of the updated base value.
              DAG.ReplaceAllUsesOfValueWith(SDOperand(Op, 0),
                                            Result.getValue(isLoad ? 1 : 0),
                                            NowDead);
              removeFromWorkList(Op);
              for (unsigned i = 0, e = NowDead.size(); i != e; ++i)
                removeFromWorkList(NowDead[i]);
              DAG.DeleteNode(Op);

              return true;
            }
          }
        }
      }
      return false;
    }

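The post-indexed path is driven entirely by a target hook; its signature is inferred from the TLI.getPostIndexedAddressParts call site above. A hypothetical implementation, sketched against the API names visible in this patch (ISD::POST_INC is an assumed enumerator; this is not any real target's logic):

    // Hypothetical TargetLowering override; signature inferred from the call
    // site above, body invented for illustration.
    bool getPostIndexedAddressParts(SDNode *Op, MVT::ValueType VT,
                                    SDOperand &BasePtr, SDOperand &Offset,
                                    ISD::MemOpAddrMode &AM, SelectionDAG &DAG) {
      // Only a simple "base + constant" update can become a post-increment.
      if (Op->getOpcode() != ISD::ADD)
        return false;
      SDOperand Off = Op->getOperand(1);
      if (Off.getOpcode() != ISD::Constant ||
          !isLegalAddressImmediate(cast<ConstantSDNode>(Off)->getValue()))
        return false;                 // offset must fit the addressing mode
      BasePtr = Op->getOperand(0);    // combiner requires BasePtr.Val == Ptr.Val
      Offset  = Off;                  // the post-increment amount
      AM = ISD::POST_INC;             // assumed addressing-mode tag
      return true;
    }

The combiner then rewires three values, as the ReplaceAllUsesOfValueWith calls above show: the loaded result, the chain, and the written-back base that replaces Op.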
@@ -2877,7 +3026,7 @@ SDOperand DAGCombiner::visitLOAD(SDNode *N) {
  }

  // Try transforming N to an indexed load.
  if (CombineToPreIndexedLoadStore(N))
  if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
    return SDOperand(N, 0);

  return SDOperand();
@@ -2923,7 +3072,7 @@ SDOperand DAGCombiner::visitSTORE(SDNode *N) {
  }

  // Try transforming N to an indexed store.
  if (CombineToPreIndexedLoadStore(N))
  if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
    return SDOperand(N, 0);

  return SDOperand();
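Taken together with the pre-indexed combine, this lets pointer-increment loops fold their address arithmetic into the memory operations themselves. An assumed end-to-end illustration (actual code generation depends on the target implementing the address-parts hooks):

    // Assumed illustration: on a target with post-indexed addressing, each
    // pointer increment below can fold into the adjacent memory operation.
    void copy_words(int *dst, const int *src, int n) {
      for (int i = 0; i < n; ++i)
        *dst++ = *src++;   // load and store each absorb their base update
      // e.g. ARM:  ldr r3, [r1], #4    ; post-indexed load, r1 advances
      //            str r3, [r0], #4    ; post-indexed store, r0 advances
    }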