mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-11-24 03:33:20 +01:00
[SystemZ] Remove no-op MVCs
The stack coloring pass has code to delete stores and loads that become trivially dead after coloring. Extend it to cope with single instructions that copy from one frame index to another. The testcase happens to show an example of this kicking in at the moment. It did occur in real code too, though. llvm-svn: 185705
This commit is contained in:
parent
c75be20764
commit
c0fe83c1b6
@ -173,6 +173,14 @@ public:
|
||||
const MachineMemOperand *&MMO,
|
||||
int &FrameIndex) const;
|
||||
|
||||
  /// isStackSlotCopy - Return true if the specified machine instruction
  /// is a copy of one stack slot to another and has no other effect.
  /// Provide the identity of the two frame indices.
  ///
  /// The default implementation conservatively recognizes no such copies;
  /// targets with memory-to-memory move instructions (e.g. SystemZ MVC)
  /// override this hook.
  virtual bool isStackSlotCopy(const MachineInstr *MI, int &DestFrameIndex,
                               int &SrcFrameIndex) const {
    return false;
  }
|
||||
|
||||
/// reMaterialize - Re-issue the specified 'original' instruction at the
|
||||
/// specific location targeting a new destination register.
|
||||
/// The register in Orig->getOperand(0).getReg() will be substituted by
|
||||
|
@ -377,10 +377,19 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) {
|
||||
if (DCELimit != -1 && (int)NumDead >= DCELimit)
|
||||
break;
|
||||
|
||||
int FirstSS, SecondSS;
|
||||
if (TII->isStackSlotCopy(I, FirstSS, SecondSS) &&
|
||||
FirstSS == SecondSS &&
|
||||
FirstSS != -1) {
|
||||
++NumDead;
|
||||
changed = true;
|
||||
toErase.push_back(I);
|
||||
continue;
|
||||
}
|
||||
|
||||
MachineBasicBlock::iterator NextMI = llvm::next(I);
|
||||
if (NextMI == MBB->end()) continue;
|
||||
|
||||
int FirstSS, SecondSS;
|
||||
unsigned LoadReg = 0;
|
||||
unsigned StoreReg = 0;
|
||||
if (!(LoadReg = TII->isLoadFromStackSlot(I, FirstSS))) continue;
|
||||
|
@ -104,6 +104,31 @@ unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
|
||||
return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
|
||||
}
|
||||
|
||||
bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr *MI,
|
||||
int &DestFrameIndex,
|
||||
int &SrcFrameIndex) const {
|
||||
// Check for MVC 0(Length,FI1),0(FI2)
|
||||
const MachineFrameInfo *MFI = MI->getParent()->getParent()->getFrameInfo();
|
||||
if (MI->getOpcode() != SystemZ::MVC ||
|
||||
!MI->getOperand(0).isFI() ||
|
||||
MI->getOperand(1).getImm() != 0 ||
|
||||
!MI->getOperand(3).isFI() ||
|
||||
MI->getOperand(4).getImm() != 0)
|
||||
return false;
|
||||
|
||||
// Check that Length covers the full slots.
|
||||
int64_t Length = MI->getOperand(2).getImm();
|
||||
unsigned FI1 = MI->getOperand(0).getIndex();
|
||||
unsigned FI2 = MI->getOperand(3).getIndex();
|
||||
if (MFI->getObjectSize(FI1) != Length ||
|
||||
MFI->getObjectSize(FI2) != Length)
|
||||
return false;
|
||||
|
||||
DestFrameIndex = FI1;
|
||||
SrcFrameIndex = FI2;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock *&TBB,
|
||||
MachineBasicBlock *&FBB,
|
||||
|
@ -91,6 +91,8 @@ public:
|
||||
int &FrameIndex) const LLVM_OVERRIDE;
|
||||
virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
|
||||
int &FrameIndex) const LLVM_OVERRIDE;
|
||||
  // Returns true if MI is an MVC that copies the full contents of one stack
  // slot to another, setting DestFrameIndex and SrcFrameIndex to the
  // destination and source slots.
  virtual bool isStackSlotCopy(const MachineInstr *MI, int &DestFrameIndex,
                               int &SrcFrameIndex) const LLVM_OVERRIDE;
|
||||
virtual bool AnalyzeBranch(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock *&TBB,
|
||||
MachineBasicBlock *&FBB,
|
||||
|
@ -456,3 +456,92 @@ skip:
|
||||
|
||||
ret void
|
||||
}
|
||||
|
||||
; This used to generate a no-op MVC. It is very sensitive to spill heuristics.
;
; NOTE(review): the volatile loads of @h0-@h7, the calls to @foo, and the
; repeated volatile stores keep eight i64 values live across calls so that
; some of them are spilled.  The %newval0/%newval1 phis then presumably make
; the allocator copy a value between two stack slots with an MVC; when both
; slots are colored to the same frame index, that MVC becomes a slot-to-self
; copy that stack coloring should delete — hence the CHECK-NOT below.
define void @f11() {
; CHECK: f11:
; CHECK-NOT: mvc [[OFFSET:[0-9]+]](8,%r15), [[OFFSET]](%r15)
; CHECK: br %r14
entry:
  %val0 = load volatile i64 *@h0
  %val1 = load volatile i64 *@h1
  %val2 = load volatile i64 *@h2
  %val3 = load volatile i64 *@h3
  %val4 = load volatile i64 *@h4
  %val5 = load volatile i64 *@h5
  %val6 = load volatile i64 *@h6
  %val7 = load volatile i64 *@h7

  ; Alternative values for the phis in join1/join2.
  %altval0 = load volatile i64 *@h0
  %altval1 = load volatile i64 *@h1

  call void @foo()

  store volatile i64 %val0, i64 *@h0
  store volatile i64 %val1, i64 *@h1
  store volatile i64 %val2, i64 *@h2
  store volatile i64 %val3, i64 *@h3
  store volatile i64 %val4, i64 *@h4
  store volatile i64 %val5, i64 *@h5
  store volatile i64 %val6, i64 *@h6
  store volatile i64 %val7, i64 *@h7

  %check = load volatile i64 *@h0
  %cond = icmp eq i64 %check, 0
  br i1 %cond, label %a1, label %b1

a1:
  call void @foo()
  br label %join1

b1:
  call void @foo()
  br label %join1

join1:
  ; Merge of %val0 and %altval0 depending on the path taken.
  %newval0 = phi i64 [ %val0, %a1 ], [ %altval0, %b1 ]

  call void @foo()

  store volatile i64 %val1, i64 *@h1
  store volatile i64 %val2, i64 *@h2
  store volatile i64 %val3, i64 *@h3
  store volatile i64 %val4, i64 *@h4
  store volatile i64 %val5, i64 *@h5
  store volatile i64 %val6, i64 *@h6
  store volatile i64 %val7, i64 *@h7
  br i1 %cond, label %a2, label %b2

a2:
  call void @foo()
  br label %join2

b2:
  call void @foo()
  br label %join2

join2:
  ; Merge of %val1 and %altval1 depending on the path taken.
  %newval1 = phi i64 [ %val1, %a2 ], [ %altval1, %b2 ]

  call void @foo()

  store volatile i64 %val2, i64 *@h2
  store volatile i64 %val3, i64 *@h3
  store volatile i64 %val4, i64 *@h4
  store volatile i64 %val5, i64 *@h5
  store volatile i64 %val6, i64 *@h6
  store volatile i64 %val7, i64 *@h7

  call void @foo()

  store volatile i64 %newval0, i64 *@h0
  store volatile i64 %newval1, i64 *@h1
  store volatile i64 %val2, i64 *@h2
  store volatile i64 %val3, i64 *@h3
  store volatile i64 %val4, i64 *@h4
  store volatile i64 %val5, i64 *@h5
  store volatile i64 %val6, i64 *@h6
  store volatile i64 %val7, i64 *@h7

  ret void
}
|
||||
|
Loading…
Reference in New Issue
Block a user