1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-23 11:13:28 +01:00

Revert "[VirtRegRewriter] Avoid clobbering registers when expanding copy bundles"

There's an msan failure:

  http://lab.llvm.org:8011/builders/sanitizer-x86_64-linux-fast/builds/19549

This reverts r334750.

llvm-svn: 334754
This commit is contained in:
Justin Bogner 2018-06-14 19:10:57 +00:00
parent c2f9b2d826
commit 0705216f21
3 changed files with 7 additions and 142 deletions

View File

@@ -406,8 +406,6 @@ void VirtRegRewriter::expandCopyBundle(MachineInstr &MI) const {
return;
if (MI.isBundledWithPred() && !MI.isBundledWithSucc()) {
SmallVector<MachineInstr *, 2> MIs({&MI});
// Only do this when the complete bundle is made out of COPYs.
MachineBasicBlock &MBB = *MI.getParent();
for (MachineBasicBlock::reverse_instr_iterator I =
@@ -415,53 +413,16 @@ void VirtRegRewriter::expandCopyBundle(MachineInstr &MI) const {
I != E && I->isBundledWithSucc(); ++I) {
if (!I->isCopy())
return;
MIs.push_back(&*I);
}
MachineInstr *FirstMI = MIs.back();
auto anyRegsAlias = [](const MachineInstr *Dst,
ArrayRef<MachineInstr *> Srcs,
const TargetRegisterInfo *TRI) {
for (const MachineInstr *Src : Srcs)
if (Src != Dst)
if (TRI->regsOverlap(Dst->getOperand(0).getReg(),
Src->getOperand(1).getReg()))
return true;
return false;
};
// If any of the destination registers in the bundle of copies alias any of
// the source registers, try to schedule the instructions to avoid any
// clobbering.
for (int E = MIs.size(), PrevE; E > 1; PrevE = E) {
for (int I = E; I--; )
if (!anyRegsAlias(MIs[I], makeArrayRef(MIs).take_front(E), TRI)) {
if (I + 1 != E)
std::swap(MIs[I], MIs[E - 1]);
--E;
}
if (PrevE == E) {
MF->getFunction().getContext().emitError(
"register rewriting failed: cycle in copy bundle");
break;
}
}
MachineInstr *BundleStart = FirstMI;
for (MachineInstr *BundledMI : llvm::reverse(MIs)) {
// If instruction is in the middle of the bundle, move it before the
// bundle starts, otherwise, just unbundle it. When we get to the last
// instruction, the bundle will have been completely undone.
if (BundledMI != BundleStart) {
BundledMI->removeFromBundle();
MBB.insert(FirstMI, BundledMI);
} else if (BundledMI->isBundledWithSucc()) {
BundledMI->unbundleFromSucc();
BundleStart = &*std::next(BundledMI->getIterator());
}
for (MachineBasicBlock::reverse_instr_iterator I = MI.getReverseIterator();
I->isBundledWithPred(); ) {
MachineInstr &MI = *I;
++I;
if (Indexes && BundledMI != FirstMI)
Indexes->insertMachineInstrInMaps(*BundledMI);
MI.unbundleFromPred();
if (Indexes)
Indexes->insertMachineInstrInMaps(MI);
}
}
}

View File

@@ -1,16 +0,0 @@
# RUN: not llc -mtriple=aarch64-apple-ios -run-pass=greedy -run-pass=virtregrewriter %s -o /dev/null 2>&1 | FileCheck %s
# Check we don't infinitely loop on cycles in copy bundles.
# CHECK: error: register rewriting failed: cycle in copy bundle
---
name: func0
body: |
bb.0:
$x0 = IMPLICIT_DEF
$q0_q1_q2_q3 = IMPLICIT_DEF
$q1_q2_q3 = COPY $q0_q1_q2 {
$q2_q3_q4 = COPY $q1_q2_q3
}
ST4i64 $q1_q2_q3_q4, 0, $x0
...

View File

@@ -1,80 +0,0 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64-apple-ios -run-pass=greedy -run-pass=virtregrewriter %s -o - | FileCheck %s
---
name: func0
body: |
bb.0:
; Make sure we don't clobber q3 when we expand the bundle
; CHECK-LABEL: name: func0
; CHECK: $x0 = IMPLICIT_DEF
; CHECK: $q0_q1_q2_q3 = IMPLICIT_DEF
; CHECK: $q4 = COPY $q3
; CHECK: $q1_q2_q3 = COPY $q0_q1_q2
; CHECK: ST4i64 $q1_q2_q3_q4, 0, $x0
$x0 = IMPLICIT_DEF
$q0_q1_q2_q3 = IMPLICIT_DEF
$q1_q2_q3 = COPY $q0_q1_q2 {
$q4 = COPY $q3
}
ST4i64 $q1_q2_q3_q4, 0, $x0
...
---
name: func1
body: |
bb.0:
; If it was already ordered, make sure we don't break it
; CHECK-LABEL: name: func1
; CHECK: $x0 = IMPLICIT_DEF
; CHECK: $q0_q1_q2_q3 = IMPLICIT_DEF
; CHECK: $q4 = COPY $q3
; CHECK: $q1_q2_q3 = COPY $q0_q1_q2
; CHECK: ST4i64 $q1_q2_q3_q4, 0, $x0
$x0 = IMPLICIT_DEF
$q0_q1_q2_q3 = IMPLICIT_DEF
$q4 = COPY $q3 {
$q1_q2_q3 = COPY $q0_q1_q2
}
ST4i64 $q1_q2_q3_q4, 0, $x0
...
---
name: func2
body: |
bb.0:
; A bit less realistic, but check that we handle multiple nodes
; CHECK-LABEL: name: func2
; CHECK: $x0 = IMPLICIT_DEF
; CHECK: $q0_q1_q2_q3 = IMPLICIT_DEF
; CHECK: $q3 = COPY $q2
; CHECK: $q4 = COPY $q1
; CHECK: $q1_q2 = COPY $q0_q1
; CHECK: ST4i64 $q1_q2_q3_q4, 0, $x0
$x0 = IMPLICIT_DEF
$q0_q1_q2_q3 = IMPLICIT_DEF
$q1_q2 = COPY $q0_q1 {
$q3 = COPY $q2
$q4 = COPY $q1
}
ST4i64 $q1_q2_q3_q4, 0, $x0
...
---
name: func3
body: |
bb.0:
; If there was nothing wrong, don't change the order for no reason
; CHECK-LABEL: name: func3
; CHECK: $x0 = IMPLICIT_DEF
; CHECK: $q1_q2_q3_q4 = IMPLICIT_DEF
; CHECK: $q0_q1 = COPY $q1_q2
; CHECK: $q2_q3 = COPY $q3_q4
; CHECK: ST4i64 $q0_q1_q2_q3, 0, $x0
$x0 = IMPLICIT_DEF
$q1_q2_q3_q4 = IMPLICIT_DEF
$q0_q1 = COPY $q1_q2 {
$q2_q3 = COPY $q3_q4
}
ST4i64 $q0_q1_q2_q3, 0, $x0
...