Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-10-19 02:52:53 +02:00)

[RISCV] Optimize emission of SELECT sequences

This patch optimizes the emission of a sequence of SELECTs with the same
condition, avoiding the insertion of unnecessary control flow. Such a sequence
often occurs when a SELECT of values wider than XLEN is legalized into two
SELECTs with legal types. We have identified several use cases where the
SELECTs could be interleaved with other instructions. Therefore, we extend the
sequence to include non-SELECT instructions if we are able to detect that the
non-SELECT instructions do not impact the optimization.

This patch supersedes https://reviews.llvm.org/D59096, which attempted to
address this issue by introducing a new SelectionDAG node. Hat tip to Eli
Friedman for his feedback on how to best handle this issue.

Differential Revision: https://reviews.llvm.org/D59355
Patch by Luís Marques.

llvm-svn: 356741
This commit is contained in:
Alex Bradbury 2019-03-22 10:45:03 +00:00
parent e6a58c70c2
commit f2b04fe6b7
4 changed files with 804 additions and 417 deletions

View File

@ -17,6 +17,7 @@
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@ -787,10 +788,21 @@ static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
return BB;
}
/// Returns true if \p MI is one of the RISCV Select_* pseudo-instructions
/// (GPR, FPR32 or FPR64 flavour) that must be expanded into explicit
/// branch-based control flow.
static bool isSelectPseudo(MachineInstr &MI) {
  const unsigned Opcode = MI.getOpcode();
  return Opcode == RISCV::Select_GPR_Using_CC_GPR ||
         Opcode == RISCV::Select_FPR32_Using_CC_GPR ||
         Opcode == RISCV::Select_FPR64_Using_CC_GPR;
}
static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
MachineBasicBlock *BB) {
// To "insert" a SELECT instruction, we actually have to insert the triangle
// control-flow pattern. The incoming instruction knows the destination vreg
// To "insert" Select_* instructions, we actually have to insert the triangle
// control-flow pattern. The incoming instructions know the destination vreg
// to set, the condition code register to branch on, the true/false values to
// select between, and the condcode to use to select the appropriate branch.
//
@ -800,6 +812,54 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
// | IfFalseMBB
// | /
// TailMBB
//
// When we find a sequence of selects we attempt to optimize their emission
// by sharing the control flow. Currently we only handle cases where we have
// multiple selects with the exact same condition (same LHS, RHS and CC).
// The selects may be interleaved with other instructions if the other
// instructions meet some requirements we deem safe:
// - They are debug instructions. Otherwise,
// - They do not have side-effects, do not access memory and their inputs do
// not depend on the results of the select pseudo-instructions.
// The TrueV/FalseV operands of the selects cannot depend on the result of
// previous selects in the sequence.
// These conditions could be further relaxed. See the X86 target for a
// related approach and more information.
unsigned LHS = MI.getOperand(1).getReg();
unsigned RHS = MI.getOperand(2).getReg();
auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
SmallVector<MachineInstr *, 4> SelectDebugValues;
SmallSet<unsigned, 4> SelectDests;
SelectDests.insert(MI.getOperand(0).getReg());
MachineInstr *LastSelectPseudo = &MI;
for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
SequenceMBBI != E; ++SequenceMBBI) {
if (SequenceMBBI->isDebugInstr())
continue;
else if (isSelectPseudo(*SequenceMBBI)) {
if (SequenceMBBI->getOperand(1).getReg() != LHS ||
SequenceMBBI->getOperand(2).getReg() != RHS ||
SequenceMBBI->getOperand(3).getImm() != CC ||
SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
break;
LastSelectPseudo = &*SequenceMBBI;
SequenceMBBI->collectDebugValues(SelectDebugValues);
SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
} else {
if (SequenceMBBI->hasUnmodeledSideEffects() ||
SequenceMBBI->mayLoadOrStore())
break;
if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
}))
break;
}
}
const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
DebugLoc DL = MI.getDebugLoc();
@ -812,20 +872,23 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
F->insert(I, IfFalseMBB);
F->insert(I, TailMBB);
// Move all remaining instructions to TailMBB.
TailMBB->splice(TailMBB->begin(), HeadMBB, std::next(MI.getIterator()),
HeadMBB->end());
// Transfer debug instructions associated with the selects to TailMBB.
for (MachineInstr *DebugInstr : SelectDebugValues) {
TailMBB->push_back(DebugInstr->removeFromParent());
}
// Move all instructions after the sequence to TailMBB.
TailMBB->splice(TailMBB->end(), HeadMBB,
std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
// Update machine-CFG edges by transferring all successors of the current
// block to the new block which will contain the Phi node for the select.
// block to the new block which will contain the Phi nodes for the selects.
TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
// Set the successors for HeadMBB.
HeadMBB->addSuccessor(IfFalseMBB);
HeadMBB->addSuccessor(TailMBB);
// Insert appropriate branch.
unsigned LHS = MI.getOperand(1).getReg();
unsigned RHS = MI.getOperand(2).getReg();
auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
BuildMI(HeadMBB, DL, TII.get(Opcode))
@ -836,15 +899,25 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
// IfFalseMBB just falls through to TailMBB.
IfFalseMBB->addSuccessor(TailMBB);
// %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
BuildMI(*TailMBB, TailMBB->begin(), DL, TII.get(RISCV::PHI),
MI.getOperand(0).getReg())
.addReg(MI.getOperand(4).getReg())
.addMBB(HeadMBB)
.addReg(MI.getOperand(5).getReg())
.addMBB(IfFalseMBB);
// Create PHIs for all of the select pseudo-instructions.
auto SelectMBBI = MI.getIterator();
auto SelectEnd = std::next(LastSelectPseudo->getIterator());
auto InsertionPoint = TailMBB->begin();
while (SelectMBBI != SelectEnd) {
auto Next = std::next(SelectMBBI);
if (isSelectPseudo(*SelectMBBI)) {
// %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
.addReg(SelectMBBI->getOperand(4).getReg())
.addMBB(HeadMBB)
.addReg(SelectMBBI->getOperand(5).getReg())
.addMBB(IfFalseMBB);
SelectMBBI->eraseFromParent();
}
SelectMBBI = Next;
}
MI.eraseFromParent(); // The pseudo instruction is gone now.
return TailMBB;
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,323 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV64I
; Selects of wide values are split into two selects, which can easily cause
; unnecessary control flow. Here we check some cases where we can currently
; emit a sequence of selects with shared control flow.
define i64 @cmovcc64(i32 signext %a, i64 %b, i64 %c) nounwind {
; RV32I-LABEL: cmovcc64:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: addi a5, zero, 123
; RV32I-NEXT: beq a0, a5, .LBB0_2
; RV32I-NEXT: # %bb.1: # %entry
; RV32I-NEXT: mv a1, a3
; RV32I-NEXT: mv a2, a4
; RV32I-NEXT: .LBB0_2: # %entry
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: mv a1, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: cmovcc64:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: addi a3, zero, 123
; RV64I-NEXT: beq a0, a3, .LBB0_2
; RV64I-NEXT: # %bb.1: # %entry
; RV64I-NEXT: mv a1, a2
; RV64I-NEXT: .LBB0_2: # %entry
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: ret
entry:
%cmp = icmp eq i32 %a, 123
%cond = select i1 %cmp, i64 %b, i64 %c
ret i64 %cond
}
define i128 @cmovcc128(i64 signext %a, i128 %b, i128 %c) nounwind {
; RV32I-LABEL: cmovcc128:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: xori a1, a1, 123
; RV32I-NEXT: or a1, a1, a2
; RV32I-NEXT: beqz a1, .LBB1_2
; RV32I-NEXT: # %bb.1: # %entry
; RV32I-NEXT: addi a1, a4, 4
; RV32I-NEXT: addi a2, a4, 8
; RV32I-NEXT: addi a5, a4, 12
; RV32I-NEXT: mv a3, a4
; RV32I-NEXT: j .LBB1_3
; RV32I-NEXT: .LBB1_2:
; RV32I-NEXT: addi a1, a3, 4
; RV32I-NEXT: addi a2, a3, 8
; RV32I-NEXT: addi a5, a3, 12
; RV32I-NEXT: .LBB1_3: # %entry
; RV32I-NEXT: lw a4, 0(a5)
; RV32I-NEXT: sw a4, 12(a0)
; RV32I-NEXT: lw a2, 0(a2)
; RV32I-NEXT: sw a2, 8(a0)
; RV32I-NEXT: lw a1, 0(a1)
; RV32I-NEXT: sw a1, 4(a0)
; RV32I-NEXT: lw a1, 0(a3)
; RV32I-NEXT: sw a1, 0(a0)
; RV32I-NEXT: ret
;
; RV64I-LABEL: cmovcc128:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: addi a5, zero, 123
; RV64I-NEXT: beq a0, a5, .LBB1_2
; RV64I-NEXT: # %bb.1: # %entry
; RV64I-NEXT: mv a1, a3
; RV64I-NEXT: mv a2, a4
; RV64I-NEXT: .LBB1_2: # %entry
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: mv a1, a2
; RV64I-NEXT: ret
entry:
%cmp = icmp eq i64 %a, 123
%cond = select i1 %cmp, i128 %b, i128 %c
ret i128 %cond
}
define i64 @cmov64(i1 %a, i64 %b, i64 %c) nounwind {
; RV32I-LABEL: cmov64:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: andi a0, a0, 1
; RV32I-NEXT: bnez a0, .LBB2_2
; RV32I-NEXT: # %bb.1: # %entry
; RV32I-NEXT: mv a1, a3
; RV32I-NEXT: mv a2, a4
; RV32I-NEXT: .LBB2_2: # %entry
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: mv a1, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: cmov64:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: andi a0, a0, 1
; RV64I-NEXT: bnez a0, .LBB2_2
; RV64I-NEXT: # %bb.1: # %entry
; RV64I-NEXT: mv a1, a2
; RV64I-NEXT: .LBB2_2: # %entry
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: ret
entry:
%cond = select i1 %a, i64 %b, i64 %c
ret i64 %cond
}
define i128 @cmov128(i1 %a, i128 %b, i128 %c) nounwind {
; RV32I-LABEL: cmov128:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: andi a1, a1, 1
; RV32I-NEXT: bnez a1, .LBB3_2
; RV32I-NEXT: # %bb.1: # %entry
; RV32I-NEXT: addi a1, a3, 4
; RV32I-NEXT: addi a4, a3, 8
; RV32I-NEXT: addi a5, a3, 12
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: j .LBB3_3
; RV32I-NEXT: .LBB3_2:
; RV32I-NEXT: addi a1, a2, 4
; RV32I-NEXT: addi a4, a2, 8
; RV32I-NEXT: addi a5, a2, 12
; RV32I-NEXT: .LBB3_3: # %entry
; RV32I-NEXT: lw a3, 0(a5)
; RV32I-NEXT: sw a3, 12(a0)
; RV32I-NEXT: lw a3, 0(a4)
; RV32I-NEXT: sw a3, 8(a0)
; RV32I-NEXT: lw a1, 0(a1)
; RV32I-NEXT: sw a1, 4(a0)
; RV32I-NEXT: lw a1, 0(a2)
; RV32I-NEXT: sw a1, 0(a0)
; RV32I-NEXT: ret
;
; RV64I-LABEL: cmov128:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: andi a0, a0, 1
; RV64I-NEXT: bnez a0, .LBB3_2
; RV64I-NEXT: # %bb.1: # %entry
; RV64I-NEXT: mv a1, a3
; RV64I-NEXT: mv a2, a4
; RV64I-NEXT: .LBB3_2: # %entry
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: mv a1, a2
; RV64I-NEXT: ret
entry:
%cond = select i1 %a, i128 %b, i128 %c
ret i128 %cond
}
define float @cmovfloat(i1 %a, float %b, float %c, float %d, float %e) nounwind {
; RV32I-LABEL: cmovfloat:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: andi a0, a0, 1
; RV32I-NEXT: bnez a0, .LBB4_2
; RV32I-NEXT: # %bb.1: # %entry
; RV32I-NEXT: fmv.w.x ft0, a4
; RV32I-NEXT: fmv.w.x ft1, a2
; RV32I-NEXT: j .LBB4_3
; RV32I-NEXT: .LBB4_2:
; RV32I-NEXT: fmv.w.x ft0, a3
; RV32I-NEXT: fmv.w.x ft1, a1
; RV32I-NEXT: .LBB4_3: # %entry
; RV32I-NEXT: fadd.s ft0, ft1, ft0
; RV32I-NEXT: fmv.x.w a0, ft0
; RV32I-NEXT: ret
;
; RV64I-LABEL: cmovfloat:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: andi a0, a0, 1
; RV64I-NEXT: bnez a0, .LBB4_2
; RV64I-NEXT: # %bb.1: # %entry
; RV64I-NEXT: fmv.w.x ft0, a4
; RV64I-NEXT: fmv.w.x ft1, a2
; RV64I-NEXT: j .LBB4_3
; RV64I-NEXT: .LBB4_2:
; RV64I-NEXT: fmv.w.x ft0, a3
; RV64I-NEXT: fmv.w.x ft1, a1
; RV64I-NEXT: .LBB4_3: # %entry
; RV64I-NEXT: fadd.s ft0, ft1, ft0
; RV64I-NEXT: fmv.x.w a0, ft0
; RV64I-NEXT: ret
entry:
%cond1 = select i1 %a, float %b, float %c
%cond2 = select i1 %a, float %d, float %e
%ret = fadd float %cond1, %cond2
ret float %ret
}
define double @cmovdouble(i1 %a, double %b, double %c) nounwind {
; RV32I-LABEL: cmovdouble:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw a3, 8(sp)
; RV32I-NEXT: sw a4, 12(sp)
; RV32I-NEXT: fld ft0, 8(sp)
; RV32I-NEXT: sw a1, 8(sp)
; RV32I-NEXT: sw a2, 12(sp)
; RV32I-NEXT: fld ft1, 8(sp)
; RV32I-NEXT: andi a0, a0, 1
; RV32I-NEXT: bnez a0, .LBB5_2
; RV32I-NEXT: # %bb.1: # %entry
; RV32I-NEXT: fmv.d ft1, ft0
; RV32I-NEXT: .LBB5_2: # %entry
; RV32I-NEXT: fsd ft1, 8(sp)
; RV32I-NEXT: lw a0, 8(sp)
; RV32I-NEXT: lw a1, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: cmovdouble:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: andi a0, a0, 1
; RV64I-NEXT: bnez a0, .LBB5_2
; RV64I-NEXT: # %bb.1: # %entry
; RV64I-NEXT: fmv.d.x ft0, a2
; RV64I-NEXT: fmv.x.d a0, ft0
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB5_2:
; RV64I-NEXT: fmv.d.x ft0, a1
; RV64I-NEXT: fmv.x.d a0, ft0
; RV64I-NEXT: ret
entry:
%cond = select i1 %a, double %b, double %c
ret double %cond
}
; Check that selects with dependencies on previous ones aren't incorrectly
; optimized.
define i32 @cmovccdep(i32 signext %a, i32 %b, i32 %c, i32 %d) nounwind {
; RV32I-LABEL: cmovccdep:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: addi a4, zero, 123
; RV32I-NEXT: bne a0, a4, .LBB6_3
; RV32I-NEXT: # %bb.1: # %entry
; RV32I-NEXT: mv a2, a1
; RV32I-NEXT: bne a0, a4, .LBB6_4
; RV32I-NEXT: .LBB6_2: # %entry
; RV32I-NEXT: add a0, a1, a2
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB6_3: # %entry
; RV32I-NEXT: mv a1, a2
; RV32I-NEXT: mv a2, a1
; RV32I-NEXT: beq a0, a4, .LBB6_2
; RV32I-NEXT: .LBB6_4: # %entry
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: add a0, a1, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: cmovccdep:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: addi a4, zero, 123
; RV64I-NEXT: bne a0, a4, .LBB6_3
; RV64I-NEXT: # %bb.1: # %entry
; RV64I-NEXT: mv a2, a1
; RV64I-NEXT: bne a0, a4, .LBB6_4
; RV64I-NEXT: .LBB6_2: # %entry
; RV64I-NEXT: add a0, a1, a2
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB6_3: # %entry
; RV64I-NEXT: mv a1, a2
; RV64I-NEXT: mv a2, a1
; RV64I-NEXT: beq a0, a4, .LBB6_2
; RV64I-NEXT: .LBB6_4: # %entry
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: add a0, a1, a2
; RV64I-NEXT: ret
entry:
%cmp = icmp eq i32 %a, 123
%cond1 = select i1 %cmp, i32 %b, i32 %c
%cond2 = select i1 %cmp, i32 %cond1, i32 %d
%ret = add i32 %cond1, %cond2
ret i32 %ret
}
; Check that selects with different conditions aren't incorrectly optimized.
define i32 @cmovdiffcc(i1 %a, i1 %b, i32 %c, i32 %d, i32 %e, i32 %f) nounwind {
; RV32I-LABEL: cmovdiffcc:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: andi a1, a1, 1
; RV32I-NEXT: beqz a1, .LBB7_3
; RV32I-NEXT: # %bb.1: # %entry
; RV32I-NEXT: andi a0, a0, 1
; RV32I-NEXT: beqz a0, .LBB7_4
; RV32I-NEXT: .LBB7_2: # %entry
; RV32I-NEXT: add a0, a2, a4
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB7_3: # %entry
; RV32I-NEXT: mv a4, a5
; RV32I-NEXT: andi a0, a0, 1
; RV32I-NEXT: bnez a0, .LBB7_2
; RV32I-NEXT: .LBB7_4: # %entry
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: add a0, a2, a4
; RV32I-NEXT: ret
;
; RV64I-LABEL: cmovdiffcc:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: andi a1, a1, 1
; RV64I-NEXT: beqz a1, .LBB7_3
; RV64I-NEXT: # %bb.1: # %entry
; RV64I-NEXT: andi a0, a0, 1
; RV64I-NEXT: beqz a0, .LBB7_4
; RV64I-NEXT: .LBB7_2: # %entry
; RV64I-NEXT: add a0, a2, a4
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB7_3: # %entry
; RV64I-NEXT: mv a4, a5
; RV64I-NEXT: andi a0, a0, 1
; RV64I-NEXT: bnez a0, .LBB7_2
; RV64I-NEXT: .LBB7_4: # %entry
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: add a0, a2, a4
; RV64I-NEXT: ret
entry:
%cond1 = select i1 %a, i32 %c, i32 %d
%cond2 = select i1 %b, i32 %e, i32 %f
%ret = add i32 %cond1, %cond2
ret i32 %ret
}

View File

@ -0,0 +1,191 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -run-pass=expand-isel-pseudos -simplify-mir -o - %s \
# RUN: | FileCheck -check-prefix=RV32I %s
# RUN: llc -mtriple=riscv64 -run-pass=expand-isel-pseudos -simplify-mir -o - %s \
# RUN: | FileCheck -check-prefix=RV64I %s
# Provide dummy definitions of functions and just enough metadata to create a
# DBG_VALUE.
--- |
define void @cmov_interleaved_bad() {
ret void
}
define void @cmov_interleaved_debug_value() {
ret void
}
!1 = !DIExpression()
...
---
# Here we have a sequence of select instructions with a non-select instruction
# in the middle. Because the non-select depends on the result of a previous
# select, we cannot optimize the sequence to share control-flow.
name: cmov_interleaved_bad
alignment: 2
tracksRegLiveness: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
- { id: 3, class: gpr }
- { id: 4, class: gpr }
- { id: 5, class: gpr }
- { id: 6, class: gpr }
- { id: 7, class: gpr }
- { id: 8, class: gpr }
- { id: 9, class: gpr }
- { id: 10, class: gpr }
liveins:
- { reg: '$x10', virtual-reg: '%0' }
- { reg: '$x11', virtual-reg: '%1' }
- { reg: '$x12', virtual-reg: '%2' }
- { reg: '$x13', virtual-reg: '%3' }
body: |
bb.0:
liveins: $x10, $x11, $x12, $x13
; RV32I-LABEL: name: cmov_interleaved_bad
; RV32I: successors: %bb.1, %bb.2
; RV32I: liveins: $x10, $x11, $x12, $x13
; RV32I: [[COPY:%[0-9]+]]:gpr = COPY $x13
; RV32I: [[COPY1:%[0-9]+]]:gpr = COPY $x12
; RV32I: [[COPY2:%[0-9]+]]:gpr = COPY $x11
; RV32I: [[COPY3:%[0-9]+]]:gpr = COPY $x10
; RV32I: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY3]], 1
; RV32I: [[COPY4:%[0-9]+]]:gpr = COPY $x0
; RV32I: BNE [[ANDI]], [[COPY4]], %bb.2
; RV32I: .1:
; RV32I: .2:
; RV32I: successors: %bb.3, %bb.4
; RV32I: [[PHI:%[0-9]+]]:gpr = PHI [[COPY2]], %bb.0, [[COPY1]], %bb.1
; RV32I: [[ADDI:%[0-9]+]]:gpr = ADDI [[PHI]], 1
; RV32I: BNE [[ANDI]], [[COPY4]], %bb.4
; RV32I: .3:
; RV32I: .4:
; RV32I: [[PHI1:%[0-9]+]]:gpr = PHI [[COPY]], %bb.2, [[COPY1]], %bb.3
; RV32I: [[ADD:%[0-9]+]]:gpr = ADD [[PHI]], killed [[PHI1]]
; RV32I: $x10 = COPY [[ADD]]
; RV32I: PseudoRET implicit $x10
; RV64I-LABEL: name: cmov_interleaved_bad
; RV64I: successors: %bb.1, %bb.2
; RV64I: liveins: $x10, $x11, $x12, $x13
; RV64I: [[COPY:%[0-9]+]]:gpr = COPY $x13
; RV64I: [[COPY1:%[0-9]+]]:gpr = COPY $x12
; RV64I: [[COPY2:%[0-9]+]]:gpr = COPY $x11
; RV64I: [[COPY3:%[0-9]+]]:gpr = COPY $x10
; RV64I: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY3]], 1
; RV64I: [[COPY4:%[0-9]+]]:gpr = COPY $x0
; RV64I: BNE [[ANDI]], [[COPY4]], %bb.2
; RV64I: .1:
; RV64I: .2:
; RV64I: successors: %bb.3, %bb.4
; RV64I: [[PHI:%[0-9]+]]:gpr = PHI [[COPY2]], %bb.0, [[COPY1]], %bb.1
; RV64I: [[ADDI:%[0-9]+]]:gpr = ADDI [[PHI]], 1
; RV64I: BNE [[ANDI]], [[COPY4]], %bb.4
; RV64I: .3:
; RV64I: .4:
; RV64I: [[PHI1:%[0-9]+]]:gpr = PHI [[COPY]], %bb.2, [[COPY1]], %bb.3
; RV64I: [[ADD:%[0-9]+]]:gpr = ADD [[PHI]], killed [[PHI1]]
; RV64I: $x10 = COPY [[ADD]]
; RV64I: PseudoRET implicit $x10
%3:gpr = COPY $x13
%2:gpr = COPY $x12
%1:gpr = COPY $x11
%0:gpr = COPY $x10
%5:gpr = ANDI %0, 1
%6:gpr = COPY $x0
%7:gpr = Select_GPR_Using_CC_GPR %5, %6, 22, %1, %2
%8:gpr = ADDI %7, 1
%9:gpr = Select_GPR_Using_CC_GPR %5, %6, 22, %3, %2
%10:gpr = ADD %7, killed %9
$x10 = COPY %10
PseudoRET implicit $x10
...
---
# Demonstrate that debug info associated with selects is correctly moved to
# the tail basic block, while debug info associated with non-selects is left
# in the head basic block.
name: cmov_interleaved_debug_value
alignment: 2
tracksRegLiveness: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
- { id: 3, class: gpr }
- { id: 4, class: gpr }
- { id: 5, class: gpr }
- { id: 6, class: gpr }
- { id: 7, class: gpr }
- { id: 8, class: gpr }
- { id: 9, class: gpr }
- { id: 10, class: gpr }
liveins:
- { reg: '$x10', virtual-reg: '%0' }
- { reg: '$x11', virtual-reg: '%1' }
- { reg: '$x12', virtual-reg: '%2' }
- { reg: '$x13', virtual-reg: '%3' }
body: |
bb.0:
liveins: $x10, $x11, $x12, $x13
; RV32I-LABEL: name: cmov_interleaved_debug_value
; RV32I: successors: %bb.1, %bb.2
; RV32I: liveins: $x10, $x11, $x12, $x13
; RV32I: [[COPY:%[0-9]+]]:gpr = COPY $x13
; RV32I: [[COPY1:%[0-9]+]]:gpr = COPY $x12
; RV32I: [[COPY2:%[0-9]+]]:gpr = COPY $x11
; RV32I: [[COPY3:%[0-9]+]]:gpr = COPY $x10
; RV32I: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY3]], 1
; RV32I: [[COPY4:%[0-9]+]]:gpr = COPY $x0
; RV32I: [[ADDI:%[0-9]+]]:gpr = ADDI [[COPY3]], 1
; RV32I: DBG_VALUE [[ADDI]], $noreg, !DIExpression(), !DIExpression()
; RV32I: BNE [[ANDI]], [[COPY4]], %bb.2
; RV32I: .1:
; RV32I: .2:
; RV32I: [[PHI:%[0-9]+]]:gpr = PHI [[COPY2]], %bb.0, [[COPY1]], %bb.1
; RV32I: [[PHI1:%[0-9]+]]:gpr = PHI [[COPY]], %bb.0, [[COPY1]], %bb.1
; RV32I: DBG_VALUE [[PHI]], $noreg, !DIExpression(), !DIExpression()
; RV32I: DBG_VALUE [[PHI1]], $noreg, !DIExpression(), !DIExpression()
; RV32I: [[ADD:%[0-9]+]]:gpr = ADD [[PHI]], killed [[PHI1]]
; RV32I: $x10 = COPY [[ADD]]
; RV32I: PseudoRET implicit $x10
; RV64I-LABEL: name: cmov_interleaved_debug_value
; RV64I: successors: %bb.1, %bb.2
; RV64I: liveins: $x10, $x11, $x12, $x13
; RV64I: [[COPY:%[0-9]+]]:gpr = COPY $x13
; RV64I: [[COPY1:%[0-9]+]]:gpr = COPY $x12
; RV64I: [[COPY2:%[0-9]+]]:gpr = COPY $x11
; RV64I: [[COPY3:%[0-9]+]]:gpr = COPY $x10
; RV64I: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY3]], 1
; RV64I: [[COPY4:%[0-9]+]]:gpr = COPY $x0
; RV64I: [[ADDI:%[0-9]+]]:gpr = ADDI [[COPY3]], 1
; RV64I: DBG_VALUE [[ADDI]], $noreg, !DIExpression(), !DIExpression()
; RV64I: BNE [[ANDI]], [[COPY4]], %bb.2
; RV64I: .1:
; RV64I: .2:
; RV64I: [[PHI:%[0-9]+]]:gpr = PHI [[COPY2]], %bb.0, [[COPY1]], %bb.1
; RV64I: [[PHI1:%[0-9]+]]:gpr = PHI [[COPY]], %bb.0, [[COPY1]], %bb.1
; RV64I: DBG_VALUE [[PHI]], $noreg, !DIExpression(), !DIExpression()
; RV64I: DBG_VALUE [[PHI1]], $noreg, !DIExpression(), !DIExpression()
; RV64I: [[ADD:%[0-9]+]]:gpr = ADD [[PHI]], killed [[PHI1]]
; RV64I: $x10 = COPY [[ADD]]
; RV64I: PseudoRET implicit $x10
%3:gpr = COPY $x13
%2:gpr = COPY $x12
%1:gpr = COPY $x11
%0:gpr = COPY $x10
%5:gpr = ANDI %0, 1
%6:gpr = COPY $x0
%7:gpr = Select_GPR_Using_CC_GPR %5, %6, 22, %1, %2
DBG_VALUE %7, $noreg, !1, !1
%8:gpr = ADDI %0, 1
DBG_VALUE %8, $noreg, !1, !1
%9:gpr = Select_GPR_Using_CC_GPR %5, %6, 22, %3, %2
DBG_VALUE %9, $noreg, !1, !1
%10:gpr = ADD %7, killed %9
$x10 = COPY %10
PseudoRET implicit $x10
...
---