llvm-mirror/lib/Target/AMDGPU/AMDGPUMacroFusion.cpp
Jay Foad cc94ceb292 [AMDGPU] Extend macro fusion for ADDC and SUBB to SUBBREV
Summary:
There's a lot of test case churn, but the overall effect is to increase
the number of back-to-back v_sub, v_subbrev pairs, which can execute with
no delay even on gfx10.
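
For illustration, such pairs typically come out of expanded 64-bit
arithmetic: the low half is a v_sub that defines a carry/borrow bit, and
the high half is a v_subb or v_subbrev that consumes it. A minimal C++
source pattern that lowers this way (a hypothetical example, not taken
from the commit's tests):

  #include <cstdint>

  // When compiled for AMDGPU, the 64-bit subtract splits into a 32-bit
  // low-half sub producing a borrow and a high-half sub consuming it;
  // this commit lets the scheduler keep that def/use pair back to back.
  uint64_t sub64(uint64_t a, uint64_t b) { return a - b; }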

Reviewers: arsenm, rampitec, nhaehnle

Subscribers: kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, hiraditya, kerbowa, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D75999
2020-03-11 17:59:21 +00:00


//===--- AMDGPUMacroFusion.cpp - AMDGPU Macro Fusion ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file contains the AMDGPU implementation of the DAG scheduling
/// mutation to pair instructions back to back.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUMacroFusion.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/MacroFusion.h"
using namespace llvm;
namespace {
/// Check if the instr pair, FirstMI and SecondMI, should be fused
/// together. Given SecondMI, when FirstMI is unspecified, then check if
/// SecondMI may be part of a fused pair at all.
static bool shouldScheduleAdjacent(const TargetInstrInfo &TII_,
const TargetSubtargetInfo &TSI,
const MachineInstr *FirstMI,
const MachineInstr &SecondMI) {
const SIInstrInfo &TII = static_cast<const SIInstrInfo&>(TII_);
switch (SecondMI.getOpcode()) {
case AMDGPU::V_ADDC_U32_e64:
case AMDGPU::V_SUBB_U32_e64:
case AMDGPU::V_SUBBREV_U32_e64:
case AMDGPU::V_CNDMASK_B32_e64: {
// Try to cluster defs of condition registers to their uses. This improves
// the chance VCC will be available which will allow shrinking to VOP2
// encodings.
if (!FirstMI)
return true;
const MachineBasicBlock &MBB = *FirstMI->getParent();
const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
const MachineOperand *Src2 = TII.getNamedOperand(SecondMI,
AMDGPU::OpName::src2);
return FirstMI->definesRegister(Src2->getReg(), TRI);
}
default:
return false;
}
return false;
}
} // end namespace
namespace llvm {
std::unique_ptr<ScheduleDAGMutation> createAMDGPUMacroFusionDAGMutation () {
return createMacroFusionDAGMutation(shouldScheduleAdjacent);
}
} // end namespace llvm
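
For context, here is a minimal sketch of how a target registers this
mutation with the machine scheduler. The helper name is hypothetical; the
real AMDGPU hook lives in AMDGPUTargetMachine.cpp and also adds load/store
clustering mutations on top of its own scheduling strategy.

#include "AMDGPUMacroFusion.h"
#include "llvm/CodeGen/MachineScheduler.h"

using namespace llvm;

// Hypothetical helper: build a generic live-interval scheduler and attach
// the macro fusion mutation, which adds cluster edges between fusion
// candidates after the dependence DAG is built.
static ScheduleDAGInstrs *createFusedScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG = createGenericSchedLive(C);
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

Because macro fusion runs as a DAG mutation, it biases scheduling toward
back-to-back placement without changing instruction selection.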