2017-08-08 02:47:13 +02:00
|
|
|
//===- SIInsertWaitcnts.cpp - Insert Wait Instructions --------------------===//
|
2017-04-12 05:25:12 +02:00
|
|
|
//
|
2019-01-19 09:50:56 +01:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2017-04-12 05:25:12 +02:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
/// \file
|
2018-05-01 17:54:18 +02:00
|
|
|
/// Insert wait instructions for memory reads and writes.
|
2017-04-12 05:25:12 +02:00
|
|
|
///
|
|
|
|
/// Memory reads and writes are issued asynchronously, so we need to insert
|
|
|
|
/// S_WAITCNT instructions when we want to access any of their results or
|
|
|
|
/// overwrite any register that's used asynchronously.
|
AMDGPU/InsertWaitcnts: Simplify pending events tracking
Summary:
Instead of storing the "score" (last time point) of the various relevant
events, only store whether an event is pending or not.
This is sufficient, because whenever only one event of a count type is
pending, its last time point is naturally the upper bound of all time
points of this count type, and when multiple event types are pending,
the count type has gone out of order and an s_waitcnt to 0 is required
to clear any pending event type (and will then clear all pending event
types for that count type).
This also removes the special handling of GDS_GPR_LOCK and EXP_GPR_LOCK.
I do not understand what this special handling ever attempted to achieve.
It has existed ever since the original port from an internal code base,
so my best guess is that it solved a problem related to EXEC handling in
that internal code base.
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54228
llvm-svn: 347850
2018-11-29 12:06:14 +01:00
|
|
|
///
|
|
|
|
/// TODO: This pass currently keeps one timeline per hardware counter. A more
|
|
|
|
/// finely-grained approach that keeps one timeline per event type could
|
|
|
|
/// sometimes get away with generating weaker s_waitcnt instructions. For
|
|
|
|
/// example, when both SMEM and LDS are in flight and we need to wait for
|
|
|
|
/// the i-th-last LDS instruction, then an lgkmcnt(i) is actually sufficient,
|
|
|
|
/// but the pass will currently generate a conservative lgkmcnt(0) because
|
|
|
|
/// multiple event types are in flight.
|
2017-04-12 05:25:12 +02:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "AMDGPU.h"
|
|
|
|
#include "AMDGPUSubtarget.h"
|
|
|
|
#include "SIDefines.h"
|
|
|
|
#include "SIInstrInfo.h"
|
|
|
|
#include "SIMachineFunctionInfo.h"
|
2017-08-08 02:47:13 +02:00
|
|
|
#include "SIRegisterInfo.h"
|
2017-04-12 05:25:12 +02:00
|
|
|
#include "Utils/AMDGPUBaseInfo.h"
|
2017-08-08 02:47:13 +02:00
|
|
|
#include "llvm/ADT/DenseMap.h"
|
|
|
|
#include "llvm/ADT/DenseSet.h"
|
2017-04-12 05:25:12 +02:00
|
|
|
#include "llvm/ADT/PostOrderIterator.h"
|
2017-08-08 02:47:13 +02:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
|
|
|
#include "llvm/ADT/SmallVector.h"
|
|
|
|
#include "llvm/CodeGen/MachineBasicBlock.h"
|
2017-04-12 05:25:12 +02:00
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
|
|
|
#include "llvm/CodeGen/MachineFunctionPass.h"
|
2017-08-08 02:47:13 +02:00
|
|
|
#include "llvm/CodeGen/MachineInstr.h"
|
2017-04-12 05:25:12 +02:00
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
2017-08-08 02:47:13 +02:00
|
|
|
#include "llvm/CodeGen/MachineMemOperand.h"
|
|
|
|
#include "llvm/CodeGen/MachineOperand.h"
|
2017-04-12 05:25:12 +02:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2017-08-08 02:47:13 +02:00
|
|
|
#include "llvm/IR/DebugLoc.h"
|
|
|
|
#include "llvm/Pass.h"
|
|
|
|
#include "llvm/Support/Debug.h"
|
2018-04-25 21:21:26 +02:00
|
|
|
#include "llvm/Support/DebugCounter.h"
|
2017-08-08 02:47:13 +02:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
|
|
|
#include "llvm/Support/raw_ostream.h"
|
|
|
|
#include <algorithm>
|
|
|
|
#include <cassert>
|
|
|
|
#include <cstdint>
|
|
|
|
#include <cstring>
|
|
|
|
#include <memory>
|
|
|
|
#include <utility>
|
|
|
|
#include <vector>
|
2017-04-12 05:25:12 +02:00
|
|
|
|
2018-04-25 21:21:26 +02:00
|
|
|
using namespace llvm;
|
|
|
|
|
2017-04-12 05:25:12 +02:00
|
|
|
#define DEBUG_TYPE "si-insert-waitcnts"
|
|
|
|
|
2018-04-25 21:21:26 +02:00
|
|
|
// Debug counters used to force emission of each kind of s_waitcnt wait,
// for isolating miscompiles (driven via -debug-counter on the command line).
DEBUG_COUNTER(ForceExpCounter, DEBUG_TYPE"-forceexp",
              "Force emit s_waitcnt expcnt(0) instrs");
DEBUG_COUNTER(ForceLgkmCounter, DEBUG_TYPE"-forcelgkm",
              "Force emit s_waitcnt lgkmcnt(0) instrs");
DEBUG_COUNTER(ForceVMCounter, DEBUG_TYPE"-forcevm",
              "Force emit s_waitcnt vmcnt(0) instrs");
|
|
|
|
|
2019-03-14 22:23:59 +01:00
|
|
|
// Hidden command-line override: when set, every waitcnt this pass emits is
// the maximally conservative vmcnt(0) expcnt(0) lgkmcnt(0) form.
static cl::opt<bool> ForceEmitZeroFlag(
    "amdgpu-waitcnt-forcezero",
    cl::desc("Force all waitcnt instrs to be emitted as s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)"),
    cl::init(false), cl::Hidden);
|
2017-04-12 05:25:12 +02:00
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
AMDGPU/InsertWaitcnts: Use foreach loops for inst and wait event types
Summary:
It hides the type casting ugliness, and I happened to have to add a new
such loop (in a later patch).
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54227
llvm-svn: 347849
2018-11-29 12:06:11 +01:00
|
|
|
template <typename EnumT>
|
|
|
|
class enum_iterator
|
|
|
|
: public iterator_facade_base<enum_iterator<EnumT>,
|
|
|
|
std::forward_iterator_tag, const EnumT> {
|
|
|
|
EnumT Value;
|
|
|
|
public:
|
|
|
|
enum_iterator() = default;
|
|
|
|
enum_iterator(EnumT Value) : Value(Value) {}
|
|
|
|
|
|
|
|
enum_iterator &operator++() {
|
|
|
|
Value = static_cast<EnumT>(Value + 1);
|
|
|
|
return *this;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool operator==(const enum_iterator &RHS) const { return Value == RHS.Value; }
|
|
|
|
|
|
|
|
EnumT operator*() const { return Value; }
|
|
|
|
};
|
|
|
|
|
2017-04-12 05:25:12 +02:00
|
|
|
// Class of object that encapsulates latest instruction counter score
|
|
|
|
// associated with the operand. Used for determining whether
|
|
|
|
// s_waitcnt instruction needs to be emited.
|
|
|
|
|
|
|
|
// Bit corresponding to counter type \p t, for building masks over
// InstCounterType values.
#define CNT_MASK(t) (1u << (t))

// The hardware counters an s_waitcnt can wait on. NUM_INST_CNTS is the
// number of counter kinds, not a real counter.
enum InstCounterType { VM_CNT = 0, LGKM_CNT, EXP_CNT, VS_CNT, NUM_INST_CNTS };
|
2017-04-12 05:25:12 +02:00
|
|
|
|
AMDGPU/InsertWaitcnts: Use foreach loops for inst and wait event types
Summary:
It hides the type casting ugliness, and I happened to have to add a new
such loop (in a later patch).
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54227
llvm-svn: 347849
2018-11-29 12:06:11 +01:00
|
|
|
// Range over all counter kinds, VM_CNT up to (but excluding) NUM_INST_CNTS,
// for use in range-for loops.
iterator_range<enum_iterator<InstCounterType>> inst_counter_types() {
  auto First = enum_iterator<InstCounterType>(VM_CNT);
  auto Last = enum_iterator<InstCounterType>(NUM_INST_CNTS);
  return make_range(First, Last);
}
|
|
|
|
|
2017-08-08 02:47:13 +02:00
|
|
|
using RegInterval = std::pair<signed, signed>;
|
2017-04-12 05:25:12 +02:00
|
|
|
|
|
|
|
// Per-subtarget limits, read through WaitcntBrackets::getWaitCountMax().
struct {
  uint32_t VmcntMax;   // Maximum vmcnt wait count.
  uint32_t ExpcntMax;  // Maximum expcnt wait count.
  uint32_t LgkmcntMax; // Maximum lgkmcnt wait count.
  uint32_t VscntMax;   // Maximum vscnt wait count.
  int32_t NumVGPRsMax; // VGPR count limit (not used in this chunk).
  int32_t NumSGPRsMax; // SGPR count limit (not used in this chunk).
} HardwareLimits;
|
|
|
|
|
|
|
|
// Register-number encoding bounds. NOTE(review): presumably the first/last
// hardware encodings of VGPRs and SGPRs for the subtarget — initialization
// is outside this chunk; confirm against the pass setup code.
struct {
  unsigned VGPR0;
  unsigned VGPRL;
  unsigned SGPR0;
  unsigned SGPRL;
} RegisterEncoding;
|
|
|
|
|
|
|
|
// Events that can leave a hardware counter pending. Each event is
// associated with exactly one InstCounterType via WaitEventMaskForInst.
enum WaitEventType {
  VMEM_ACCESS,       // vector-memory read & write
  VMEM_READ_ACCESS,  // vector-memory read
  VMEM_WRITE_ACCESS, // vector-memory write
  LDS_ACCESS,        // lds read & write
  GDS_ACCESS,        // gds read & write
  SQ_MESSAGE,        // send message
  SMEM_ACCESS,       // scalar-memory read & write
  EXP_GPR_LOCK,      // export holding on its data src
  GDS_GPR_LOCK,      // GDS holding on its data and addr src
  EXP_POS_ACCESS,    // write to export position
  EXP_PARAM_ACCESS,  // write to export parameter
  VMW_GPR_LOCK,      // vector-memory write holding on its data src
  NUM_WAIT_EVENTS,
};
|
|
|
|
|
AMDGPU/InsertWaitcnts: Simplify pending events tracking
Summary:
Instead of storing the "score" (last time point) of the various relevant
events, only store whether an event is pending or not.
This is sufficient, because whenever only one event of a count type is
pending, its last time point is naturally the upper bound of all time
points of this count type, and when multiple event types are pending,
the count type has gone out of order and an s_waitcnt to 0 is required
to clear any pending event type (and will then clear all pending event
types for that count type).
This also removes the special handling of GDS_GPR_LOCK and EXP_GPR_LOCK.
I do not understand what this special handling ever attempted to achieve.
It has existed ever since the original port from an internal code base,
so my best guess is that it solved a problem related to EXEC handling in
that internal code base.
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54228
llvm-svn: 347850
2018-11-29 12:06:14 +01:00
|
|
|
// For each counter kind (indexed by InstCounterType), the bitmask over
// WaitEventType of the events associated with that counter. The masks are
// disjoint: eventCounter() relies on each event belonging to one counter.
static const uint32_t WaitEventMaskForInst[NUM_INST_CNTS] = {
    // VM_CNT
    (1 << VMEM_ACCESS) | (1 << VMEM_READ_ACCESS),
    // LGKM_CNT
    (1 << SMEM_ACCESS) | (1 << LDS_ACCESS) | (1 << GDS_ACCESS) |
    (1 << SQ_MESSAGE),
    // EXP_CNT
    (1 << EXP_GPR_LOCK) | (1 << GDS_GPR_LOCK) | (1 << VMW_GPR_LOCK) |
    (1 << EXP_PARAM_ACCESS) | (1 << EXP_POS_ACCESS),
    // VS_CNT
    (1 << VMEM_WRITE_ACCESS)
};
|
AMDGPU/InsertWaitcnts: Use foreach loops for inst and wait event types
Summary:
It hides the type casting ugliness, and I happened to have to add a new
such loop (in a later patch).
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54227
llvm-svn: 347849
2018-11-29 12:06:11 +01:00
|
|
|
|
2017-04-12 05:25:12 +02:00
|
|
|
// Numbering of register slots in the scoring tables. The mapping is:
//   0                .. SQ_MAX_PGM_VGPRS-1               real VGPRs
//   SQ_MAX_PGM_VGPRS .. NUM_ALL_VGPRS-1                  extra VGPR-like slots
//   NUM_ALL_VGPRS    .. NUM_ALL_VGPRS+SQ_MAX_PGM_SGPRS-1 real SGPRs
// We reserve a fixed number of VGPR slots in the scoring tables for
// special tokens like SCMEM_LDS (needed for buffer load to LDS).
enum RegisterMapping {
  SQ_MAX_PGM_VGPRS = 256, // Maximum programmable VGPRs across all targets.
  SQ_MAX_PGM_SGPRS = 256, // Maximum programmable SGPRs across all targets.
  NUM_EXTRA_VGPRS = 1,    // A reserved slot for DS.
  EXTRA_VGPR_LDS = 0,     // This is a placeholder the Shader algorithm uses.
  NUM_ALL_VGPRS = SQ_MAX_PGM_VGPRS + NUM_EXTRA_VGPRS, // Where SGPR starts.
};
|
|
|
|
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
/// Tighten \p Wait so that it also waits for counter \p T to reach
/// \p Count, keeping any stricter (smaller) count already requested.
void addWait(AMDGPU::Waitcnt &Wait, InstCounterType T, unsigned Count) {
  if (T == VM_CNT)
    Wait.VmCnt = std::min(Wait.VmCnt, Count);
  else if (T == EXP_CNT)
    Wait.ExpCnt = std::min(Wait.ExpCnt, Count);
  else if (T == LGKM_CNT)
    Wait.LgkmCnt = std::min(Wait.LgkmCnt, Count);
  else if (T == VS_CNT)
    Wait.VsCnt = std::min(Wait.VsCnt, Count);
  else
    llvm_unreachable("bad InstCounterType");
}
|
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
// This objects maintains the current score brackets of each wait counter, and
|
|
|
|
// a per-register scoreboard for each wait counter.
|
|
|
|
//
|
2017-04-12 05:25:12 +02:00
|
|
|
// We also maintain the latest score for every event type that can change the
|
|
|
|
// waitcnt in order to know if there are multiple types of events within
|
|
|
|
// the brackets. When multiple types of event happen in the bracket,
|
2018-03-14 23:04:32 +01:00
|
|
|
// wait count may get decreased out of order, therefore we need to put in
|
2017-04-12 05:25:12 +02:00
|
|
|
// "s_waitcnt 0" before use.
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
class WaitcntBrackets {
|
2017-04-12 05:25:12 +02:00
|
|
|
public:
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
// Construct empty brackets for the given subtarget; zero-initialize the
// per-counter VGPR score tables.
WaitcntBrackets(const GCNSubtarget *SubTarget) : ST(SubTarget) {
  for (auto T : inst_counter_types())
    memset(VgprScores[T], 0, sizeof(VgprScores[T]));
}
|
|
|
|
|
AMDGPU/InsertWaitcnt: Consistently use uint32_t for scores / time points
Summary:
There is one obsolete reference to using -1 as an indication of "unknown",
but this isn't actually used anywhere.
Using unsigned makes robust wrapping checks easier.
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, llvm-commits, tpr, t-tye, hakzsam
Differential Revision: https://reviews.llvm.org/D54230
llvm-svn: 347852
2018-11-29 12:06:21 +01:00
|
|
|
// Maximum wait count the hardware supports for counter \p T, taken from
// the global HardwareLimits; 0 for any unknown counter kind.
static uint32_t getWaitCountMax(InstCounterType T) {
  if (T == VM_CNT)
    return HardwareLimits.VmcntMax;
  if (T == LGKM_CNT)
    return HardwareLimits.LgkmcntMax;
  if (T == EXP_CNT)
    return HardwareLimits.ExpcntMax;
  if (T == VS_CNT)
    return HardwareLimits.VscntMax;
  return 0;
}
|
2017-04-12 05:25:12 +02:00
|
|
|
|
AMDGPU/InsertWaitcnt: Consistently use uint32_t for scores / time points
Summary:
There is one obsolete reference to using -1 as an indication of "unknown",
but this isn't actually used anywhere.
Using unsigned makes robust wrapping checks easier.
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, llvm-commits, tpr, t-tye, hakzsam
Differential Revision: https://reviews.llvm.org/D54230
llvm-svn: 347852
2018-11-29 12:06:21 +01:00
|
|
|
// Lower bound of the score bracket for counter \p T. Out-of-range T is a
// programmer error (asserted); in release builds it safely yields 0.
uint32_t getScoreLB(InstCounterType T) const {
  assert(T < NUM_INST_CNTS);
  return T < NUM_INST_CNTS ? ScoreLBs[T] : 0;
}
|
2017-04-12 05:25:12 +02:00
|
|
|
|
AMDGPU/InsertWaitcnt: Consistently use uint32_t for scores / time points
Summary:
There is one obsolete reference to using -1 as an indication of "unknown",
but this isn't actually used anywhere.
Using unsigned makes robust wrapping checks easier.
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, llvm-commits, tpr, t-tye, hakzsam
Differential Revision: https://reviews.llvm.org/D54230
llvm-svn: 347852
2018-11-29 12:06:21 +01:00
|
|
|
// Upper bound of the score bracket for counter \p T. Out-of-range T is a
// programmer error (asserted); in release builds it safely yields 0.
uint32_t getScoreUB(InstCounterType T) const {
  assert(T < NUM_INST_CNTS);
  return T < NUM_INST_CNTS ? ScoreUBs[T] : 0;
}
|
2017-04-12 05:25:12 +02:00
|
|
|
|
|
|
|
// Mapping from event to counter.
|
|
|
|
InstCounterType eventCounter(WaitEventType E) {
|
2019-05-03 23:53:53 +02:00
|
|
|
if (WaitEventMaskForInst[VM_CNT] & (1 << E))
|
2017-04-12 05:25:12 +02:00
|
|
|
return VM_CNT;
|
AMDGPU/InsertWaitcnts: Simplify pending events tracking
Summary:
Instead of storing the "score" (last time point) of the various relevant
events, only store whether an event is pending or not.
This is sufficient, because whenever only one event of a count type is
pending, its last time point is naturally the upper bound of all time
points of this count type, and when multiple event types are pending,
the count type has gone out of order and an s_waitcnt to 0 is required
to clear any pending event type (and will then clear all pending event
types for that count type).
This also removes the special handling of GDS_GPR_LOCK and EXP_GPR_LOCK.
I do not understand what this special handling ever attempted to achieve.
It has existed ever since the original port from an internal code base,
so my best guess is that it solved a problem related to EXEC handling in
that internal code base.
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54228
llvm-svn: 347850
2018-11-29 12:06:14 +01:00
|
|
|
if (WaitEventMaskForInst[LGKM_CNT] & (1 << E))
|
2017-04-12 05:25:12 +02:00
|
|
|
return LGKM_CNT;
|
2019-05-03 23:53:53 +02:00
|
|
|
if (WaitEventMaskForInst[VS_CNT] & (1 << E))
|
|
|
|
return VS_CNT;
|
AMDGPU/InsertWaitcnts: Simplify pending events tracking
Summary:
Instead of storing the "score" (last time point) of the various relevant
events, only store whether an event is pending or not.
This is sufficient, because whenever only one event of a count type is
pending, its last time point is naturally the upper bound of all time
points of this count type, and when multiple event types are pending,
the count type has gone out of order and an s_waitcnt to 0 is required
to clear any pending event type (and will then clear all pending event
types for that count type).
This also removes the special handling of GDS_GPR_LOCK and EXP_GPR_LOCK.
I do not understand what this special handling ever attempted to achieve.
It has existed ever since the original port from an internal code base,
so my best guess is that it solved a problem related to EXEC handling in
that internal code base.
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54228
llvm-svn: 347850
2018-11-29 12:06:14 +01:00
|
|
|
assert(WaitEventMaskForInst[EXP_CNT] & (1 << E));
|
|
|
|
return EXP_CNT;
|
2017-04-12 05:25:12 +02:00
|
|
|
}
|
|
|
|
|
AMDGPU/InsertWaitcnt: Consistently use uint32_t for scores / time points
Summary:
There is one obsolete reference to using -1 as an indication of "unknown",
but this isn't actually used anywhere.
Using unsigned makes robust wrapping checks easier.
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, llvm-commits, tpr, t-tye, hakzsam
Differential Revision: https://reviews.llvm.org/D54230
llvm-svn: 347852
2018-11-29 12:06:21 +01:00
|
|
|
uint32_t getRegScore(int GprNo, InstCounterType T) {
|
2017-04-12 05:25:12 +02:00
|
|
|
if (GprNo < NUM_ALL_VGPRS) {
|
|
|
|
return VgprScores[T][GprNo];
|
|
|
|
}
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
assert(T == LGKM_CNT);
|
2017-04-12 05:25:12 +02:00
|
|
|
return SgprScores[GprNo - NUM_ALL_VGPRS];
|
|
|
|
}
|
|
|
|
|
|
|
|
void clear() {
|
|
|
|
memset(ScoreLBs, 0, sizeof(ScoreLBs));
|
|
|
|
memset(ScoreUBs, 0, sizeof(ScoreUBs));
|
AMDGPU/InsertWaitcnts: Simplify pending events tracking
Summary:
Instead of storing the "score" (last time point) of the various relevant
events, only store whether an event is pending or not.
This is sufficient, because whenever only one event of a count type is
pending, its last time point is naturally the upper bound of all time
points of this count type, and when multiple event types are pending,
the count type has gone out of order and an s_waitcnt to 0 is required
to clear any pending event type (and will then clear all pending event
types for that count type).
This also removes the special handling of GDS_GPR_LOCK and EXP_GPR_LOCK.
I do not understand what this special handling ever attempted to achieve.
It has existed ever since the original port from an internal code base,
so my best guess is that it solved a problem related to EXEC handling in
that internal code base.
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54228
llvm-svn: 347850
2018-11-29 12:06:14 +01:00
|
|
|
PendingEvents = 0;
|
|
|
|
memset(MixedPendingEvents, 0, sizeof(MixedPendingEvents));
|
AMDGPU/InsertWaitcnts: Use foreach loops for inst and wait event types
Summary:
It hides the type casting ugliness, and I happened to have to add a new
such loop (in a later patch).
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54227
llvm-svn: 347849
2018-11-29 12:06:11 +01:00
|
|
|
for (auto T : inst_counter_types())
|
2017-04-12 05:25:12 +02:00
|
|
|
memset(VgprScores[T], 0, sizeof(VgprScores[T]));
|
|
|
|
memset(SgprScores, 0, sizeof(SgprScores));
|
|
|
|
}
|
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
bool merge(const WaitcntBrackets &Other);
|
|
|
|
|
2017-04-12 05:25:12 +02:00
|
|
|
RegInterval getRegInterval(const MachineInstr *MI, const SIInstrInfo *TII,
|
|
|
|
const MachineRegisterInfo *MRI,
|
|
|
|
const SIRegisterInfo *TRI, unsigned OpNo,
|
|
|
|
bool Def) const;
|
|
|
|
|
|
|
|
int32_t getMaxVGPR() const { return VgprUB; }
|
|
|
|
int32_t getMaxSGPR() const { return SgprUB; }
|
2017-08-08 02:47:13 +02:00
|
|
|
|
AMDGPU/InsertWaitcnts: Some more const-correctness
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54225
llvm-svn: 347192
2018-11-19 13:03:11 +01:00
|
|
|
bool counterOutOfOrder(InstCounterType T) const;
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
bool simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const;
|
|
|
|
bool simplifyWaitcnt(InstCounterType T, unsigned &Count) const;
|
AMDGPU/InsertWaitcnt: Consistently use uint32_t for scores / time points
Summary:
There is one obsolete reference to using -1 as an indication of "unknown",
but this isn't actually used anywhere.
Using unsigned makes robust wrapping checks easier.
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, llvm-commits, tpr, t-tye, hakzsam
Differential Revision: https://reviews.llvm.org/D54230
llvm-svn: 347852
2018-11-29 12:06:21 +01:00
|
|
|
void determineWait(InstCounterType T, uint32_t ScoreToWait,
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
AMDGPU::Waitcnt &Wait) const;
|
|
|
|
void applyWaitcnt(const AMDGPU::Waitcnt &Wait);
|
|
|
|
void applyWaitcnt(InstCounterType T, unsigned Count);
|
2017-04-12 05:25:12 +02:00
|
|
|
void updateByEvent(const SIInstrInfo *TII, const SIRegisterInfo *TRI,
|
|
|
|
const MachineRegisterInfo *MRI, WaitEventType E,
|
|
|
|
MachineInstr &MI);
|
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
bool hasPending() const { return PendingEvents != 0; }
|
AMDGPU/InsertWaitcnts: Simplify pending events tracking
Summary:
Instead of storing the "score" (last time point) of the various relevant
events, only store whether an event is pending or not.
This is sufficient, because whenever only one event of a count type is
pending, its last time point is naturally the upper bound of all time
points of this count type, and when multiple event types are pending,
the count type has gone out of order and an s_waitcnt to 0 is required
to clear any pending event type (and will then clear all pending event
types for that count type).
This also removes the special handling of GDS_GPR_LOCK and EXP_GPR_LOCK.
I do not understand what this special handling ever attempted to achieve.
It has existed ever since the original port from an internal code base,
so my best guess is that it solved a problem related to EXEC handling in
that internal code base.
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54228
llvm-svn: 347850
2018-11-29 12:06:14 +01:00
|
|
|
// True if an event of type E has been recorded and not yet waited upon.
// PendingEvents is a bitmask indexed by WaitEventType.
bool hasPendingEvent(WaitEventType E) const {
  return (PendingEvents & (1 << E)) != 0;
}
|
|
|
|
|
|
|
|
bool hasPendingFlat() const {
|
|
|
|
return ((LastFlat[LGKM_CNT] > ScoreLBs[LGKM_CNT] &&
|
|
|
|
LastFlat[LGKM_CNT] <= ScoreUBs[LGKM_CNT]) ||
|
|
|
|
(LastFlat[VM_CNT] > ScoreLBs[VM_CNT] &&
|
|
|
|
LastFlat[VM_CNT] <= ScoreUBs[VM_CNT]));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remember the current heads of both affected counters as the time point
// of the most recent flat memory operation (see hasPendingFlat).
void setPendingFlat() {
  LastFlat[LGKM_CNT] = ScoreUBs[LGKM_CNT];
  LastFlat[VM_CNT] = ScoreUBs[VM_CNT];
}
|
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
void print(raw_ostream &);
|
|
|
|
void dump() { print(dbgs()); }
|
2017-04-12 05:25:12 +02:00
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
private:
|
|
|
|
// Parameters for merging one counter's scores from another bracket
// (consumed by mergeScore).
struct MergeInfo {
  uint32_t OldLB;      // this bracket's lower bound before the merge
  uint32_t OtherLB;    // the other bracket's lower bound
  uint32_t MyShift;    // offset applied to this bracket's scores
  uint32_t OtherShift; // offset applied to the other bracket's scores
};
|
|
|
|
static bool mergeScore(const MergeInfo &M, uint32_t &Score,
|
|
|
|
uint32_t OtherScore);
|
2017-04-12 05:25:12 +02:00
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
// Set the lower bound (oldest still-outstanding time point) for counter T.
//
// The previous release-mode guard `if (T >= NUM_INST_CNTS) return;` was
// redundant with the assert and silently masked an out-of-range counter
// in release builds; the assert alone states the contract.
void setScoreLB(InstCounterType T, uint32_t Val) {
  assert(T < NUM_INST_CNTS);
  ScoreLBs[T] = Val;
}
|
2017-04-12 05:25:12 +02:00
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
// Set the upper bound (most recent time point) for counter T.
//
// For EXP_CNT, advancing the head also pulls the lower bound up so the
// bracket never spans more than getWaitCountMax(EXP_CNT) time points —
// presumably because the hardware counter saturates at that many
// outstanding events (NOTE(review): confirm against the ISA docs).
// Since the scores are uint32_t, the subtraction can wrap when fewer
// than the maximum number of events have been seen; the `UB < ScoreUBs[T]`
// test rejects that wrapped value.
//
// The previous release-mode guard `if (T >= NUM_INST_CNTS) return;` was
// redundant with the assert and silently masked an out-of-range counter
// in release builds; the assert alone states the contract.
void setScoreUB(InstCounterType T, uint32_t Val) {
  assert(T < NUM_INST_CNTS);
  ScoreUBs[T] = Val;
  if (T == EXP_CNT) {
    uint32_t UB = ScoreUBs[T] - getWaitCountMax(EXP_CNT);
    if (ScoreLBs[T] < UB && UB < ScoreUBs[T])
      ScoreLBs[T] = UB;
  }
}
|
2017-04-12 05:25:12 +02:00
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
void setRegScore(int GprNo, InstCounterType T, uint32_t Val) {
|
|
|
|
if (GprNo < NUM_ALL_VGPRS) {
|
|
|
|
if (GprNo > VgprUB) {
|
|
|
|
VgprUB = GprNo;
|
|
|
|
}
|
|
|
|
VgprScores[T][GprNo] = Val;
|
|
|
|
} else {
|
|
|
|
assert(T == LGKM_CNT);
|
|
|
|
if (GprNo - NUM_ALL_VGPRS > SgprUB) {
|
|
|
|
SgprUB = GprNo - NUM_ALL_VGPRS;
|
|
|
|
}
|
|
|
|
SgprScores[GprNo - NUM_ALL_VGPRS] = Val;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void setExpScore(const MachineInstr *MI, const SIInstrInfo *TII,
|
|
|
|
const SIRegisterInfo *TRI, const MachineRegisterInfo *MRI,
|
|
|
|
unsigned OpNo, uint32_t Val);
|
2017-04-12 05:25:12 +02:00
|
|
|
|
2018-07-11 22:59:01 +02:00
|
|
|
const GCNSubtarget *ST = nullptr;
|
AMDGPU/InsertWaitcnt: Consistently use uint32_t for scores / time points
Summary:
There is one obsolete reference to using -1 as an indication of "unknown",
but this isn't actually used anywhere.
Using unsigned makes robust wrapping checks easier.
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, llvm-commits, tpr, t-tye, hakzsam
Differential Revision: https://reviews.llvm.org/D54230
llvm-svn: 347852
2018-11-29 12:06:21 +01:00
|
|
|
uint32_t ScoreLBs[NUM_INST_CNTS] = {0};
|
|
|
|
uint32_t ScoreUBs[NUM_INST_CNTS] = {0};
|
AMDGPU/InsertWaitcnts: Simplify pending events tracking
Summary:
Instead of storing the "score" (last time point) of the various relevant
events, only store whether an event is pending or not.
This is sufficient, because whenever only one event of a count type is
pending, its last time point is naturally the upper bound of all time
points of this count type, and when multiple event types are pending,
the count type has gone out of order and an s_waitcnt to 0 is required
to clear any pending event type (and will then clear all pending event
types for that count type).
This also removes the special handling of GDS_GPR_LOCK and EXP_GPR_LOCK.
I do not understand what this special handling ever attempted to achieve.
It has existed ever since the original port from an internal code base,
so my best guess is that it solved a problem related to EXEC handling in
that internal code base.
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54228
llvm-svn: 347850
2018-11-29 12:06:14 +01:00
|
|
|
uint32_t PendingEvents = 0;
|
|
|
|
bool MixedPendingEvents[NUM_INST_CNTS] = {false};
|
2017-04-12 05:25:12 +02:00
|
|
|
// Remember the last flat memory operation.
|
AMDGPU/InsertWaitcnt: Consistently use uint32_t for scores / time points
Summary:
There is one obsolete reference to using -1 as an indication of "unknown",
but this isn't actually used anywhere.
Using unsigned makes robust wrapping checks easier.
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, llvm-commits, tpr, t-tye, hakzsam
Differential Revision: https://reviews.llvm.org/D54230
llvm-svn: 347852
2018-11-29 12:06:21 +01:00
|
|
|
uint32_t LastFlat[NUM_INST_CNTS] = {0};
|
2017-04-12 05:25:12 +02:00
|
|
|
// wait_cnt scores for every vgpr.
|
|
|
|
// Keep track of the VgprUB and SgprUB to make merge at join efficient.
|
2017-08-08 02:47:13 +02:00
|
|
|
int32_t VgprUB = 0;
|
|
|
|
int32_t SgprUB = 0;
|
AMDGPU/InsertWaitcnt: Consistently use uint32_t for scores / time points
Summary:
There is one obsolete reference to using -1 as an indication of "unknown",
but this isn't actually used anywhere.
Using unsigned makes robust wrapping checks easier.
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, llvm-commits, tpr, t-tye, hakzsam
Differential Revision: https://reviews.llvm.org/D54230
llvm-svn: 347852
2018-11-29 12:06:21 +01:00
|
|
|
uint32_t VgprScores[NUM_INST_CNTS][NUM_ALL_VGPRS];
|
2017-04-12 05:25:12 +02:00
|
|
|
// Wait cnt scores for every sgpr, only lgkmcnt is relevant.
|
AMDGPU/InsertWaitcnt: Consistently use uint32_t for scores / time points
Summary:
There is one obsolete reference to using -1 as an indication of "unknown",
but this isn't actually used anywhere.
Using unsigned makes robust wrapping checks easier.
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, llvm-commits, tpr, t-tye, hakzsam
Differential Revision: https://reviews.llvm.org/D54230
llvm-svn: 347852
2018-11-29 12:06:21 +01:00
|
|
|
uint32_t SgprScores[SQ_MAX_PGM_SGPRS] = {0};
|
2017-04-12 05:25:12 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
class SIInsertWaitcnts : public MachineFunctionPass {
|
|
|
|
private:
|
2018-07-11 22:59:01 +02:00
|
|
|
const GCNSubtarget *ST = nullptr;
|
2017-08-08 02:47:13 +02:00
|
|
|
const SIInstrInfo *TII = nullptr;
|
|
|
|
const SIRegisterInfo *TRI = nullptr;
|
|
|
|
const MachineRegisterInfo *MRI = nullptr;
|
2018-09-12 20:50:47 +02:00
|
|
|
AMDGPU::IsaVersion IV;
|
2017-04-12 05:25:12 +02:00
|
|
|
|
2018-02-07 03:21:21 +01:00
|
|
|
DenseSet<MachineInstr *> TrackedWaitcntSet;
|
2017-04-12 05:25:12 +02:00
|
|
|
DenseSet<MachineInstr *> VCCZBugHandledSet;
|
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
struct BlockInfo {
|
|
|
|
MachineBasicBlock *MBB;
|
|
|
|
std::unique_ptr<WaitcntBrackets> Incoming;
|
|
|
|
bool Dirty = true;
|
2017-04-12 05:25:12 +02:00
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
explicit BlockInfo(MachineBasicBlock *MBB) : MBB(MBB) {}
|
|
|
|
};
|
2017-04-12 05:25:12 +02:00
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
std::vector<BlockInfo> BlockInfos; // by reverse post-order traversal index
|
|
|
|
DenseMap<MachineBasicBlock *, unsigned> RpotIdxMap;
|
2017-04-12 05:25:12 +02:00
|
|
|
|
2018-05-07 16:43:28 +02:00
|
|
|
// ForceEmitZeroWaitcnts: force all waitcnts insts to be s_waitcnt 0
|
|
|
|
// because of amdgpu-waitcnt-forcezero flag
|
|
|
|
bool ForceEmitZeroWaitcnts;
|
2018-04-25 21:21:26 +02:00
|
|
|
bool ForceEmitWaitcnt[NUM_INST_CNTS];
|
|
|
|
|
2017-04-12 05:25:12 +02:00
|
|
|
public:
|
|
|
|
static char ID;
|
|
|
|
|
2018-06-26 23:33:38 +02:00
|
|
|
SIInsertWaitcnts() : MachineFunctionPass(ID) {
  // Reference the debug counters so release builds (where they are
  // otherwise unused) do not warn about them.
  (void)ForceExpCounter;
  (void)ForceLgkmCounter;
  (void)ForceVMCounter;
}
|
2017-04-12 05:25:12 +02:00
|
|
|
|
|
|
|
bool runOnMachineFunction(MachineFunction &MF) override;
|
|
|
|
|
|
|
|
// Human-readable pass name reported by the pass manager.
StringRef getPassName() const override {
  return "SI insert wait instructions";
}
|
|
|
|
|
|
|
|
void getAnalysisUsage(AnalysisUsage &AU) const override {
  // The pass only inserts/removes wait instructions; the CFG is untouched.
  AU.setPreservesCFG();
  MachineFunctionPass::getAnalysisUsage(AU);
}
|
|
|
|
|
2018-04-25 21:21:26 +02:00
|
|
|
bool isForceEmitWaitcnt() const {
|
AMDGPU/InsertWaitcnts: Use foreach loops for inst and wait event types
Summary:
It hides the type casting ugliness, and I happened to have to add a new
such loop (in a later patch).
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54227
llvm-svn: 347849
2018-11-29 12:06:11 +01:00
|
|
|
for (auto T : inst_counter_types())
|
2018-04-25 21:21:26 +02:00
|
|
|
if (ForceEmitWaitcnt[T])
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
void setForceEmitWaitcnt() {
|
|
|
|
// For non-debug builds, ForceEmitWaitcnt has been initialized to false;
|
|
|
|
// For debug builds, get the debug counter info and adjust if need be
|
|
|
|
#ifndef NDEBUG
|
|
|
|
if (DebugCounter::isCounterSet(ForceExpCounter) &&
|
|
|
|
DebugCounter::shouldExecute(ForceExpCounter)) {
|
|
|
|
ForceEmitWaitcnt[EXP_CNT] = true;
|
|
|
|
} else {
|
|
|
|
ForceEmitWaitcnt[EXP_CNT] = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (DebugCounter::isCounterSet(ForceLgkmCounter) &&
|
|
|
|
DebugCounter::shouldExecute(ForceLgkmCounter)) {
|
|
|
|
ForceEmitWaitcnt[LGKM_CNT] = true;
|
|
|
|
} else {
|
|
|
|
ForceEmitWaitcnt[LGKM_CNT] = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (DebugCounter::isCounterSet(ForceVMCounter) &&
|
|
|
|
DebugCounter::shouldExecute(ForceVMCounter)) {
|
|
|
|
ForceEmitWaitcnt[VM_CNT] = true;
|
|
|
|
} else {
|
|
|
|
ForceEmitWaitcnt[VM_CNT] = false;
|
|
|
|
}
|
|
|
|
#endif // NDEBUG
|
|
|
|
}
|
|
|
|
|
2017-07-21 20:54:54 +02:00
|
|
|
  // Whether MI may access LDS memory; NOTE(review): the "through FLAT" in the
  // name suggests this is queried for FLAT-class instructions — confirm
  // against the out-of-view definition.
  bool mayAccessLDSThroughFlat(const MachineInstr &MI) const;

  // Compute and insert the s_waitcnt required before MI, merging with a
  // pre-existing wait instruction if one is passed in. Returns whether the
  // code was modified.
  bool generateWaitcntInstBefore(MachineInstr &MI,
                                 WaitcntBrackets &ScoreBrackets,
                                 MachineInstr *OldWaitcntInstr);

  // Record the wait-count events generated by Inst into ScoreBrackets.
  void updateEventWaitcntAfter(MachineInstr &Inst,
                               WaitcntBrackets *ScoreBrackets);

  // Process one basic block; returns whether any waitcnt was inserted or
  // changed.
  bool insertWaitcntInBlock(MachineFunction &MF, MachineBasicBlock &Block,
                            WaitcntBrackets &ScoreBrackets);
};

} // end anonymous namespace
2017-04-12 05:25:12 +02:00
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
// Map operand OpNo of MI onto the half-open range [first, second) of
// score-table slots it occupies: VGPRs occupy [0, SQ_MAX_PGM_VGPRS) and
// SGPRs are shifted up by NUM_ALL_VGPRS. Returns {-1, -1} for operands the
// brackets do not track (non-register, non-allocatable, AGPR, or a use when
// Def is requested).
RegInterval WaitcntBrackets::getRegInterval(const MachineInstr *MI,
                                            const SIInstrInfo *TII,
                                            const MachineRegisterInfo *MRI,
                                            const SIRegisterInfo *TRI,
                                            unsigned OpNo, bool Def) const {
  const MachineOperand &Op = MI->getOperand(OpNo);
  if (!Op.isReg() || !TRI->isInAllocatableClass(Op.getReg()) ||
      (Def && !Op.isDef()) || TRI->isAGPR(*MRI, Op.getReg()))
    return {-1, -1};

  // A use via a PW operand does not need a waitcnt.
  // A partial write is not a WAW.
  assert(!Op.getSubReg() || !Op.isUndef());

  RegInterval Result;
  const MachineRegisterInfo &MRIA = *MRI;

  // Hardware encoding gives a dense index within the VGPR / SGPR file.
  unsigned Reg = TRI->getEncodingValue(Op.getReg());

  if (TRI->isVGPR(MRIA, Op.getReg())) {
    assert(Reg >= RegisterEncoding.VGPR0 && Reg <= RegisterEncoding.VGPRL);
    Result.first = Reg - RegisterEncoding.VGPR0;
    assert(Result.first >= 0 && Result.first < SQ_MAX_PGM_VGPRS);
  } else if (TRI->isSGPRReg(MRIA, Op.getReg())) {
    assert(Reg >= RegisterEncoding.SGPR0 && Reg < SQ_MAX_PGM_SGPRS);
    // SGPR slots live after all VGPR slots in the score table.
    Result.first = Reg - RegisterEncoding.SGPR0 + NUM_ALL_VGPRS;
    assert(Result.first >= NUM_ALL_VGPRS &&
           Result.first < SQ_MAX_PGM_SGPRS + NUM_ALL_VGPRS);
  }
  // TODO: Handle TTMP
  // else if (TRI->isTTMP(MRIA, Reg.getReg())) ...
  else
    return {-1, -1};

  // Wide operands cover one slot per 32 bits of the register class.
  const MachineInstr &MIA = *MI;
  const TargetRegisterClass *RC = TII->getOpRegClass(MIA, OpNo);
  unsigned Size = TRI->getRegSizeInBits(*RC);
  Result.second = Result.first + (Size / 32);

  return Result;
}
|
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
void WaitcntBrackets::setExpScore(const MachineInstr *MI,
|
|
|
|
const SIInstrInfo *TII,
|
|
|
|
const SIRegisterInfo *TRI,
|
|
|
|
const MachineRegisterInfo *MRI, unsigned OpNo,
|
|
|
|
uint32_t Val) {
|
2017-04-12 05:25:12 +02:00
|
|
|
RegInterval Interval = getRegInterval(MI, TII, MRI, TRI, OpNo, false);
|
2018-05-14 14:53:11 +02:00
|
|
|
LLVM_DEBUG({
|
2017-04-12 05:25:12 +02:00
|
|
|
const MachineOperand &Opnd = MI->getOperand(OpNo);
|
|
|
|
assert(TRI->isVGPR(*MRI, Opnd.getReg()));
|
|
|
|
});
|
|
|
|
for (signed RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
|
|
|
|
setRegScore(RegNo, EXP_CNT, Val);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
// Advance the score (time point) of the counter associated with event E and
// attribute the new score to the registers that Inst puts in flight, so that
// later readers/writers of those registers know how far the counter must
// drain before the value is available.
void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII,
                                    const SIRegisterInfo *TRI,
                                    const MachineRegisterInfo *MRI,
                                    WaitEventType E, MachineInstr &Inst) {
  const MachineRegisterInfo &MRIA = *MRI;
  InstCounterType T = eventCounter(E);
  // Each event bumps the upper bound of its counter by one; scores are
  // strictly increasing, so wraparound would corrupt all comparisons.
  uint32_t CurrScore = getScoreUB(T) + 1;
  if (CurrScore == 0)
    report_fatal_error("InsertWaitcnt score wraparound");
  // PendingEvents and ScoreUB need to be updated regardless of whether this
  // event changes the score of a register or not.
  // Examples include vm_cnt for buffer-store or lgkm_cnt for send-message.
  if (!hasPendingEvent(E)) {
    // A second distinct event type on the same counter means its events may
    // complete out of order; remember that so waits go to zero.
    if (PendingEvents & WaitEventMaskForInst[T])
      MixedPendingEvents[T] = true;
    PendingEvents |= 1 << E;
  }
  setScoreUB(T, CurrScore);

  if (T == EXP_CNT) {
    // Put score on the source vgprs. If this is a store, just use those
    // specific register(s).
    if (TII->isDS(Inst) && (Inst.mayStore() || Inst.mayLoad())) {
      int AddrOpIdx =
          AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::addr);
      // All GDS operations must protect their address register (same as
      // export.)
      if (AddrOpIdx != -1) {
        setExpScore(&Inst, TII, TRI, MRI, AddrOpIdx, CurrScore);
      }

      if (Inst.mayStore()) {
        // DS stores carry their payload in data0 and (for 2-address forms)
        // data1; score whichever are present.
        if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                       AMDGPU::OpName::data0) != -1) {
          setExpScore(
              &Inst, TII, TRI, MRI,
              AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data0),
              CurrScore);
        }
        if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                       AMDGPU::OpName::data1) != -1) {
          setExpScore(&Inst, TII, TRI, MRI,
                      AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                                 AMDGPU::OpName::data1),
                      CurrScore);
        }
      } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1 &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_INIT &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_V &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_BR &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_P &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_BARRIER &&
                 Inst.getOpcode() != AMDGPU::DS_APPEND &&
                 Inst.getOpcode() != AMDGPU::DS_CONSUME &&
                 Inst.getOpcode() != AMDGPU::DS_ORDERED_COUNT) {
        // Other DS atomics (minus the listed GWS/append-style ops, which
        // have no VGPR sources to protect): score every VGPR use.
        for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
          const MachineOperand &Op = Inst.getOperand(I);
          if (Op.isReg() && !Op.isDef() && TRI->isVGPR(MRIA, Op.getReg())) {
            setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
          }
        }
      }
    } else if (TII->isFLAT(Inst)) {
      // FLAT stores and no-return atomics expose their payload via the
      // 'data' operand.
      if (Inst.mayStore()) {
        setExpScore(
            &Inst, TII, TRI, MRI,
            AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
            CurrScore);
      } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
        setExpScore(
            &Inst, TII, TRI, MRI,
            AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
            CurrScore);
      }
    } else if (TII->isMIMG(Inst)) {
      // MIMG stores keep their data in operand 0.
      if (Inst.mayStore()) {
        setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
      } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
        setExpScore(
            &Inst, TII, TRI, MRI,
            AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
            CurrScore);
      }
    } else if (TII->isMTBUF(Inst)) {
      if (Inst.mayStore()) {
        setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
      }
    } else if (TII->isMUBUF(Inst)) {
      if (Inst.mayStore()) {
        setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
      } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
        setExpScore(
            &Inst, TII, TRI, MRI,
            AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
            CurrScore);
      }
    } else {
      if (TII->isEXP(Inst)) {
        // For export the destination registers are really temps that
        // can be used as the actual source after export patching, so
        // we need to treat them like sources and set the EXP_CNT
        // score.
        for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
          MachineOperand &DefMO = Inst.getOperand(I);
          if (DefMO.isReg() && DefMO.isDef() &&
              TRI->isVGPR(MRIA, DefMO.getReg())) {
            setRegScore(TRI->getEncodingValue(DefMO.getReg()), EXP_CNT,
                        CurrScore);
          }
        }
      }
      // Score every VGPR the instruction reads.
      for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
        MachineOperand &MO = Inst.getOperand(I);
        if (MO.isReg() && !MO.isDef() && TRI->isVGPR(MRIA, MO.getReg())) {
          setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
        }
      }
    }
#if 0 // TODO: check if this is handled by MUBUF code above.
  } else if (Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORD ||
       Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX2 ||
       Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX4) {
    MachineOperand *MO = TII->getNamedOperand(Inst, AMDGPU::OpName::data);
    unsigned OpNo;//TODO: find the OpNo for this operand;
    RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, OpNo, false);
    for (signed RegNo = Interval.first; RegNo < Interval.second;
         ++RegNo) {
      setRegScore(RegNo + NUM_ALL_VGPRS, t, CurrScore);
    }
#endif
  } else {
    // Match the score to the destination registers.
    for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
      RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, I, true);
      // VM_CNT only tracks VGPR destinations; skip SGPR slots.
      if (T == VM_CNT && Interval.first >= NUM_ALL_VGPRS)
        continue;
      for (signed RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
        setRegScore(RegNo, T, CurrScore);
      }
    }
    // DS stores also dirty the pseudo-register that stands for all of LDS.
    if (TII->isDS(Inst) && Inst.mayStore()) {
      setRegScore(SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS, T, CurrScore);
    }
  }
}
|
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
// Debug dump of the bracket state: for each counter, the number of
// outstanding events (UB - LB) followed by the per-register relative scores
// of every register whose score is still above the lower bound.
void WaitcntBrackets::print(raw_ostream &OS) {
  OS << '\n';
  for (auto T : inst_counter_types()) {
    uint32_t LB = getScoreLB(T);
    uint32_t UB = getScoreUB(T);

    switch (T) {
    case VM_CNT:
      OS << "    VM_CNT(" << UB - LB << "): ";
      break;
    case LGKM_CNT:
      OS << "    LGKM_CNT(" << UB - LB << "): ";
      break;
    case EXP_CNT:
      OS << "    EXP_CNT(" << UB - LB << "): ";
      break;
    case VS_CNT:
      OS << "    VS_CNT(" << UB - LB << "): ";
      break;
    default:
      OS << "    UNKNOWN(" << UB - LB << "): ";
      break;
    }

    if (LB < UB) {
      // Print vgpr scores.
      for (int J = 0; J <= getMaxVGPR(); J++) {
        uint32_t RegScore = getRegScore(J, T);
        // Scores at or below LB are already satisfied — nothing pending.
        if (RegScore <= LB)
          continue;
        uint32_t RelScore = RegScore - LB - 1;
        // Slots past SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS stand for LDS.
        if (J < SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS) {
          OS << RelScore << ":v" << J << " ";
        } else {
          OS << RelScore << ":ds ";
        }
      }
      // Also need to print sgpr scores for lgkm_cnt.
      if (T == LGKM_CNT) {
        for (int J = 0; J <= getMaxSGPR(); J++) {
          uint32_t RegScore = getRegScore(J + NUM_ALL_VGPRS, LGKM_CNT);
          if (RegScore <= LB)
            continue;
          uint32_t RelScore = RegScore - LB - 1;
          OS << RelScore << ":s" << J << " ";
        }
      }
    }
    OS << '\n';
  }
  OS << '\n';
}
|
|
|
|
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
/// Simplify the waitcnt, in the sense of removing redundant counts, and return
|
|
|
|
/// whether a waitcnt instruction is needed at all.
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
bool WaitcntBrackets::simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const {
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
return simplifyWaitcnt(VM_CNT, Wait.VmCnt) |
|
|
|
|
simplifyWaitcnt(EXP_CNT, Wait.ExpCnt) |
|
2019-05-03 23:53:53 +02:00
|
|
|
simplifyWaitcnt(LGKM_CNT, Wait.LgkmCnt) |
|
|
|
|
simplifyWaitcnt(VS_CNT, Wait.VsCnt);
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
}
|
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
// Simplify an individual count of a pre-existing s_waitcnt against the
// current score bracket for counter \p T.
//
// \returns true if the wait is still meaningful (it lies strictly inside
// the (LB, UB) bracket). Otherwise the wait is redundant: \p Count is
// widened to ~0u ("no wait") and false is returned so the caller can drop
// or rebuild it.
bool WaitcntBrackets::simplifyWaitcnt(InstCounterType T,
                                      unsigned &Count) const {
  const uint32_t LB = getScoreLB(T);
  const uint32_t UB = getScoreUB(T);
  // Redundant when the wait allows at least UB outstanding events, or when
  // it would not raise the lower bound. Note Count >= UB is checked first so
  // UB - Count never wraps.
  if (Count >= UB || UB - Count <= LB) {
    Count = ~0u;
    return false;
  }
  return true;
}
|
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
// Determine the wait (if any) that must be added to \p Wait before an
// instruction whose relevant operand carries score \p ScoreToWait for
// counter \p T.
void WaitcntBrackets::determineWait(InstCounterType T, uint32_t ScoreToWait,
                                    AMDGPU::Waitcnt &Wait) const {
  // An s_waitcnt is only needed when the operand's score falls inside the
  // bracket (LB, UB]; anything at or below LB has already completed.
  const uint32_t LB = getScoreLB(T);
  const uint32_t UB = getScoreUB(T);
  if (ScoreToWait <= LB || ScoreToWait > UB)
    return;

  // Two situations force a conservative wait of 0:
  //  * a pending FLAT operation with a VMem/LGKM wait on a target that can
  //    report early completion, and
  //  * a counter whose pending events can decrement out of order (multiple
  //    event types in the bracket).
  // Otherwise the exact distance from the upper bound is sufficient.
  const bool FlatForcesZero = (T == VM_CNT || T == LGKM_CNT) &&
                              hasPendingFlat() &&
                              !ST->hasFlatLgkmVMemCountInOrder();
  if (FlatForcesZero || counterOutOfOrder(T))
    addWait(Wait, T, 0);
  else
    addWait(Wait, T, UB - ScoreToWait);
}
|
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
// Apply a full decoded s_waitcnt to the brackets: each component count is
// forwarded to the per-counter overload, which updates that counter's
// lower bound and pending-event state.
void WaitcntBrackets::applyWaitcnt(const AMDGPU::Waitcnt &Wait) {
  applyWaitcnt(VM_CNT, Wait.VmCnt);
  applyWaitcnt(EXP_CNT, Wait.ExpCnt);
  applyWaitcnt(LGKM_CNT, Wait.LgkmCnt);
  applyWaitcnt(VS_CNT, Wait.VsCnt);
}
|
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
// Update the bracket for counter \p T to reflect that an s_waitcnt with the
// given \p Count has executed.
void WaitcntBrackets::applyWaitcnt(InstCounterType T, unsigned Count) {
  const uint32_t UB = getScoreUB(T);
  // Waiting for at least UB outstanding events conveys no new information.
  if (Count >= UB)
    return;

  if (Count == 0) {
    // The counter fully drained: everything up to UB completed and no event
    // of this type remains pending.
    setScoreLB(T, UB);
    MixedPendingEvents[T] = false;
    PendingEvents &= ~WaitEventMaskForInst[T];
  } else if (!counterOutOfOrder(T)) {
    // Partial wait on an in-order counter: all scores up to UB - Count are
    // now known complete. A partial wait on an out-of-order counter proves
    // nothing, so the brackets are left untouched in that case.
    setScoreLB(T, std::max(getScoreLB(T), UB - Count));
  }
}
|
|
|
|
|
2017-04-12 05:25:12 +02:00
|
|
|
// Where there are multiple types of event in the bracket of a counter,
|
|
|
|
// the decrement may go out of order.
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
bool WaitcntBrackets::counterOutOfOrder(InstCounterType T) const {
|
AMDGPU/InsertWaitcnts: Simplify pending events tracking
Summary:
Instead of storing the "score" (last time point) of the various relevant
events, only store whether an event is pending or not.
This is sufficient, because whenever only one event of a count type is
pending, its last time point is naturally the upper bound of all time
points of this count type, and when multiple event types are pending,
the count type has gone out of order and an s_waitcnt to 0 is required
to clear any pending event type (and will then clear all pending event
types for that count type).
This also removes the special handling of GDS_GPR_LOCK and EXP_GPR_LOCK.
I do not understand what this special handling ever attempted to achieve.
It has existed ever since the original port from an internal code base,
so my best guess is that it solved a problem related to EXEC handling in
that internal code base.
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54228
llvm-svn: 347850
2018-11-29 12:06:14 +01:00
|
|
|
// Scalar memory read always can go out of order.
|
|
|
|
if (T == LGKM_CNT && hasPendingEvent(SMEM_ACCESS))
|
|
|
|
return true;
|
|
|
|
return MixedPendingEvents[T];
|
2017-04-12 05:25:12 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Register the pass with LLVM's pass infrastructure. No analysis
// dependencies are declared between BEGIN and END.
INITIALIZE_PASS_BEGIN(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false,
                      false)
INITIALIZE_PASS_END(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false,
                    false)

// Pass identity: the *address* of ID (not its value) uniquely identifies
// the pass to the pass manager.
char SIInsertWaitcnts::ID = 0;

char &llvm::SIInsertWaitcntsID = SIInsertWaitcnts::ID;
|
|
|
|
|
|
|
|
// Factory for the pass, used when building the target's pass pipeline.
// Ownership of the returned pass transfers to the pass manager (standard
// LLVM pass convention), which is why a raw `new` is used here.
FunctionPass *llvm::createSIInsertWaitcntsPass() {
  return new SIInsertWaitcnts();
}
|
|
|
|
|
|
|
|
static bool readsVCCZ(const MachineInstr &MI) {
|
|
|
|
unsigned Opc = MI.getOpcode();
|
|
|
|
return (Opc == AMDGPU::S_CBRANCH_VCCNZ || Opc == AMDGPU::S_CBRANCH_VCCZ) &&
|
|
|
|
!MI.getOperand(1).isUndef();
|
|
|
|
}
|
|
|
|
|
2019-06-14 23:52:26 +02:00
|
|
|
/// \returns true if the callee inserts an s_waitcnt 0 on function entry.
static bool callWaitsOnFunctionEntry(const MachineInstr &MI) {
  // Currently all conventions wait, but this may not always be the case.
  //
  // TODO: If IPRA is enabled, and the callee is isSafeForNoCSROpt, it may make
  // sense to omit the wait and do it in the caller.
  return true;
}
|
|
|
|
|
|
|
|
/// \returns true if the callee is expected to wait for any outstanding waits
/// before returning.
static bool callWaitsOnFunctionReturn(const MachineInstr &MI) {
  // Conservatively assume every calling convention waits before returning;
  // the \p MI argument is currently unused but kept so per-call decisions
  // can be made here later without changing callers.
  return true;
}
|
|
|
|
|
2018-05-01 17:54:18 +02:00
|
|
|
/// Generate s_waitcnt instruction to be placed before cur_Inst.
|
2017-04-12 05:25:12 +02:00
|
|
|
/// Instructions of a given type are returned in order,
|
|
|
|
/// but instructions of different types can complete out of order.
|
|
|
|
/// We rely on this in-order completion
|
|
|
|
/// and simply assign a score to the memory access instructions.
|
|
|
|
/// We keep track of the active "score bracket" to determine
|
|
|
|
/// if an access of a memory read requires an s_waitcnt
|
|
|
|
/// and if so what the value of each counter is.
|
|
|
|
/// The "score bracket" is bound by the lower bound and upper bound
|
|
|
|
/// scores (*_score_LB and *_score_ub respectively).
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
bool SIInsertWaitcnts::generateWaitcntInstBefore(
|
|
|
|
MachineInstr &MI, WaitcntBrackets &ScoreBrackets,
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
MachineInstr *OldWaitcntInstr) {
|
2018-05-07 16:43:28 +02:00
|
|
|
setForceEmitWaitcnt();
|
2018-04-25 21:21:26 +02:00
|
|
|
bool IsForceEmitWaitcnt = isForceEmitWaitcnt();
|
|
|
|
|
AMDGPU/InsertWaitcnts: Cleanup some old cruft (NFCI)
Summary: Remove redundant logic and simplify control flow.
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits
Differential Revision: https://reviews.llvm.org/D54086
llvm-svn: 346363
2018-11-07 22:53:36 +01:00
|
|
|
if (MI.isDebugInstr())
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
return false;
|
2017-04-12 05:25:12 +02:00
|
|
|
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
AMDGPU::Waitcnt Wait;
|
|
|
|
|
2017-04-12 05:25:12 +02:00
|
|
|
// See if this instruction has a forced S_WAITCNT VM.
|
|
|
|
// TODO: Handle other cases of NeedsWaitcntVmBefore()
|
AMDGPU/InsertWaitcnt: Remove unused WaitAtBeginning
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54229
llvm-svn: 347851
2018-11-29 12:06:18 +01:00
|
|
|
if (MI.getOpcode() == AMDGPU::BUFFER_WBINVL1 ||
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_SC ||
|
2019-05-03 23:53:53 +02:00
|
|
|
MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_VOL ||
|
|
|
|
MI.getOpcode() == AMDGPU::BUFFER_GL0_INV ||
|
|
|
|
MI.getOpcode() == AMDGPU::BUFFER_GL1_INV) {
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
Wait.VmCnt = 0;
|
2017-04-12 05:25:12 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// All waits must be resolved at call return.
|
|
|
|
// NOTE: this could be improved with knowledge of all call sites or
|
|
|
|
// with knowledge of the called routines.
|
AMDGPU: Separate R600 and GCN TableGen files
Summary:
We now have two sets of generated TableGen files, one for R600 and one
for GCN, so each sub-target now has its own tables of instructions,
registers, ISel patterns, etc. This should help reduce compile time
since each sub-target now only has to consider information that
is specific to itself. This will also help prevent the R600
sub-target from slowing down new features for GCN, like disassembler
support, GlobalISel, etc.
Reviewers: arsenm, nhaehnle, jvesely
Reviewed By: arsenm
Subscribers: MatzeB, kzhuravl, wdng, mgorny, yaxunl, dstuttard, tpr, t-tye, javed.absar, llvm-commits
Differential Revision: https://reviews.llvm.org/D46365
llvm-svn: 335942
2018-06-29 01:47:12 +02:00
|
|
|
if (MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG ||
|
2019-06-14 23:52:26 +02:00
|
|
|
MI.getOpcode() == AMDGPU::S_SETPC_B64_return ||
|
|
|
|
(MI.isReturn() && MI.isCall() && !callWaitsOnFunctionEntry(MI))) {
|
2019-05-03 23:53:53 +02:00
|
|
|
Wait = Wait.combined(AMDGPU::Waitcnt::allZero(IV));
|
2017-04-12 05:25:12 +02:00
|
|
|
}
|
|
|
|
// Resolve vm waits before gs-done.
|
|
|
|
else if ((MI.getOpcode() == AMDGPU::S_SENDMSG ||
|
|
|
|
MI.getOpcode() == AMDGPU::S_SENDMSGHALT) &&
|
|
|
|
((MI.getOperand(0).getImm() & AMDGPU::SendMsg::ID_MASK_) ==
|
|
|
|
AMDGPU::SendMsg::ID_GS_DONE)) {
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
Wait.VmCnt = 0;
|
2017-04-12 05:25:12 +02:00
|
|
|
}
|
|
|
|
#if 0 // TODO: the following blocks of logic when we have fence.
|
|
|
|
else if (MI.getOpcode() == SC_FENCE) {
|
|
|
|
const unsigned int group_size =
|
|
|
|
context->shader_info->GetMaxThreadGroupSize();
|
|
|
|
// group_size == 0 means thread group size is unknown at compile time
|
|
|
|
const bool group_is_multi_wave =
|
|
|
|
(group_size == 0 || group_size > target_info->GetWaveFrontSize());
|
|
|
|
const bool fence_is_global = !((SCInstInternalMisc*)Inst)->IsGroupFence();
|
|
|
|
|
|
|
|
for (unsigned int i = 0; i < Inst->NumSrcOperands(); i++) {
|
|
|
|
SCRegType src_type = Inst->GetSrcType(i);
|
|
|
|
switch (src_type) {
|
|
|
|
case SCMEM_LDS:
|
|
|
|
if (group_is_multi_wave ||
|
2017-08-16 18:47:29 +02:00
|
|
|
context->OptFlagIsOn(OPT_R1100_LDSMEM_FENCE_CHICKEN_BIT)) {
|
2018-04-24 17:59:59 +02:00
|
|
|
EmitWaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT,
|
2017-04-12 05:25:12 +02:00
|
|
|
ScoreBrackets->getScoreUB(LGKM_CNT));
|
|
|
|
// LDS may have to wait for VM_CNT after buffer load to LDS
|
|
|
|
if (target_info->HasBufferLoadToLDS()) {
|
2018-04-24 17:59:59 +02:00
|
|
|
EmitWaitcnt |= ScoreBrackets->updateByWait(VM_CNT,
|
2017-04-12 05:25:12 +02:00
|
|
|
ScoreBrackets->getScoreUB(VM_CNT));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case SCMEM_GDS:
|
|
|
|
if (group_is_multi_wave || fence_is_global) {
|
2018-04-24 17:59:59 +02:00
|
|
|
EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
|
2017-08-16 18:47:29 +02:00
|
|
|
ScoreBrackets->getScoreUB(EXP_CNT));
|
2018-04-24 17:59:59 +02:00
|
|
|
EmitWaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT,
|
2017-08-16 18:47:29 +02:00
|
|
|
ScoreBrackets->getScoreUB(LGKM_CNT));
|
2017-04-12 05:25:12 +02:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case SCMEM_UAV:
|
|
|
|
case SCMEM_TFBUF:
|
|
|
|
case SCMEM_RING:
|
|
|
|
case SCMEM_SCATTER:
|
|
|
|
if (group_is_multi_wave || fence_is_global) {
|
2018-04-24 17:59:59 +02:00
|
|
|
EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
|
2017-08-16 18:47:29 +02:00
|
|
|
ScoreBrackets->getScoreUB(EXP_CNT));
|
2018-04-24 17:59:59 +02:00
|
|
|
EmitWaitcnt |= ScoreBrackets->updateByWait(VM_CNT,
|
2017-08-16 18:47:29 +02:00
|
|
|
ScoreBrackets->getScoreUB(VM_CNT));
|
2017-04-12 05:25:12 +02:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case SCMEM_SCRATCH:
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
// Export & GDS instructions do not read the EXEC mask until after the export
|
|
|
|
// is granted (which can occur well after the instruction is issued).
|
|
|
|
// The shader program must flush all EXP operations on the export-count
|
|
|
|
// before overwriting the EXEC mask.
|
|
|
|
else {
|
|
|
|
if (MI.modifiesRegister(AMDGPU::EXEC, TRI)) {
|
|
|
|
// Export and GDS are tracked individually, either may trigger a waitcnt
|
|
|
|
// for EXEC.
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
if (ScoreBrackets.hasPendingEvent(EXP_GPR_LOCK) ||
|
|
|
|
ScoreBrackets.hasPendingEvent(EXP_PARAM_ACCESS) ||
|
|
|
|
ScoreBrackets.hasPendingEvent(EXP_POS_ACCESS) ||
|
|
|
|
ScoreBrackets.hasPendingEvent(GDS_GPR_LOCK)) {
|
AMDGPU/InsertWaitcnts: Simplify pending events tracking
Summary:
Instead of storing the "score" (last time point) of the various relevant
events, only store whether an event is pending or not.
This is sufficient, because whenever only one event of a count type is
pending, its last time point is naturally the upper bound of all time
points of this count type, and when multiple event types are pending,
the count type has gone out of order and an s_waitcnt to 0 is required
to clear any pending event type (and will then clear all pending event
types for that count type).
This also removes the special handling of GDS_GPR_LOCK and EXP_GPR_LOCK.
I do not understand what this special handling ever attempted to achieve.
It has existed ever since the original port from an internal code base,
so my best guess is that it solved a problem related to EXEC handling in
that internal code base.
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54228
llvm-svn: 347850
2018-11-29 12:06:14 +01:00
|
|
|
Wait.ExpCnt = 0;
|
|
|
|
}
|
2017-04-12 05:25:12 +02:00
|
|
|
}
|
|
|
|
|
2019-06-14 23:52:26 +02:00
|
|
|
if (MI.isCall() && callWaitsOnFunctionEntry(MI)) {
|
AMDGPU: Avoid overwriting saved PC
Summary:
An outstanding load with same destination sgpr as call could cause PC to be
updated with junk value on return.
Reviewers: arsenm, rampitec
Reviewed By: arsenm
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D69474
2019-10-28 17:39:20 +01:00
|
|
|
// The function is going to insert a wait on everything in its prolog.
|
|
|
|
// This still needs to be careful if the call target is a load (e.g. a GOT
|
|
|
|
// load). We also need to check WAW depenancy with saved PC.
|
2019-06-14 23:52:26 +02:00
|
|
|
Wait = AMDGPU::Waitcnt();
|
2017-07-21 20:54:54 +02:00
|
|
|
|
2019-06-14 23:52:26 +02:00
|
|
|
int CallAddrOpIdx =
|
|
|
|
AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
|
AMDGPU: Avoid overwriting saved PC
Summary:
An outstanding load with same destination sgpr as call could cause PC to be
updated with junk value on return.
Reviewers: arsenm, rampitec
Reviewed By: arsenm
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D69474
2019-10-28 17:39:20 +01:00
|
|
|
RegInterval CallAddrOpInterval = ScoreBrackets.getRegInterval(
|
|
|
|
&MI, TII, MRI, TRI, CallAddrOpIdx, false);
|
|
|
|
|
|
|
|
for (signed RegNo = CallAddrOpInterval.first;
|
|
|
|
RegNo < CallAddrOpInterval.second; ++RegNo)
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
ScoreBrackets.determineWait(
|
|
|
|
LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);
|
AMDGPU: Avoid overwriting saved PC
Summary:
An outstanding load with same destination sgpr as call could cause PC to be
updated with junk value on return.
Reviewers: arsenm, rampitec
Reviewed By: arsenm
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D69474
2019-10-28 17:39:20 +01:00
|
|
|
|
|
|
|
int RtnAddrOpIdx =
|
|
|
|
AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
|
|
|
|
if (RtnAddrOpIdx != -1) {
|
|
|
|
RegInterval RtnAddrOpInterval = ScoreBrackets.getRegInterval(
|
|
|
|
&MI, TII, MRI, TRI, RtnAddrOpIdx, false);
|
|
|
|
|
|
|
|
for (signed RegNo = RtnAddrOpInterval.first;
|
|
|
|
RegNo < RtnAddrOpInterval.second; ++RegNo)
|
|
|
|
ScoreBrackets.determineWait(
|
|
|
|
LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);
|
2017-04-12 05:25:12 +02:00
|
|
|
}
|
AMDGPU: Avoid overwriting saved PC
Summary:
An outstanding load with same destination sgpr as call could cause PC to be
updated with junk value on return.
Reviewers: arsenm, rampitec
Reviewed By: arsenm
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D69474
2019-10-28 17:39:20 +01:00
|
|
|
|
2019-06-14 23:52:26 +02:00
|
|
|
} else {
|
2017-07-21 20:54:54 +02:00
|
|
|
// FIXME: Should not be relying on memoperands.
|
2019-06-14 23:52:26 +02:00
|
|
|
// Look at the source operands of every instruction to see if
|
|
|
|
// any of them results from a previous memory operation that affects
|
|
|
|
// its current usage. If so, an s_waitcnt instruction needs to be
|
|
|
|
// emitted.
|
|
|
|
// If the source operand was defined by a load, add the s_waitcnt
|
|
|
|
// instruction.
|
2017-04-12 05:25:12 +02:00
|
|
|
for (const MachineMemOperand *Memop : MI.memoperands()) {
|
|
|
|
unsigned AS = Memop->getAddrSpace();
|
2018-08-31 07:49:54 +02:00
|
|
|
if (AS != AMDGPUAS::LOCAL_ADDRESS)
|
2017-04-12 05:25:12 +02:00
|
|
|
continue;
|
|
|
|
unsigned RegNo = SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS;
|
2019-06-14 23:52:26 +02:00
|
|
|
// VM_CNT is only relevant to vgpr or LDS.
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
ScoreBrackets.determineWait(
|
|
|
|
VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait);
|
2017-04-12 05:25:12 +02:00
|
|
|
}
|
2019-06-14 23:52:26 +02:00
|
|
|
|
|
|
|
for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
|
|
|
|
const MachineOperand &Op = MI.getOperand(I);
|
|
|
|
const MachineRegisterInfo &MRIA = *MRI;
|
|
|
|
RegInterval Interval =
|
|
|
|
ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, I, false);
|
|
|
|
for (signed RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
|
|
|
|
if (TRI->isVGPR(MRIA, Op.getReg())) {
|
|
|
|
// VM_CNT is only relevant to vgpr or LDS.
|
|
|
|
ScoreBrackets.determineWait(
|
|
|
|
VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait);
|
|
|
|
}
|
|
|
|
ScoreBrackets.determineWait(
|
|
|
|
LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// End of for loop that looks at all source operands to decide vm_wait_cnt
|
|
|
|
// and lgk_wait_cnt.
|
|
|
|
|
|
|
|
// Two cases are handled for destination operands:
|
|
|
|
// 1) If the destination operand was defined by a load, add the s_waitcnt
|
|
|
|
// instruction to guarantee the right WAW order.
|
|
|
|
// 2) If a destination operand that was used by a recent export/store ins,
|
|
|
|
// add s_waitcnt on exp_cnt to guarantee the WAR order.
|
|
|
|
if (MI.mayStore()) {
|
|
|
|
// FIXME: Should not be relying on memoperands.
|
|
|
|
for (const MachineMemOperand *Memop : MI.memoperands()) {
|
|
|
|
unsigned AS = Memop->getAddrSpace();
|
|
|
|
if (AS != AMDGPUAS::LOCAL_ADDRESS)
|
|
|
|
continue;
|
|
|
|
unsigned RegNo = SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS;
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
ScoreBrackets.determineWait(
|
|
|
|
VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait);
|
|
|
|
ScoreBrackets.determineWait(
|
|
|
|
EXP_CNT, ScoreBrackets.getRegScore(RegNo, EXP_CNT), Wait);
|
2017-04-12 05:25:12 +02:00
|
|
|
}
|
|
|
|
}
|
2019-06-14 23:52:26 +02:00
|
|
|
for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
|
|
|
|
MachineOperand &Def = MI.getOperand(I);
|
|
|
|
const MachineRegisterInfo &MRIA = *MRI;
|
|
|
|
RegInterval Interval =
|
|
|
|
ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, I, true);
|
|
|
|
for (signed RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
|
|
|
|
if (TRI->isVGPR(MRIA, Def.getReg())) {
|
|
|
|
ScoreBrackets.determineWait(
|
|
|
|
VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait);
|
|
|
|
ScoreBrackets.determineWait(
|
|
|
|
EXP_CNT, ScoreBrackets.getRegScore(RegNo, EXP_CNT), Wait);
|
|
|
|
}
|
|
|
|
ScoreBrackets.determineWait(
|
|
|
|
LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);
|
|
|
|
}
|
|
|
|
} // End of for loop that looks at all dest operands.
|
|
|
|
}
|
2017-04-12 05:25:12 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Check to see if this is an S_BARRIER, and if an implicit S_WAITCNT 0
|
|
|
|
// occurs before the instruction. Doing it here prevents any additional
|
|
|
|
// S_WAITCNTs from being emitted if the instruction was marked as
|
|
|
|
// requiring a WAITCNT beforehand.
|
2017-06-02 19:40:26 +02:00
|
|
|
if (MI.getOpcode() == AMDGPU::S_BARRIER &&
|
|
|
|
!ST->hasAutoWaitcntBeforeBarrier()) {
|
2019-05-03 23:53:53 +02:00
|
|
|
Wait = Wait.combined(AMDGPU::Waitcnt::allZero(IV));
|
2017-04-12 05:25:12 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// TODO: Remove this work-around, enable the assert for Bug 457939
|
|
|
|
// after fixing the scheduler. Also, the Shader Compiler code is
|
|
|
|
// independent of target.
|
2019-06-20 01:54:58 +02:00
|
|
|
if (readsVCCZ(MI) && ST->hasReadVCCZBug()) {
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
if (ScoreBrackets.getScoreLB(LGKM_CNT) <
|
|
|
|
ScoreBrackets.getScoreUB(LGKM_CNT) &&
|
|
|
|
ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
Wait.LgkmCnt = 0;
|
2017-04-12 05:25:12 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
// Early-out if no wait is indicated.
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
if (!ScoreBrackets.simplifyWaitcnt(Wait) && !IsForceEmitWaitcnt) {
|
|
|
|
bool Modified = false;
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
if (OldWaitcntInstr) {
|
2019-05-03 23:53:53 +02:00
|
|
|
for (auto II = OldWaitcntInstr->getIterator(), NextI = std::next(II);
|
|
|
|
&*II != &MI; II = NextI, ++NextI) {
|
|
|
|
if (II->isDebugInstr())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (TrackedWaitcntSet.count(&*II)) {
|
|
|
|
TrackedWaitcntSet.erase(&*II);
|
|
|
|
II->eraseFromParent();
|
|
|
|
Modified = true;
|
|
|
|
} else if (II->getOpcode() == AMDGPU::S_WAITCNT) {
|
|
|
|
int64_t Imm = II->getOperand(0).getImm();
|
|
|
|
ScoreBrackets.applyWaitcnt(AMDGPU::decodeWaitcnt(IV, Imm));
|
|
|
|
} else {
|
|
|
|
assert(II->getOpcode() == AMDGPU::S_WAITCNT_VSCNT);
|
|
|
|
assert(II->getOperand(0).getReg() == AMDGPU::SGPR_NULL);
|
|
|
|
ScoreBrackets.applyWaitcnt(
|
|
|
|
AMDGPU::Waitcnt(0, 0, 0, II->getOperand(1).getImm()));
|
|
|
|
}
|
2017-04-12 05:25:12 +02:00
|
|
|
}
|
|
|
|
}
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
return Modified;
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
}
|
2017-04-12 05:25:12 +02:00
|
|
|
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
if (ForceEmitZeroWaitcnts)
|
2019-04-25 20:53:41 +02:00
|
|
|
Wait = AMDGPU::Waitcnt::allZero(IV);
|
2017-04-12 05:25:12 +02:00
|
|
|
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
if (ForceEmitWaitcnt[VM_CNT])
|
|
|
|
Wait.VmCnt = 0;
|
|
|
|
if (ForceEmitWaitcnt[EXP_CNT])
|
|
|
|
Wait.ExpCnt = 0;
|
|
|
|
if (ForceEmitWaitcnt[LGKM_CNT])
|
|
|
|
Wait.LgkmCnt = 0;
|
2019-05-03 23:53:53 +02:00
|
|
|
if (ForceEmitWaitcnt[VS_CNT])
|
|
|
|
Wait.VsCnt = 0;
|
2018-02-15 23:03:55 +01:00
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
ScoreBrackets.applyWaitcnt(Wait);
|
2017-04-12 05:25:12 +02:00
|
|
|
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
AMDGPU::Waitcnt OldWait;
|
2019-05-03 23:53:53 +02:00
|
|
|
bool Modified = false;
|
|
|
|
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
if (OldWaitcntInstr) {
|
2019-05-03 23:53:53 +02:00
|
|
|
for (auto II = OldWaitcntInstr->getIterator(), NextI = std::next(II);
|
|
|
|
&*II != &MI; II = NextI, NextI++) {
|
|
|
|
if (II->isDebugInstr())
|
|
|
|
continue;
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
|
2019-05-03 23:53:53 +02:00
|
|
|
if (II->getOpcode() == AMDGPU::S_WAITCNT) {
|
|
|
|
unsigned IEnc = II->getOperand(0).getImm();
|
|
|
|
AMDGPU::Waitcnt IWait = AMDGPU::decodeWaitcnt(IV, IEnc);
|
|
|
|
OldWait = OldWait.combined(IWait);
|
|
|
|
if (!TrackedWaitcntSet.count(&*II))
|
|
|
|
Wait = Wait.combined(IWait);
|
|
|
|
unsigned NewEnc = AMDGPU::encodeWaitcnt(IV, Wait);
|
|
|
|
if (IEnc != NewEnc) {
|
|
|
|
II->getOperand(0).setImm(NewEnc);
|
|
|
|
Modified = true;
|
|
|
|
}
|
|
|
|
Wait.VmCnt = ~0u;
|
|
|
|
Wait.LgkmCnt = ~0u;
|
|
|
|
Wait.ExpCnt = ~0u;
|
|
|
|
} else {
|
|
|
|
assert(II->getOpcode() == AMDGPU::S_WAITCNT_VSCNT);
|
|
|
|
assert(II->getOperand(0).getReg() == AMDGPU::SGPR_NULL);
|
|
|
|
|
|
|
|
unsigned ICnt = II->getOperand(1).getImm();
|
|
|
|
OldWait.VsCnt = std::min(OldWait.VsCnt, ICnt);
|
|
|
|
if (!TrackedWaitcntSet.count(&*II))
|
|
|
|
Wait.VsCnt = std::min(Wait.VsCnt, ICnt);
|
|
|
|
if (Wait.VsCnt != ICnt) {
|
|
|
|
II->getOperand(1).setImm(Wait.VsCnt);
|
|
|
|
Modified = true;
|
|
|
|
}
|
|
|
|
Wait.VsCnt = ~0u;
|
|
|
|
}
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
|
2019-05-03 23:53:53 +02:00
|
|
|
LLVM_DEBUG(dbgs() << "updateWaitcntInBlock\n"
|
|
|
|
<< "Old Instr: " << MI << '\n'
|
|
|
|
<< "New Instr: " << *II << '\n');
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
|
2019-05-03 23:53:53 +02:00
|
|
|
if (!Wait.hasWait())
|
|
|
|
return Modified;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (Wait.VmCnt != ~0u || Wait.LgkmCnt != ~0u || Wait.ExpCnt != ~0u) {
|
|
|
|
unsigned Enc = AMDGPU::encodeWaitcnt(IV, Wait);
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
auto SWaitInst = BuildMI(*MI.getParent(), MI.getIterator(),
|
|
|
|
MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT))
|
|
|
|
.addImm(Enc);
|
|
|
|
TrackedWaitcntSet.insert(SWaitInst);
|
2019-05-03 23:53:53 +02:00
|
|
|
Modified = true;
|
AMDGPU/InsertWaitcnts: Untangle some semi-global state
Summary:
Reduce the statefulness of the algorithm in two ways:
1. More clearly split generateWaitcntInstBefore into two phases: the
first one which determines the required wait, if any, without changing
the ScoreBrackets, and the second one which actually inserts the wait
and updates the brackets.
2. Communicate pre-existing s_waitcnt instructions using an argument to
generateWaitcntInstBefore instead of through the ScoreBrackets.
To simplify these changes, a Waitcnt structure is introduced which carries
the counts of an s_waitcnt instruction in decoded form.
There are some functional changes:
1. The FIXME for the VCCZ bug workaround was implemented: we only wait for
SMEM instructions as required instead of waiting on all counters.
2. We now properly track pre-existing waitcnt's in all cases, which leads
to less conservative waitcnts being emitted in some cases.
s_load_dword ...
s_waitcnt lgkmcnt(0) <-- pre-existing wait count
ds_read_b32 v0, ...
ds_read_b32 v1, ...
s_waitcnt lgkmcnt(0) <-- this is too conservative
use(v0)
more code
use(v1)
This increases code size a bit, but the reduced latency should still be a
win in basically all cases. The worst code size regressions in my shader-db
are:
WORST REGRESSIONS - Code Size
Before After Delta Percentage
1724 1736 12 0.70 % shaders/private/f1-2015/1334.shader_test [0]
2276 2284 8 0.35 % shaders/private/f1-2015/1306.shader_test [0]
4632 4640 8 0.17 % shaders/private/ue4_elemental/62.shader_test [0]
2376 2384 8 0.34 % shaders/private/f1-2015/1308.shader_test [0]
3284 3292 8 0.24 % shaders/private/talos_principle/1955.shader_test [0]
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54226
llvm-svn: 347848
2018-11-29 12:06:06 +01:00
|
|
|
|
|
|
|
LLVM_DEBUG(dbgs() << "insertWaitcntInBlock\n"
|
|
|
|
<< "Old Instr: " << MI << '\n'
|
|
|
|
<< "New Instr: " << *SWaitInst << '\n');
|
2017-04-12 05:25:12 +02:00
|
|
|
}
|
|
|
|
|
2019-05-03 23:53:53 +02:00
|
|
|
if (Wait.VsCnt != ~0u) {
|
|
|
|
assert(ST->hasVscnt());
|
|
|
|
|
|
|
|
auto SWaitInst =
|
|
|
|
BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
|
|
|
|
TII->get(AMDGPU::S_WAITCNT_VSCNT))
|
|
|
|
.addReg(AMDGPU::SGPR_NULL, RegState::Undef)
|
|
|
|
.addImm(Wait.VsCnt);
|
|
|
|
TrackedWaitcntSet.insert(SWaitInst);
|
|
|
|
Modified = true;
|
|
|
|
|
|
|
|
LLVM_DEBUG(dbgs() << "insertWaitcntInBlock\n"
|
|
|
|
<< "Old Instr: " << MI << '\n'
|
|
|
|
<< "New Instr: " << *SWaitInst << '\n');
|
|
|
|
}
|
|
|
|
|
|
|
|
return Modified;
|
2017-04-12 05:25:12 +02:00
|
|
|
}
|
|
|
|
|
2017-07-21 20:54:54 +02:00
|
|
|
// This is a flat memory operation. Check to see if it has memory
|
|
|
|
// tokens for both LDS and Memory, and if so mark it as a flat.
|
|
|
|
bool SIInsertWaitcnts::mayAccessLDSThroughFlat(const MachineInstr &MI) const {
|
|
|
|
if (MI.memoperands_empty())
|
|
|
|
return true;
|
|
|
|
|
|
|
|
for (const MachineMemOperand *Memop : MI.memoperands()) {
|
|
|
|
unsigned AS = Memop->getAddrSpace();
|
2018-08-31 07:49:54 +02:00
|
|
|
if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS)
|
2017-07-21 20:54:54 +02:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
/// Record the wait-count events generated by \p Inst in \p ScoreBrackets.
///
/// Classifies the instruction (DS/GDS, FLAT, VMEM, SMEM, call, export,
/// message, memtime) and bumps the corresponding counter bracket and the
/// scores of the operands it defines. Calls act as implicit waits.
void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
                                               WaitcntBrackets *ScoreBrackets) {
  // Now look at the instruction opcode. If it is a memory access
  // instruction, update the upper-bound of the appropriate counter's
  // bracket and the destination operand scores.
  // TODO: Use the (TSFlags & SIInstrFlags::LGKM_CNT) property everywhere.
  if (TII->isDS(Inst) && TII->usesLGKM_CNT(Inst)) {
    if (TII->isAlwaysGDS(Inst.getOpcode()) ||
        TII->hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
      // GDS operations take both the access event and the GPR lock event.
      ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_ACCESS, Inst);
      ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_GPR_LOCK, Inst);
    } else {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
    }
  } else if (TII->isFLAT(Inst)) {
    assert(Inst.mayLoad() || Inst.mayStore());

    if (TII->usesVM_CNT(Inst)) {
      // Targets without a separate store counter (vscnt) fold everything
      // into VMEM_ACCESS; otherwise split loads (incl. ret atomics, which
      // getAtomicRetOp identifies) from writes.
      if (!ST->hasVscnt())
        ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_ACCESS, Inst);
      else if (Inst.mayLoad() &&
               AMDGPU::getAtomicRetOp(Inst.getOpcode()) == -1)
        ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_READ_ACCESS, Inst);
      else
        ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_WRITE_ACCESS, Inst);
    }

    if (TII->usesLGKM_CNT(Inst)) {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);

      // This is a flat memory operation, so note it - it will require
      // that both the VM and LGKM be flushed to zero if it is pending when
      // a VM or LGKM dependency occurs.
      if (mayAccessLDSThroughFlat(Inst))
        ScoreBrackets->setPendingFlat();
    }
  } else if (SIInstrInfo::isVMEM(Inst) &&
             // TODO: get a better carve out.
             Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1 &&
             Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1_SC &&
             Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1_VOL &&
             Inst.getOpcode() != AMDGPU::BUFFER_GL0_INV &&
             Inst.getOpcode() != AMDGPU::BUFFER_GL1_INV) {
    if (!ST->hasVscnt())
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_ACCESS, Inst);
    else if ((Inst.mayLoad() &&
              AMDGPU::getAtomicRetOp(Inst.getOpcode()) == -1) ||
             /* IMAGE_GET_RESINFO / IMAGE_GET_LOD */
             (TII->isMIMG(Inst) && !Inst.mayLoad() && !Inst.mayStore()))
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_READ_ACCESS, Inst);
    else if (Inst.mayStore())
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_WRITE_ACCESS, Inst);

    // Stores (and no-ret atomics) also lock the source GPRs until the data
    // has been read out, tracked via the export counter on these targets.
    if (ST->vmemWriteNeedsExpWaitcnt() &&
        (Inst.mayStore() || AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1)) {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMW_GPR_LOCK, Inst);
    }
  } else if (TII->isSMRD(Inst)) {
    ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
  } else if (Inst.isCall()) {
    if (callWaitsOnFunctionReturn(Inst)) {
      // Act as a wait on everything
      ScoreBrackets->applyWaitcnt(AMDGPU::Waitcnt::allZero(IV));
    } else {
      // May need to wait for anything.
      ScoreBrackets->applyWaitcnt(AMDGPU::Waitcnt());
    }
  } else {
    switch (Inst.getOpcode()) {
    case AMDGPU::S_SENDMSG:
    case AMDGPU::S_SENDMSGHALT:
      ScoreBrackets->updateByEvent(TII, TRI, MRI, SQ_MESSAGE, Inst);
      break;
    case AMDGPU::EXP:
    case AMDGPU::EXP_DONE: {
      // Export target selects the event class: 32-63 are parameters,
      // 12-15 are position exports, everything else locks the GPRs.
      int Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::tgt)->getImm();
      if (Imm >= 32 && Imm <= 63)
        ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_PARAM_ACCESS, Inst);
      else if (Imm >= 12 && Imm <= 15)
        ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_POS_ACCESS, Inst);
      else
        ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_GPR_LOCK, Inst);
      break;
    }
    case AMDGPU::S_MEMTIME:
    case AMDGPU::S_MEMREALTIME:
      ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
      break;
    default:
      break;
    }
  }
}
|
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
bool WaitcntBrackets::mergeScore(const MergeInfo &M, uint32_t &Score,
|
|
|
|
uint32_t OtherScore) {
|
|
|
|
uint32_t MyShifted = Score <= M.OldLB ? 0 : Score + M.MyShift;
|
|
|
|
uint32_t OtherShifted =
|
|
|
|
OtherScore <= M.OtherLB ? 0 : OtherScore + M.OtherShift;
|
|
|
|
Score = std::max(MyShifted, OtherShifted);
|
|
|
|
return OtherShifted > MyShifted;
|
|
|
|
}
|
2017-04-12 05:25:12 +02:00
|
|
|
|
AMDGPU/InsertWaitcnts: Remove the dependence on MachineLoopInfo
Summary:
MachineLoopInfo cannot be relied on for correctness, because it cannot
properly recognize loops in irreducible control flow which can be
introduced by late machine basic block optimization passes. See the new
test case for the reduced form of an example that occurred in practice.
Use a simple fixpoint iteration instead.
In order to facilitate this change, refactor WaitcntBrackets so that it
only tracks pending events and registers, rather than also maintaining
state that is relevant for the high-level algorithm. Various accessor
methods can be removed or made private as a consequence.
Affects (in radv):
- dEQP-VK.glsl.loops.special.{for,while}_uniform_iterations.select_iteration_count_{fragment,vertex}
Fixes: r345719 ("AMDGPU: Rewrite SILowerI1Copies to always stay on SALU")
Reviewers: msearles, rampitec, scott.linder, kanarayan
Subscribers: arsenm, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits, hakzsam
Differential Revision: https://reviews.llvm.org/D54231
llvm-svn: 347853
2018-11-29 12:06:26 +01:00
|
|
|
/// Merge the pending events and associated score brackets of \p Other into
/// this brackets status.
///
/// Returns whether the merge resulted in a change that requires tighter waits
/// (i.e. the merged brackets strictly dominate the original brackets).
bool WaitcntBrackets::merge(const WaitcntBrackets &Other) {
  bool StrictDom = false;

  for (auto T : inst_counter_types()) {
    // Merge event flags for this counter
    const bool OldOutOfOrder = counterOutOfOrder(T);
    const uint32_t OldEvents = PendingEvents & WaitEventMaskForInst[T];
    const uint32_t OtherEvents = Other.PendingEvents & WaitEventMaskForInst[T];
    // Any event kind pending in Other but not here tightens the brackets.
    if (OtherEvents & ~OldEvents)
      StrictDom = true;
    // Distinct event kinds pending on the same counter means completion
    // order is no longer known; record the counter as mixed.
    if (Other.MixedPendingEvents[T] ||
        (OldEvents && OtherEvents && OldEvents != OtherEvents))
      MixedPendingEvents[T] = true;
    PendingEvents |= OtherEvents;

    // Merge scores for this counter. Both score ranges are rebased onto a
    // common timeline: the side with fewer pending entries is shifted up so
    // that the upper bounds coincide (see mergeScore).
    const uint32_t MyPending = ScoreUBs[T] - ScoreLBs[T];
    const uint32_t OtherPending = Other.ScoreUBs[T] - Other.ScoreLBs[T];
    MergeInfo M;
    M.OldLB = ScoreLBs[T];
    M.OtherLB = Other.ScoreLBs[T];
    M.MyShift = OtherPending > MyPending ? OtherPending - MyPending : 0;
    M.OtherShift = ScoreUBs[T] - Other.ScoreUBs[T] + M.MyShift;

    // Guard against wrapping the unsigned score space.
    const uint32_t NewUB = ScoreUBs[T] + M.MyShift;
    if (NewUB < ScoreUBs[T])
      report_fatal_error("waitcnt score overflow");
    ScoreUBs[T] = NewUB;
    ScoreLBs[T] = std::min(M.OldLB + M.MyShift, M.OtherLB + M.OtherShift);

    StrictDom |= mergeScore(M, LastFlat[T], Other.LastFlat[T]);

    // Merge per-register scores; iterate over the union of both VGPR ranges.
    bool RegStrictDom = false;
    for (int J = 0, E = std::max(getMaxVGPR(), Other.getMaxVGPR()) + 1; J != E;
         J++) {
      RegStrictDom |= mergeScore(M, VgprScores[T][J], Other.VgprScores[T][J]);
    }

    // SGPR scores are only tracked for the LGKM counter.
    if (T == LGKM_CNT) {
      for (int J = 0, E = std::max(getMaxSGPR(), Other.getMaxSGPR()) + 1;
           J != E; J++) {
        RegStrictDom |= mergeScore(M, SgprScores[J], Other.SgprScores[J]);
      }
    }

    // A tighter register score only matters if the counter was still in
    // order before the merge; out-of-order counters already wait to zero.
    if (RegStrictDom && !OldOutOfOrder)
      StrictDom = true;
  }

  // Track the union of the register ranges seen by either side.
  VgprUB = std::max(getMaxVGPR(), Other.getMaxVGPR());
  SgprUB = std::max(getMaxSGPR(), Other.getMaxSGPR());

  return StrictDom;
}
|
|
|
|
|
|
|
|
// Generate s_waitcnt instructions where needed.
//
// Walks the instructions of \p Block once, and for each instruction:
// tracks any pre-existing waitcnt, inserts a required s_waitcnt before it,
// records the events it generates in \p ScoreBrackets, and applies the
// vccz-read hardware bug workaround where necessary.
// Returns true if the block was modified.
bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
                                            MachineBasicBlock &Block,
                                            WaitcntBrackets &ScoreBrackets) {
  bool Modified = false;

  LLVM_DEBUG({
    dbgs() << "*** Block" << Block.getNumber() << " ***";
    ScoreBrackets.dump();
  });

  // Walk over the instructions.
  MachineInstr *OldWaitcntInstr = nullptr;

  for (MachineBasicBlock::instr_iterator Iter = Block.instr_begin(),
                                         E = Block.instr_end();
       Iter != E;) {
    MachineInstr &Inst = *Iter;

    // Track pre-existing waitcnts from earlier iterations.
    // S_WAITCNT_VSCNT with the null register is a plain wait; with a real
    // register operand it has other semantics and is left alone.
    if (Inst.getOpcode() == AMDGPU::S_WAITCNT ||
        (Inst.getOpcode() == AMDGPU::S_WAITCNT_VSCNT &&
         Inst.getOperand(0).isReg() &&
         Inst.getOperand(0).getReg() == AMDGPU::SGPR_NULL)) {
      // Only the first waitcnt of a run is remembered and handed to
      // generateWaitcntInstBefore for the next real instruction.
      if (!OldWaitcntInstr)
        OldWaitcntInstr = &Inst;
      ++Iter;
      continue;
    }

    // Decide whether the vccz-read workaround applies: a pending SMEM
    // access with lgkmcnt outstanding can leave vccz stale on affected
    // subtargets.
    bool VCCZBugWorkAround = false;
    if (readsVCCZ(Inst) &&
        (!VCCZBugHandledSet.count(&Inst))) {
      if (ScoreBrackets.getScoreLB(LGKM_CNT) <
              ScoreBrackets.getScoreUB(LGKM_CNT) &&
          ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
        if (ST->hasReadVCCZBug())
          VCCZBugWorkAround = true;
      }
    }

    // Generate an s_waitcnt instruction to be placed before
    // cur_Inst, if needed.
    Modified |= generateWaitcntInstBefore(Inst, ScoreBrackets, OldWaitcntInstr);
    OldWaitcntInstr = nullptr;

    updateEventWaitcntAfter(Inst, &ScoreBrackets);

#if 0 // TODO: implement resource type check controlled by options with ub = LB.
    // If this instruction generates a S_SETVSKIP because it is an
    // indexed resource, and we are on Tahiti, then it will also force
    // an S_WAITCNT vmcnt(0)
    if (RequireCheckResourceType(Inst, context)) {
      // Force the score to as if an S_WAITCNT vmcnt(0) is emitted.
      ScoreBrackets->setScoreLB(VM_CNT,
      ScoreBrackets->getScoreUB(VM_CNT));
    }
#endif

    LLVM_DEBUG({
      Inst.print(dbgs());
      ScoreBrackets.dump();
    });

    // TODO: Remove this work-around after fixing the scheduler and enable the
    // assert above.
    if (VCCZBugWorkAround) {
      // Restore the vccz bit. Any time a value is written to vcc, the vcc
      // bit is updated, so we can restore the bit by reading the value of
      // vcc and then writing it back to the register.
      BuildMI(Block, Inst, Inst.getDebugLoc(),
              TII->get(ST->isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64),
              TRI->getVCC())
          .addReg(TRI->getVCC());
      VCCZBugHandledSet.insert(&Inst);
      Modified = true;
    }

    ++Iter;
  }

  return Modified;
}
|
|
|
|
|
|
|
|
/// Run the pass over \p MF: insert/update s_waitcnt instructions in every
/// basic block via a reverse-post-order fixpoint iteration, then insert
/// s_dcache_wb flushes at wave termination points when scalar stores were
/// seen, and finally emit a full wait at the entry of non-entry functions.
/// Returns true if the function was modified.
bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) {
  // Cache subtarget/target info used throughout the pass.
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MRI = &MF.getRegInfo();
  IV = AMDGPU::getIsaVersion(ST->getCPU());
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // Honor the command-line flag that forces zero waitcnts everywhere, and
  // reset the per-counter force flags before processing this function.
  ForceEmitZeroWaitcnts = ForceEmitZeroFlag;
  for (auto T : inst_counter_types())
    ForceEmitWaitcnt[T] = false;

  // Maximum encodable values for each hardware counter on this ISA version.
  HardwareLimits.VmcntMax = AMDGPU::getVmcntBitMask(IV);
  HardwareLimits.ExpcntMax = AMDGPU::getExpcntBitMask(IV);
  HardwareLimits.LgkmcntMax = AMDGPU::getLgkmcntBitMask(IV);
  // Vscnt only exists on subtargets with hasVscnt(); 0 disables tracking.
  HardwareLimits.VscntMax = ST->hasVscnt() ? 63 : 0;

  HardwareLimits.NumVGPRsMax = ST->getAddressableNumVGPRs();
  HardwareLimits.NumSGPRsMax = ST->getAddressableNumSGPRs();
  assert(HardwareLimits.NumVGPRsMax <= SQ_MAX_PGM_VGPRS);
  assert(HardwareLimits.NumSGPRsMax <= SQ_MAX_PGM_SGPRS);

  // Encoding ranges [VGPR0, VGPRL] / [SGPR0, SGPRL] used to map physical
  // registers to score-bracket slots.
  RegisterEncoding.VGPR0 = TRI->getEncodingValue(AMDGPU::VGPR0);
  RegisterEncoding.VGPRL =
      RegisterEncoding.VGPR0 + HardwareLimits.NumVGPRsMax - 1;
  RegisterEncoding.SGPR0 = TRI->getEncodingValue(AMDGPU::SGPR0);
  RegisterEncoding.SGPRL =
      RegisterEncoding.SGPR0 + HardwareLimits.NumSGPRsMax - 1;

  // Clear per-function state left over from a previous function.
  TrackedWaitcntSet.clear();
  VCCZBugHandledSet.clear();
  RpotIdxMap.clear();
  BlockInfos.clear();

  // Keep iterating over the blocks in reverse post order, inserting and
  // updating s_waitcnt where needed, until a fix point is reached.
  // RpotIdxMap records each block's position in the RPO ordering so we can
  // detect back edges (successor index <= current index) below.
  for (MachineBasicBlock *MBB :
       ReversePostOrderTraversal<MachineFunction *>(&MF)) {
    RpotIdxMap[MBB] = BlockInfos.size();
    BlockInfos.emplace_back(MBB);
  }

  // Scratch bracket state, reused across blocks to avoid reallocation.
  std::unique_ptr<WaitcntBrackets> Brackets;
  bool Modified = false;
  bool Repeat;
  do {
    Repeat = false;

    for (BlockInfo &BI : BlockInfos) {
      // Only blocks whose incoming state changed need (re)processing.
      if (!BI.Dirty)
        continue;

      // RPO index of this block (BlockInfos is in RPO order).
      unsigned Idx = std::distance(&*BlockInfos.begin(), &BI);

      // Seed the working brackets from the block's incoming state, or start
      // from a cleared state when no predecessor has published one yet.
      if (BI.Incoming) {
        if (!Brackets)
          Brackets = std::make_unique<WaitcntBrackets>(*BI.Incoming);
        else
          *Brackets = *BI.Incoming;
      } else {
        if (!Brackets)
          Brackets = std::make_unique<WaitcntBrackets>(ST);
      else
          Brackets->clear();
      }

      Modified |= insertWaitcntInBlock(MF, *BI.MBB, *Brackets);
      BI.Dirty = false;

      // Propagate the outgoing bracket state to successors; only needed if
      // some event is still pending at the end of the block.
      if (Brackets->hasPending()) {
        // The brackets can be moved (not copied) into exactly one successor;
        // remember which one so all copies happen first.
        BlockInfo *MoveBracketsToSucc = nullptr;
        for (MachineBasicBlock *Succ : BI.MBB->successors()) {
          unsigned SuccIdx = RpotIdxMap[Succ];
          BlockInfo &SuccBI = BlockInfos[SuccIdx];
          if (!SuccBI.Incoming) {
            SuccBI.Dirty = true;
            // A successor at an earlier/equal RPO index means a back edge;
            // another sweep is required to reach the fix point.
            if (SuccIdx <= Idx)
              Repeat = true;
            if (!MoveBracketsToSucc) {
              MoveBracketsToSucc = &SuccBI;
            } else {
              SuccBI.Incoming = std::make_unique<WaitcntBrackets>(*Brackets);
            }
          } else if (SuccBI.Incoming->merge(*Brackets)) {
            // merge() returned true: the successor's incoming state changed,
            // so it must be reprocessed.
            SuccBI.Dirty = true;
            if (SuccIdx <= Idx)
              Repeat = true;
          }
        }
        if (MoveBracketsToSucc)
          MoveBracketsToSucc->Incoming = std::move(Brackets);
      }
    }
  } while (Repeat);

  // Second phase: find wave termination points and whether any scalar
  // stores occur, to decide if s_dcache_wb flushes are needed.
  SmallVector<MachineBasicBlock *, 4> EndPgmBlocks;

  bool HaveScalarStores = false;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); BI != BE;
       ++BI) {
    MachineBasicBlock &MBB = *BI;

    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;
         ++I) {
      if (!HaveScalarStores && TII->isScalarStore(*I))
        HaveScalarStores = true;

      if (I->getOpcode() == AMDGPU::S_ENDPGM ||
          I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG)
        EndPgmBlocks.push_back(&MBB);
    }
  }

  if (HaveScalarStores) {
    // If scalar writes are used, the cache must be flushed or else the next
    // wave to reuse the same scratch memory can be clobbered.
    //
    // Insert s_dcache_wb at wave termination points if there were any scalar
    // stores, and only if the cache hasn't already been flushed. This could be
    // improved by looking across blocks for flushes in postdominating blocks
    // from the stores but an explicitly requested flush is probably very rare.
    for (MachineBasicBlock *MBB : EndPgmBlocks) {
      // Tracks whether the most recent scalar store (if any) has already
      // been followed by an S_DCACHE_WB within this block.
      bool SeenDCacheWB = false;

      for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
           ++I) {
        if (I->getOpcode() == AMDGPU::S_DCACHE_WB)
          SeenDCacheWB = true;
        else if (TII->isScalarStore(*I))
          SeenDCacheWB = false;

        // FIXME: It would be better to insert this before a waitcnt if any.
        if ((I->getOpcode() == AMDGPU::S_ENDPGM ||
             I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) &&
            !SeenDCacheWB) {
          Modified = true;
          BuildMI(*MBB, I, I->getDebugLoc(), TII->get(AMDGPU::S_DCACHE_WB));
        }
      }
    }
  }

  if (!MFI->isEntryFunction()) {
    // Wait for any outstanding memory operations that the input registers may
    // depend on. We can't track them and it's better to insert the wait after
    // the costly call sequence.

    // TODO: Could insert earlier and schedule more liberally with operations
    // that only use caller preserved registers.
    MachineBasicBlock &EntryBB = MF.front();
    if (ST->hasVscnt())
      BuildMI(EntryBB, EntryBB.getFirstNonPHI(), DebugLoc(),
              TII->get(AMDGPU::S_WAITCNT_VSCNT))
          .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
          .addImm(0);
    BuildMI(EntryBB, EntryBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_WAITCNT))
        .addImm(0);

    Modified = true;
  }

  return Modified;
}
|