2019-06-07 09:35:30 +02:00
|
|
|
//===-- HardwareLoops.cpp - Target Independent Hardware Loops --*- C++ -*-===//
|
|
|
|
//
|
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
/// \file
|
|
|
|
/// Insert hardware loop intrinsics into loops which are deemed profitable by
|
|
|
|
/// the target, by querying TargetTransformInfo. A hardware loop comprises of
|
|
|
|
/// two intrinsics: one, outside the loop, to set the loop iteration count and
|
|
|
|
/// another, in the exit block, to decrement the counter. The decremented value
|
|
|
|
/// can either be carried through the loop via a phi or handled in some opaque
|
|
|
|
/// way by the target.
|
|
|
|
///
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "llvm/ADT/Statistic.h"
|
|
|
|
#include "llvm/Analysis/AssumptionCache.h"
|
|
|
|
#include "llvm/Analysis/LoopInfo.h"
|
2019-11-05 09:56:14 +01:00
|
|
|
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
|
2019-06-07 09:35:30 +02:00
|
|
|
#include "llvm/Analysis/ScalarEvolution.h"
|
2020-06-24 20:02:35 +02:00
|
|
|
#include "llvm/Analysis/TargetLibraryInfo.h"
|
2019-06-07 09:35:30 +02:00
|
|
|
#include "llvm/Analysis/TargetTransformInfo.h"
|
|
|
|
#include "llvm/CodeGen/Passes.h"
|
|
|
|
#include "llvm/CodeGen/TargetPassConfig.h"
|
|
|
|
#include "llvm/IR/BasicBlock.h"
|
Sink all InitializePasses.h includes
This file lists every pass in LLVM, and is included by Pass.h, which is
very popular. Every time we add, remove, or rename a pass in LLVM, it
caused lots of recompilation.
I found this fact by looking at this table, which is sorted by the
number of times a file was changed over the last 100,000 git commits
multiplied by the number of object files that depend on it in the
current checkout:
recompiles touches affected_files header
342380 95 3604 llvm/include/llvm/ADT/STLExtras.h
314730 234 1345 llvm/include/llvm/InitializePasses.h
307036 118 2602 llvm/include/llvm/ADT/APInt.h
213049 59 3611 llvm/include/llvm/Support/MathExtras.h
170422 47 3626 llvm/include/llvm/Support/Compiler.h
162225 45 3605 llvm/include/llvm/ADT/Optional.h
158319 63 2513 llvm/include/llvm/ADT/Triple.h
140322 39 3598 llvm/include/llvm/ADT/StringRef.h
137647 59 2333 llvm/include/llvm/Support/Error.h
131619 73 1803 llvm/include/llvm/Support/FileSystem.h
Before this change, touching InitializePasses.h would cause 1345 files
to recompile. After this change, touching it only causes 550 compiles in
an incremental rebuild.
Reviewers: bkramer, asbirlea, bollu, jdoerfert
Differential Revision: https://reviews.llvm.org/D70211
2019-11-13 22:15:01 +01:00
|
|
|
#include "llvm/IR/Constants.h"
|
2019-06-07 09:35:30 +02:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
|
|
|
#include "llvm/IR/Dominators.h"
|
|
|
|
#include "llvm/IR/IRBuilder.h"
|
|
|
|
#include "llvm/IR/Instructions.h"
|
|
|
|
#include "llvm/IR/IntrinsicInst.h"
|
|
|
|
#include "llvm/IR/Value.h"
|
Sink all InitializePasses.h includes
This file lists every pass in LLVM, and is included by Pass.h, which is
very popular. Every time we add, remove, or rename a pass in LLVM, it
caused lots of recompilation.
I found this fact by looking at this table, which is sorted by the
number of times a file was changed over the last 100,000 git commits
multiplied by the number of object files that depend on it in the
current checkout:
recompiles touches affected_files header
342380 95 3604 llvm/include/llvm/ADT/STLExtras.h
314730 234 1345 llvm/include/llvm/InitializePasses.h
307036 118 2602 llvm/include/llvm/ADT/APInt.h
213049 59 3611 llvm/include/llvm/Support/MathExtras.h
170422 47 3626 llvm/include/llvm/Support/Compiler.h
162225 45 3605 llvm/include/llvm/ADT/Optional.h
158319 63 2513 llvm/include/llvm/ADT/Triple.h
140322 39 3598 llvm/include/llvm/ADT/StringRef.h
137647 59 2333 llvm/include/llvm/Support/Error.h
131619 73 1803 llvm/include/llvm/Support/FileSystem.h
Before this change, touching InitializePasses.h would cause 1345 files
to recompile. After this change, touching it only causes 550 compiles in
an incremental rebuild.
Reviewers: bkramer, asbirlea, bollu, jdoerfert
Differential Revision: https://reviews.llvm.org/D70211
2019-11-13 22:15:01 +01:00
|
|
|
#include "llvm/InitializePasses.h"
|
|
|
|
#include "llvm/Pass.h"
|
|
|
|
#include "llvm/PassRegistry.h"
|
2019-11-15 00:15:48 +01:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
2019-06-07 09:35:30 +02:00
|
|
|
#include "llvm/Support/Debug.h"
|
|
|
|
#include "llvm/Transforms/Scalar.h"
|
2019-07-09 19:53:09 +02:00
|
|
|
#include "llvm/Transforms/Utils.h"
|
2019-06-07 09:35:30 +02:00
|
|
|
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
|
|
|
|
#include "llvm/Transforms/Utils/Local.h"
|
2019-07-09 19:53:09 +02:00
|
|
|
#include "llvm/Transforms/Utils/LoopUtils.h"
|
2020-05-20 11:08:08 +02:00
|
|
|
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
|
2019-06-07 09:35:30 +02:00
|
|
|
|
|
|
|
#define DEBUG_TYPE "hardware-loops"
|
|
|
|
|
|
|
|
#define HW_LOOPS_NAME "Hardware Loop Insertion"
|
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
|
|
|
static cl::opt<bool>
|
|
|
|
ForceHardwareLoops("force-hardware-loops", cl::Hidden, cl::init(false),
|
|
|
|
cl::desc("Force hardware loops intrinsics to be inserted"));
|
|
|
|
|
|
|
|
static cl::opt<bool>
|
|
|
|
ForceHardwareLoopPHI(
|
|
|
|
"force-hardware-loop-phi", cl::Hidden, cl::init(false),
|
|
|
|
cl::desc("Force hardware loop counter to be updated through a phi"));
|
|
|
|
|
|
|
|
static cl::opt<bool>
|
|
|
|
ForceNestedLoop("force-nested-hardware-loop", cl::Hidden, cl::init(false),
|
|
|
|
cl::desc("Force allowance of nested hardware loops"));
|
|
|
|
|
|
|
|
static cl::opt<unsigned>
|
|
|
|
LoopDecrement("hardware-loop-decrement", cl::Hidden, cl::init(1),
|
|
|
|
cl::desc("Set the loop decrement value"));
|
|
|
|
|
|
|
|
static cl::opt<unsigned>
|
|
|
|
CounterBitWidth("hardware-loop-counter-bitwidth", cl::Hidden, cl::init(32),
|
|
|
|
cl::desc("Set the loop counter bitwidth"));
|
|
|
|
|
2019-06-28 09:38:16 +02:00
|
|
|
static cl::opt<bool>
|
|
|
|
ForceGuardLoopEntry(
|
|
|
|
"force-hardware-loop-guard", cl::Hidden, cl::init(false),
|
|
|
|
cl::desc("Force generation of loop guard intrinsic"));
|
|
|
|
|
2019-06-07 09:35:30 +02:00
|
|
|
STATISTIC(NumHWLoops, "Number of loops converted to hardware loops");
|
|
|
|
|
2019-11-05 09:56:14 +01:00
|
|
|
#ifndef NDEBUG
|
|
|
|
static void debugHWLoopFailure(const StringRef DebugMsg,
|
|
|
|
Instruction *I) {
|
|
|
|
dbgs() << "HWLoops: " << DebugMsg;
|
|
|
|
if (I)
|
|
|
|
dbgs() << ' ' << *I;
|
|
|
|
else
|
|
|
|
dbgs() << '.';
|
|
|
|
dbgs() << '\n';
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/// Build an OptimizationRemarkAnalysis explaining that a hardware loop was
/// not created. The remark is attached to \p I's parent block when \p I is
/// non-null (preferring the instruction's debug location when it has one),
/// otherwise to the loop header with the loop's start location.
static OptimizationRemarkAnalysis
createHWLoopAnalysis(StringRef RemarkName, Loop *L, Instruction *I) {
  // Report against the instruction's block when we have one, otherwise the
  // loop header.
  Value *CodeRegion = I ? static_cast<Value *>(I->getParent())
                        : static_cast<Value *>(L->getHeader());
  // Use the instruction's debug location when present; fall back to the
  // loop's start location otherwise.
  DebugLoc DL = L->getStartLoc();
  if (I && I->getDebugLoc())
    DL = I->getDebugLoc();

  OptimizationRemarkAnalysis Remark(DEBUG_TYPE, RemarkName, DL, CodeRegion);
  Remark << "hardware-loop not created: ";
  return Remark;
}
|
|
|
|
|
2019-06-07 09:35:30 +02:00
|
|
|
namespace {
|
|
|
|
|
2019-11-05 09:56:14 +01:00
|
|
|
/// Report a hardware-loop conversion failure both on the debug stream (in
/// debug builds, via debugHWLoopFailure) and as an optimization remark
/// tagged with \p ORETag, optionally pointing at the offending
/// instruction \p I.
void reportHWLoopFailure(const StringRef Msg, const StringRef ORETag,
    OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I = nullptr) {
  LLVM_DEBUG(debugHWLoopFailure(Msg, I));
  ORE->emit(createHWLoopAnalysis(ORETag, TheLoop, I) << Msg);
}
|
|
|
|
|
2019-06-07 09:35:30 +02:00
|
|
|
using TTI = TargetTransformInfo;
|
|
|
|
|
|
|
|
/// Legacy-PM function pass that inserts hardware-loop intrinsics into loops
/// the target reports as profitable (see the \file comment). Walks the
/// outermost loops of each function and converts candidates via
/// TryConvertLoop.
class HardwareLoops : public FunctionPass {
public:
  static char ID;

  HardwareLoops() : FunctionPass(ID) {
    initializeHardwareLoopsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  /// Requires loop info, dominators, SCEV, assumption cache, TTI and the
  /// remark emitter; loop info and the dominator tree are preserved.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
  }

  // Try to convert the given Loop into a hardware loop.
  bool TryConvertLoop(Loop *L);

  // Given that the target believes the loop to be profitable, try to
  // convert it.
  bool TryConvertLoop(HardwareLoopInfo &HWLoopInfo);

private:
  // Analyses and per-function state, all populated in runOnFunction.
  ScalarEvolution *SE = nullptr;
  LoopInfo *LI = nullptr;
  const DataLayout *DL = nullptr;
  OptimizationRemarkEmitter *ORE = nullptr;
  const TargetTransformInfo *TTI = nullptr;
  DominatorTree *DT = nullptr;
  // Whether LCSSA must be kept intact when inserting preheaders.
  bool PreserveLCSSA = false;
  AssumptionCache *AC = nullptr;
  // May be null: the TLI wrapper pass is only queried if available.
  TargetLibraryInfo *LibInfo = nullptr;
  Module *M = nullptr;
  // Sticky across all loops of the function; returned by runOnFunction.
  bool MadeChange = false;
};
|
|
|
|
|
|
|
|
/// Performs the actual rewrite of one candidate loop: expands the trip
/// count, inserts the iteration-setup and decrement intrinsics, and (when
/// requested) threads the counter through a phi. Driven by Create().
class HardwareLoop {
  // Expand the trip count scev into a value that we can use.
  Value *InitLoopCount();

  // Insert the set_loop_iteration intrinsic.
  Value *InsertIterationSetup(Value *LoopCountInit);

  // Insert the loop_decrement intrinsic.
  void InsertLoopDec();

  // Insert the loop_decrement_reg intrinsic.
  Instruction *InsertLoopRegDec(Value *EltsRem);

  // If the target requires the counter value to be updated in the loop,
  // insert a phi to hold the value. The intended purpose is for use by
  // loop_decrement_reg.
  PHINode *InsertPHICounter(Value *NumElts, Value *EltsRem);

  // Create a new cmp, that checks the returned value of loop_decrement*,
  // and update the exit branch to use it.
  void UpdateBranch(Value *EltsRem);

public:
  // Snapshots the relevant fields of Info; L's module is cached for
  // intrinsic declaration lookups.
  HardwareLoop(HardwareLoopInfo &Info, ScalarEvolution &SE,
               const DataLayout &DL,
               OptimizationRemarkEmitter *ORE) :
    SE(SE), DL(DL), ORE(ORE), L(Info.L), M(L->getHeader()->getModule()),
    ExitCount(Info.ExitCount),
    CountType(Info.CountType),
    ExitBranch(Info.ExitBranch),
    LoopDecrement(Info.LoopDecrement),
    UsePHICounter(Info.CounterInReg),
    UseLoopGuard(Info.PerformEntryTest) { }

  void Create();

private:
  ScalarEvolution &SE;
  const DataLayout &DL;
  OptimizationRemarkEmitter *ORE = nullptr;
  Loop *L = nullptr;
  Module *M = nullptr;
  // Trip-count SCEV; mutated by InitLoopCount (extended and incremented).
  const SCEV *ExitCount = nullptr;
  Type *CountType = nullptr;
  BranchInst *ExitBranch = nullptr;
  Value *LoopDecrement = nullptr;
  // Thread the counter through a phi (loop_decrement_reg form).
  bool UsePHICounter = false;
  // Use the 'test and set' entry-guard form; may be cleared by
  // InitLoopCount if the guard cannot be generated safely.
  bool UseLoopGuard = false;
  // Block receiving the counter-setup intrinsic; set by InitLoopCount.
  BasicBlock *BeginBB = nullptr;
};
|
|
|
|
}
|
|
|
|
|
|
|
|
char HardwareLoops::ID = 0;
|
|
|
|
|
|
|
|
/// Entry point of the legacy pass: wires up the required analyses for \p F
/// and attempts to convert every outermost loop (TryConvertLoop recurses
/// into nested loops itself). Returns true iff any loop was converted.
bool HardwareLoops::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  LLVM_DEBUG(dbgs() << "HWLoops: Running on " << F.getName() << "\n");

  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  DL = &F.getParent()->getDataLayout();
  ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
  // TLI is optional; tolerate it being absent.
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  LibInfo = TLIP ? &TLIP->getTLI(F) : nullptr;
  PreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
  AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  M = F.getParent();

  // Only visit outermost loops here; TryConvertLoop processes nested loops
  // first and decides whether the search may continue upwards.
  for (Loop *L : *LI)
    if (L->isOutermost())
      TryConvertLoop(L);

  return MadeChange;
}
|
|
|
|
|
|
|
|
// Return true if the search should stop, which will be when an inner loop is
|
|
|
|
// converted and the parent loop doesn't support containing a hardware loop.
|
|
|
|
bool HardwareLoops::TryConvertLoop(Loop *L) {
  // Process nested loops first.
  bool AnyChanged = false;
  for (Loop *SL : *L)
    AnyChanged |= TryConvertLoop(SL);
  // If an inner loop was converted, do not also convert this parent:
  // nesting of hardware loops is reported as unsupported here.
  if (AnyChanged) {
    reportHWLoopFailure("nested hardware-loops not supported", "HWLoopNested",
                        ORE, L);
    return true; // Stop search.
  }

  LLVM_DEBUG(dbgs() << "HWLoops: Loop " << L->getHeader()->getName() << "\n");

  HardwareLoopInfo HWLoopInfo(L);
  if (!HWLoopInfo.canAnalyze(*LI)) {
    reportHWLoopFailure("cannot analyze loop, irreducible control flow",
                        "HWLoopCannotAnalyze", ORE, L);
    return false;
  }

  // The target decides profitability unless conversion is forced from the
  // command line.
  if (!ForceHardwareLoops &&
      !TTI->isHardwareLoopProfitable(L, *SE, *AC, LibInfo, HWLoopInfo)) {
    reportHWLoopFailure("it's not profitable to create a hardware-loop",
                        "HWLoopNotProfitable", ORE, L);
    return false;
  }

  // Allow overriding of the counter width and loop decrement value.
  if (CounterBitWidth.getNumOccurrences())
    HWLoopInfo.CountType =
      IntegerType::get(M->getContext(), CounterBitWidth);

  if (LoopDecrement.getNumOccurrences())
    HWLoopInfo.LoopDecrement =
      ConstantInt::get(HWLoopInfo.CountType, LoopDecrement);

  MadeChange |= TryConvertLoop(HWLoopInfo);
  // Stop the upward search only when a change was made and neither the
  // target nor the command line permits nesting hardware loops.
  return MadeChange && (!HWLoopInfo.IsNestingLegal && !ForceNestedLoop);
}
|
|
|
|
|
2019-06-19 03:26:31 +02:00
|
|
|
/// Second stage of conversion: the target already deemed the loop
/// profitable; verify it is a structural candidate, ensure a preheader
/// exists, then build the hardware loop. Returns true iff the loop was
/// converted.
bool HardwareLoops::TryConvertLoop(HardwareLoopInfo &HWLoopInfo) {

  Loop *L = HWLoopInfo.L;
  LLVM_DEBUG(dbgs() << "HWLoops: Try to convert profitable loop: " << *L);

  if (!HWLoopInfo.isHardwareLoopCandidate(*SE, *LI, *DT, ForceNestedLoop,
                                          ForceHardwareLoopPHI)) {
    // TODO: there can be many reasons a loop is not considered a
    // candidate, so we should let isHardwareLoopCandidate fill in the
    // reason and then report a better message here.
    reportHWLoopFailure("loop is not a candidate", "HWLoopNoCandidate", ORE, L);
    return false;
  }

  // isHardwareLoopCandidate is expected to have populated the exit info.
  assert(
      (HWLoopInfo.ExitBlock && HWLoopInfo.ExitBranch && HWLoopInfo.ExitCount) &&
      "Hardware Loop must have set exit info.");

  BasicBlock *Preheader = L->getLoopPreheader();

  // If we don't have a preheader, then insert one.
  if (!Preheader)
    Preheader = InsertPreheaderForLoop(L, DT, LI, nullptr, PreserveLCSSA);
  if (!Preheader)
    return false;

  HardwareLoop HWLoop(HWLoopInfo, *SE, *DL, ORE);
  HWLoop.Create();
  ++NumHWLoops;
  return true;
}
|
|
|
|
|
|
|
|
void HardwareLoop::Create() {
|
|
|
|
LLVM_DEBUG(dbgs() << "HWLoops: Converting loop..\n");
|
2019-11-05 09:56:14 +01:00
|
|
|
|
2019-06-28 09:38:16 +02:00
|
|
|
Value *LoopCountInit = InitLoopCount();
|
2019-11-05 09:56:14 +01:00
|
|
|
if (!LoopCountInit) {
|
|
|
|
reportHWLoopFailure("could not safely create a loop count expression",
|
|
|
|
"HWLoopNotSafe", ORE, L);
|
2019-07-09 19:53:09 +02:00
|
|
|
return;
|
2019-11-05 09:56:14 +01:00
|
|
|
}
|
2019-06-07 09:35:30 +02:00
|
|
|
|
[ARM] Alter t2DoLoopStart to define lr
This changes the definition of t2DoLoopStart from
t2DoLoopStart rGPR
to
GPRlr = t2DoLoopStart rGPR
This will hopefully mean that low overhead loops are more tied together,
and we can more reliably generate loops without reverting or being at
the whims of the register allocator.
This is a fairly simple change in itself, but leads to a number of other
required alterations.
- The hardware loop pass, if UsePhi is set, now generates loops of the
form:
%start = llvm.start.loop.iterations(%N)
loop:
%p = phi [%start], [%dec]
%dec = llvm.loop.decrement.reg(%p, 1)
%c = icmp ne %dec, 0
br %c, loop, exit
- For this a new llvm.start.loop.iterations intrinsic was added, identical
to llvm.set.loop.iterations but produces a value as seen above, gluing
the loop together more through def-use chains.
- This new instrinsic conceptually produces the same output as input,
which is taught to SCEV so that the checks in MVETailPredication are not
affected.
- Some minor changes are needed to the ARMLowOverheadLoop pass, but it has
been left mostly as before. We should now more reliably be able to tell
that the t2DoLoopStart is correct without having to prove it, but
t2WhileLoopStart and tail-predicated loops will remain the same.
- And all the tests have been updated. There are a lot of them!
This patch on it's own might cause more trouble that it helps, with more
tail-predicated loops being reverted, but some additional patches can
hopefully improve upon that to get to something that is better overall.
Differential Revision: https://reviews.llvm.org/D89881
2020-11-10 16:57:58 +01:00
|
|
|
Value *Setup = InsertIterationSetup(LoopCountInit);
|
2019-06-07 09:35:30 +02:00
|
|
|
|
|
|
|
if (UsePHICounter || ForceHardwareLoopPHI) {
|
|
|
|
Instruction *LoopDec = InsertLoopRegDec(LoopCountInit);
|
[ARM] Alter t2DoLoopStart to define lr
This changes the definition of t2DoLoopStart from
t2DoLoopStart rGPR
to
GPRlr = t2DoLoopStart rGPR
This will hopefully mean that low overhead loops are more tied together,
and we can more reliably generate loops without reverting or being at
the whims of the register allocator.
This is a fairly simple change in itself, but leads to a number of other
required alterations.
- The hardware loop pass, if UsePhi is set, now generates loops of the
form:
%start = llvm.start.loop.iterations(%N)
loop:
%p = phi [%start], [%dec]
%dec = llvm.loop.decrement.reg(%p, 1)
%c = icmp ne %dec, 0
br %c, loop, exit
- For this a new llvm.start.loop.iterations intrinsic was added, identical
to llvm.set.loop.iterations but produces a value as seen above, gluing
the loop together more through def-use chains.
- This new instrinsic conceptually produces the same output as input,
which is taught to SCEV so that the checks in MVETailPredication are not
affected.
- Some minor changes are needed to the ARMLowOverheadLoop pass, but it has
been left mostly as before. We should now more reliably be able to tell
that the t2DoLoopStart is correct without having to prove it, but
t2WhileLoopStart and tail-predicated loops will remain the same.
- And all the tests have been updated. There are a lot of them!
This patch on it's own might cause more trouble that it helps, with more
tail-predicated loops being reverted, but some additional patches can
hopefully improve upon that to get to something that is better overall.
Differential Revision: https://reviews.llvm.org/D89881
2020-11-10 16:57:58 +01:00
|
|
|
Value *EltsRem = InsertPHICounter(Setup, LoopDec);
|
2019-06-07 09:35:30 +02:00
|
|
|
LoopDec->setOperand(0, EltsRem);
|
|
|
|
UpdateBranch(LoopDec);
|
|
|
|
} else
|
|
|
|
InsertLoopDec();
|
|
|
|
|
|
|
|
// Run through the basic blocks of the loop and see if any of them have dead
|
|
|
|
// PHIs that can be removed.
|
|
|
|
for (auto I : L->blocks())
|
|
|
|
DeleteDeadPHIs(I);
|
|
|
|
}
|
|
|
|
|
2019-06-28 09:38:16 +02:00
|
|
|
/// Return true when the loop entry can use the 'test and set' intrinsic
/// form: the preheader's single predecessor must terminate in a
/// conditional branch whose condition is an equality icmp of \p Count
/// against zero, and the branch must enter the loop (via the preheader) on
/// the non-zero outcome.
static bool CanGenerateTest(Loop *L, Value *Count) {
  // NOTE(review): assumes the loop already has a preheader — confirm
  // callers guarantee this before dereferencing.
  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader->getSinglePredecessor())
    return false;

  BasicBlock *Pred = Preheader->getSinglePredecessor();
  if (!isa<BranchInst>(Pred->getTerminator()))
    return false;

  auto *BI = cast<BranchInst>(Pred->getTerminator());
  if (BI->isUnconditional() || !isa<ICmpInst>(BI->getCondition()))
    return false;

  // Check that the icmp is checking for equality of Count and zero and that
  // a non-zero value results in entering the loop.
  auto *ICmp = cast<ICmpInst>(BI->getCondition());
  LLVM_DEBUG(dbgs() << " - Found condition: " << *ICmp << "\n");
  if (!ICmp->isEquality())
    return false;

  // True when operand OpIdx is a zero constant and the other operand is
  // Count itself.
  auto IsCompareZero = [](ICmpInst *ICmp, Value *Count, unsigned OpIdx) {
    if (auto *Const = dyn_cast<ConstantInt>(ICmp->getOperand(OpIdx)))
      return Const->isZero() && ICmp->getOperand(OpIdx ^ 1) == Count;
    return false;
  };

  if (!IsCompareZero(ICmp, Count, 0) && !IsCompareZero(ICmp, Count, 1))
    return false;

  // For 'ne' the loop is entered through successor 0, for 'eq' through
  // successor 1; that successor must be the preheader.
  unsigned SuccIdx = ICmp->getPredicate() == ICmpInst::ICMP_NE ? 0 : 1;
  if (BI->getSuccessor(SuccIdx) != Preheader)
    return false;

  return true;
}
|
|
|
|
|
|
|
|
/// Materialise the loop-count value to feed the counter-setup intrinsic.
/// Normalises the ExitCount SCEV to CountType, adds one (iteration count =
/// exit count + 1), decides whether the 'test and set' entry-guard form can
/// be used (updating UseLoopGuard), picks the insertion block (cached in
/// BeginBB) and expands the SCEV there. Returns nullptr when the
/// expression cannot be expanded safely.
Value *HardwareLoop::InitLoopCount() {
  LLVM_DEBUG(dbgs() << "HWLoops: Initialising loop counter value:\n");
  // Can we replace a conditional branch with an intrinsic that sets the
  // loop counter and tests that is not zero?

  SCEVExpander SCEVE(SE, DL, "loopcnt");
  // Zero-extend the exit count to the counter type when the widths differ
  // (pointer-typed counts are left as-is).
  if (!ExitCount->getType()->isPointerTy() &&
      ExitCount->getType() != CountType)
    ExitCount = SE.getZeroExtendExpr(ExitCount, CountType);

  // The iteration count is one more than the exit count.
  ExitCount = SE.getAddExpr(ExitCount, SE.getOne(CountType));

  // If we're trying to use the 'test and set' form of the intrinsic, we need
  // to replace a conditional branch that is controlling entry to the loop. It
  // is likely (guaranteed?) that the preheader has an unconditional branch to
  // the loop header, so also check if it has a single predecessor.
  if (SE.isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, ExitCount,
                                  SE.getZero(ExitCount->getType()))) {
    LLVM_DEBUG(dbgs() << " - Attempting to use test.set counter.\n");
    UseLoopGuard |= ForceGuardLoopEntry;
  } else
    UseLoopGuard = false;

  // For the guarded form, try to expand in the preheader's predecessor so
  // the count is available before the entry test.
  BasicBlock *BB = L->getLoopPreheader();
  if (UseLoopGuard && BB->getSinglePredecessor() &&
      cast<BranchInst>(BB->getTerminator())->isUnconditional()) {
    BasicBlock *Predecessor = BB->getSinglePredecessor();
    // If it's not safe to create a while loop then don't force it and create a
    // do-while loop instead
    if (!isSafeToExpandAt(ExitCount, Predecessor->getTerminator(), SE))
      UseLoopGuard = false;
    else
      BB = Predecessor;
  }

  if (!isSafeToExpandAt(ExitCount, BB->getTerminator(), SE)) {
    LLVM_DEBUG(dbgs() << "- Bailing, unsafe to expand ExitCount "
                      << *ExitCount << "\n");
    return nullptr;
  }

  Value *Count = SCEVE.expandCodeFor(ExitCount, CountType,
                                     BB->getTerminator());

  // FIXME: We've expanded Count where we hope to insert the counter setting
  // intrinsic. But, in the case of the 'test and set' form, we may fallback to
  // the just 'set' form and in which case the insertion block is most likely
  // different. It means there will be instruction(s) in a block that possibly
  // aren't needed. The isLoopEntryGuardedByCond is trying to avoid this issue,
  // but it's doesn't appear to work in all cases.

  // Only keep the guard if a suitable entry test actually exists.
  UseLoopGuard = UseLoopGuard && CanGenerateTest(L, Count);
  BeginBB = UseLoopGuard ? BB : L->getLoopPreheader();
  LLVM_DEBUG(dbgs() << " - Loop Count: " << *Count << "\n"
                    << " - Expanded Count in " << BB->getName() << "\n"
                    << " - Will insert set counter intrinsic into: "
                    << BeginBB->getName() << "\n");
  return Count;
}
|
|
|
|
|
[ARM] Alter t2DoLoopStart to define lr
This changes the definition of t2DoLoopStart from
t2DoLoopStart rGPR
to
GPRlr = t2DoLoopStart rGPR
This will hopefully mean that low overhead loops are more tied together,
and we can more reliably generate loops without reverting or being at
the whims of the register allocator.
This is a fairly simple change in itself, but leads to a number of other
required alterations.
- The hardware loop pass, if UsePhi is set, now generates loops of the
form:
%start = llvm.start.loop.iterations(%N)
loop:
%p = phi [%start], [%dec]
%dec = llvm.loop.decrement.reg(%p, 1)
%c = icmp ne %dec, 0
br %c, loop, exit
- For this a new llvm.start.loop.iterations intrinsic was added, identical
to llvm.set.loop.iterations but produces a value as seen above, gluing
the loop together more through def-use chains.
- This new instrinsic conceptually produces the same output as input,
which is taught to SCEV so that the checks in MVETailPredication are not
affected.
- Some minor changes are needed to the ARMLowOverheadLoop pass, but it has
been left mostly as before. We should now more reliably be able to tell
that the t2DoLoopStart is correct without having to prove it, but
t2WhileLoopStart and tail-predicated loops will remain the same.
- And all the tests have been updated. There are a lot of them!
This patch on it's own might cause more trouble that it helps, with more
tail-predicated loops being reverted, but some additional patches can
hopefully improve upon that to get to something that is better overall.
Differential Revision: https://reviews.llvm.org/D89881
2020-11-10 16:57:58 +01:00
|
|
|
/// Insert the hardware-loop "set iteration count" intrinsic at the end of
/// BeginBB and, when a loop guard is used, rewire the guard branch to test the
/// intrinsic's result so it controls entry into the loop.
///
/// Four intrinsic variants exist, selected by two independent flags:
///  - UseLoopGuard: the "test" forms also produce an i1 deciding whether the
///    loop is entered at all (WLS-style while loops).
///  - UsePhi: the "start" forms produce the counter value itself, so it can
///    be threaded through a loop phi (see InsertPHICounter).
///
/// \param LoopCountInit the loop iteration count, already computed and
///        available at BeginBB's terminator.
/// \returns the value the in-loop counter should start from: the intrinsic's
///          counter result when UsePhi is set, otherwise LoopCountInit.
Value* HardwareLoop::InsertIterationSetup(Value *LoopCountInit) {
  // BeginBB is the guard block when UseLoopGuard, otherwise the preheader.
  IRBuilder<> Builder(BeginBB->getTerminator());
  Type *Ty = LoopCountInit->getType();
  bool UsePhi = UsePHICounter || ForceHardwareLoopPHI;
  Intrinsic::ID ID = UseLoopGuard
                         ? (UsePhi ? Intrinsic::test_start_loop_iterations
                                   : Intrinsic::test_set_loop_iterations)
                         : (UsePhi ? Intrinsic::start_loop_iterations
                                   : Intrinsic::set_loop_iterations);
  Function *LoopIter = Intrinsic::getDeclaration(M, ID, Ty);
  Value *LoopSetup = Builder.CreateCall(LoopIter, LoopCountInit);

  // Use the return value of the intrinsic to control the entry of the loop.
  if (UseLoopGuard) {
    assert((isa<BranchInst>(BeginBB->getTerminator()) &&
            cast<BranchInst>(BeginBB->getTerminator())->isConditional()) &&
           "Expected conditional branch");

    // test.start.loop.iterations returns {counter, i1}; the i1 (element 1)
    // is the entry condition. The non-phi test form returns just the i1.
    Value *SetCount =
        UsePhi ? Builder.CreateExtractValue(LoopSetup, 1) : LoopSetup;
    auto *LoopGuard = cast<BranchInst>(BeginBB->getTerminator());
    LoopGuard->setCondition(SetCount);
    // Make the true edge of the guard enter the loop preheader.
    if (LoopGuard->getSuccessor(0) != L->getLoopPreheader())
      LoopGuard->swapSuccessors();
  }
  LLVM_DEBUG(dbgs() << "HWLoops: Inserted loop counter: " << *LoopSetup
                    << "\n");
  // For the guarded phi form, the counter value is element 0 of the struct
  // returned by test.start.loop.iterations.
  if (UsePhi && UseLoopGuard)
    LoopSetup = Builder.CreateExtractValue(LoopSetup, 0);

  return !UsePhi ? LoopCountInit : LoopSetup;
}
|
|
|
|
|
|
|
|
void HardwareLoop::InsertLoopDec() {
|
|
|
|
IRBuilder<> CondBuilder(ExitBranch);
|
|
|
|
|
|
|
|
Function *DecFunc =
|
|
|
|
Intrinsic::getDeclaration(M, Intrinsic::loop_decrement,
|
|
|
|
LoopDecrement->getType());
|
|
|
|
Value *Ops[] = { LoopDecrement };
|
|
|
|
Value *NewCond = CondBuilder.CreateCall(DecFunc, Ops);
|
|
|
|
Value *OldCond = ExitBranch->getCondition();
|
|
|
|
ExitBranch->setCondition(NewCond);
|
|
|
|
|
|
|
|
// The false branch must exit the loop.
|
|
|
|
if (!L->contains(ExitBranch->getSuccessor(0)))
|
|
|
|
ExitBranch->swapSuccessors();
|
|
|
|
|
|
|
|
// The old condition may be dead now, and may have even created a dead PHI
|
|
|
|
// (the original induction variable).
|
|
|
|
RecursivelyDeleteTriviallyDeadInstructions(OldCond);
|
|
|
|
|
|
|
|
LLVM_DEBUG(dbgs() << "HWLoops: Inserted loop dec: " << *NewCond << "\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Emit a call to llvm.loop.decrement.reg just before the exit branch.
/// Unlike llvm.loop.decrement, this form takes and returns the counter value
/// explicitly so it can be carried through a loop phi.
///
/// \param EltsRem the remaining-iterations value coming into this iteration.
/// \returns the call instruction producing the decremented counter.
Instruction* HardwareLoop::InsertLoopRegDec(Value *EltsRem) {
  IRBuilder<> Builder(ExitBranch);

  Function *DecFunc = Intrinsic::getDeclaration(
      M, Intrinsic::loop_decrement_reg, { EltsRem->getType() });
  Value *Args[] = { EltsRem, LoopDecrement };
  Value *DecCall = Builder.CreateCall(DecFunc, Args);

  LLVM_DEBUG(dbgs() << "HWLoops: Inserted loop dec: " << *DecCall << "\n");
  return cast<Instruction>(DecCall);
}
|
|
|
|
|
|
|
|
/// Create the counter phi in the loop header, fed by the initial element
/// count from the preheader and by the decremented value from the latch.
///
/// \param NumElts initial counter value (incoming from the preheader).
/// \param EltsRem decremented counter (incoming from the latch).
/// \returns the newly created phi.
PHINode* HardwareLoop::InsertPHICounter(Value *NumElts, Value *EltsRem) {
  BasicBlock *Preheader = L->getLoopPreheader();
  BasicBlock *Latch = ExitBranch->getParent();
  BasicBlock *Header = L->getHeader();

  // Place the phi with the header's other phis, before any real instruction.
  IRBuilder<> PhiBuilder(Header->getFirstNonPHI());
  PHINode *Counter = PhiBuilder.CreatePHI(NumElts->getType(), 2);
  Counter->addIncoming(NumElts, Preheader);
  Counter->addIncoming(EltsRem, Latch);

  LLVM_DEBUG(dbgs() << "HWLoops: PHI Counter: " << *Counter << "\n");
  return Counter;
}
|
|
|
|
|
|
|
|
/// Retarget the exit branch to test the decremented counter: loop while
/// EltsRem != 0. Any now-dead old condition (and the induction variable it
/// may have kept alive) is removed.
void HardwareLoop::UpdateBranch(Value *EltsRem) {
  IRBuilder<> Builder(ExitBranch);
  Value *Zero = ConstantInt::get(EltsRem->getType(), 0);
  Value *Continue = Builder.CreateICmpNE(EltsRem, Zero);

  Value *PrevCond = ExitBranch->getCondition();
  ExitBranch->setCondition(Continue);

  // The false branch must exit the loop.
  if (!L->contains(ExitBranch->getSuccessor(0)))
    ExitBranch->swapSuccessors();

  // The old condition may be dead now, and may have even created a dead PHI
  // (the original induction variable).
  RecursivelyDeleteTriviallyDeadInstructions(PrevCond);
}
|
|
|
|
|
|
|
|
// Register the legacy-PM HardwareLoops pass and the analyses it depends on.
INITIALIZE_PASS_BEGIN(HardwareLoops, DEBUG_TYPE, HW_LOOPS_NAME, false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(HardwareLoops, DEBUG_TYPE, HW_LOOPS_NAME, false, false)
|
|
|
|
|
|
|
|
/// Factory for the legacy pass manager.
FunctionPass *llvm::createHardwareLoopsPass() { return new HardwareLoops(); }
|