1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-26 04:32:44 +01:00

[DivRemPairs] add a pass to optimize div/rem pairs (PR31028)

This is intended to be a superset of the functionality from D31037 (EarlyCSE) but implemented 
as an independent pass, so there's no stretching of scope or feature creep for an existing pass. 
I also proposed a weaker version of this for SimplifyCFG in D30910. And I initially had almost 
this same functionality as an addition to CGP in the motivating example of PR31028:
https://bugs.llvm.org/show_bug.cgi?id=31028

The advantage of positioning this ahead of SimplifyCFG in the pass pipeline is that it can allow 
more flattening. But it needs to be after passes (InstCombine) that could sink a div/rem and
undo the hoisting that is done here.

Decomposing remainder may allow removing some code from the backend (PPC and possibly others).

Differential Revision: https://reviews.llvm.org/D37121 

llvm-svn: 312862
This commit is contained in:
Sanjay Patel 2017-09-09 13:38:18 +00:00
parent a236420560
commit 554ba948c2
17 changed files with 647 additions and 0 deletions

View File

@ -483,6 +483,13 @@ public:
bool isLegalMaskedScatter(Type *DataType) const;
bool isLegalMaskedGather(Type *DataType) const;
/// Return true if the target has a unified operation to calculate division
/// and remainder. If so, the additional implicit multiplication and
/// subtraction required to calculate a remainder from division are free. This
/// can enable more aggressive transformations for division and remainder than
/// would typically be allowed using throughput or size cost models.
bool hasDivRemOp(Type *DataType, bool IsSigned) const;
/// Return true if target doesn't mind addresses in vectors.
bool prefersVectorizedAddressing() const;
@ -960,6 +967,7 @@ public:
virtual bool isLegalMaskedLoad(Type *DataType) = 0;
virtual bool isLegalMaskedScatter(Type *DataType) = 0;
virtual bool isLegalMaskedGather(Type *DataType) = 0;
virtual bool hasDivRemOp(Type *DataType, bool IsSigned) = 0;
virtual bool prefersVectorizedAddressing() = 0;
virtual int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
int64_t BaseOffset, bool HasBaseReg,
@ -1182,6 +1190,9 @@ public:
// Forward the masked-gather legality query to the wrapped TTI implementation.
bool isLegalMaskedGather(Type *DataType) override {
  return Impl.isLegalMaskedGather(DataType);
}
// Forward the combined div/rem capability query to the wrapped TTI
// implementation (see the documented declaration earlier in this file).
bool hasDivRemOp(Type *DataType, bool IsSigned) override {
  return Impl.hasDivRemOp(DataType, IsSigned);
}
// Forward the vectorized-addressing preference query to the wrapped TTI
// implementation.
bool prefersVectorizedAddressing() override {
  return Impl.prefersVectorizedAddressing();
}

View File

@ -251,6 +251,8 @@ public:
bool isLegalMaskedGather(Type *DataType) { return false; }
// Conservative base-class default: assume the target has no combined
// div+rem operation unless a target override says otherwise.
bool hasDivRemOp(Type *DataType, bool IsSigned) { return false; }
bool prefersVectorizedAddressing() { return true; }
int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,

View File

@ -113,6 +113,7 @@ void initializeDependenceAnalysisPass(PassRegistry&);
void initializeDependenceAnalysisWrapperPassPass(PassRegistry&);
void initializeDetectDeadLanesPass(PassRegistry&);
void initializeDivergenceAnalysisPass(PassRegistry&);
void initializeDivRemPairsLegacyPassPass(PassRegistry&);
void initializeDomOnlyPrinterPass(PassRegistry&);
void initializeDomOnlyViewerPass(PassRegistry&);
void initializeDomPrinterPass(PassRegistry&);

View File

@ -375,6 +375,12 @@ FunctionPass *createMergedLoadStoreMotionPass();
//
FunctionPass *createNewGVNPass();
//===----------------------------------------------------------------------===//
//
// DivRemPairs - Hoist/decompose integer division and remainder instructions.
//
FunctionPass *createDivRemPairsPass();
//===----------------------------------------------------------------------===//
//
// MemCpyOpt - This pass performs optimizations related to eliminating memcpy

View File

@ -0,0 +1,31 @@
//===- DivRemPairs.h - Hoist/decompose integer division and remainder -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass hoists and/or decomposes integer division and remainder
// instructions to enable CFG improvements and better codegen.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_DIVREMPAIRS_H
#define LLVM_TRANSFORMS_SCALAR_DIVREMPAIRS_H

#include "llvm/IR/PassManager.h"

namespace llvm {

/// Hoist/decompose integer division and remainder instructions to enable CFG
/// improvements and better codegen.
struct DivRemPairsPass : public PassInfoMixin<DivRemPairsPass> {
public:
  /// Run the transform on \p F and report which analyses are preserved.
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
};

} // namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_DIVREMPAIRS_H

View File

@ -176,6 +176,10 @@ bool TargetTransformInfo::isLegalMaskedScatter(Type *DataType) const {
return TTIImpl->isLegalMaskedScatter(DataType);
}
// Public TTI entry point; delegates to the target's implementation object.
bool TargetTransformInfo::hasDivRemOp(Type *DataType, bool IsSigned) const {
  return TTIImpl->hasDivRemOp(DataType, IsSigned);
}
// Public TTI entry point; delegates to the target's implementation object.
bool TargetTransformInfo::prefersVectorizedAddressing() const {
  return TTIImpl->prefersVectorizedAddressing();
}

View File

@ -92,6 +92,7 @@
#include "llvm/Transforms/Scalar/CorrelatedValuePropagation.h"
#include "llvm/Transforms/Scalar/DCE.h"
#include "llvm/Transforms/Scalar/DeadStoreElimination.h"
#include "llvm/Transforms/Scalar/DivRemPairs.h"
#include "llvm/Transforms/Scalar/EarlyCSE.h"
#include "llvm/Transforms/Scalar/Float2Int.h"
#include "llvm/Transforms/Scalar/GVN.h"
@ -765,6 +766,11 @@ PassBuilder::buildModuleOptimizationPipeline(OptimizationLevel Level,
// And finally clean up LCSSA form before generating code.
OptimizePM.addPass(InstSimplifierPass());
// This hoists/decomposes div/rem ops. It should run after other sink/hoist
// passes to avoid re-sinking, but before SimplifyCFG because it can allow
// flattening of blocks.
OptimizePM.addPass(DivRemPairsPass());
// LoopSink (and other loop passes since the last simplifyCFG) might have
// resulted in single-entry-single-exit or empty blocks. Clean up the CFG.
OptimizePM.addPass(SimplifyCFGPass());

View File

@ -142,6 +142,7 @@ FUNCTION_PASS("break-crit-edges", BreakCriticalEdgesPass())
FUNCTION_PASS("consthoist", ConstantHoistingPass())
FUNCTION_PASS("correlated-propagation", CorrelatedValuePropagationPass())
FUNCTION_PASS("dce", DCEPass())
FUNCTION_PASS("div-rem-pairs", DivRemPairsPass())
FUNCTION_PASS("dse", DSEPass())
FUNCTION_PASS("dot-cfg", CFGPrinterPass())
FUNCTION_PASS("dot-cfg-only", CFGOnlyPrinterPass())

View File

@ -2515,6 +2515,11 @@ bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) {
return isLegalMaskedGather(DataType);
}
// X86 has combined div+rem instructions; report true when the corresponding
// ISD node (SDIVREM/UDIVREM) is legal for this type on the current subtarget.
bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
  EVT VT = TLI->getValueType(DL, DataType);
  return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
}
bool X86TTIImpl::areInlineCompatible(const Function *Caller,
const Function *Callee) const {
const TargetMachine &TM = getTLI()->getTargetMachine();

View File

@ -124,6 +124,7 @@ public:
bool isLegalMaskedStore(Type *DataType);
bool isLegalMaskedGather(Type *DataType);
bool isLegalMaskedScatter(Type *DataType);
bool hasDivRemOp(Type *DataType, bool IsSigned);
bool areInlineCompatible(const Function *Caller,
const Function *Callee) const;
bool expandMemCmp(Instruction *I, unsigned &MaxLoadSize);

View File

@ -673,6 +673,11 @@ void PassManagerBuilder::populateModulePassManager(
// Get rid of LCSSA nodes.
MPM.add(createInstructionSimplifierPass());
// This hoists/decomposes div/rem ops. It should run after other sink/hoist
// passes to avoid re-sinking, but before SimplifyCFG because it can allow
// flattening of blocks.
MPM.add(createDivRemPairsPass());
// LoopSink (and other loop passes since the last simplifyCFG) might have
// resulted in single-entry-single-exit or empty blocks. Clean up the CFG.
MPM.add(createCFGSimplificationPass());

View File

@ -7,6 +7,7 @@ add_llvm_library(LLVMScalarOpts
CorrelatedValuePropagation.cpp
DCE.cpp
DeadStoreElimination.cpp
DivRemPairs.cpp
EarlyCSE.cpp
FlattenCFGPass.cpp
Float2Int.cpp

View File

@ -0,0 +1,206 @@
//===- DivRemPairs.cpp - Hoist/decompose division and remainder -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass hoists and/or decomposes integer division and remainder
// instructions to enable CFG improvements and better codegen.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/DivRemPairs.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
using namespace llvm;
#define DEBUG_TYPE "div-rem-pairs"

// Transform statistics, reported with -stats.
STATISTIC(NumPairs, "Number of div/rem pairs");
STATISTIC(NumHoisted, "Number of instructions hoisted");
STATISTIC(NumDecomposed, "Number of instructions decomposed");
/// Find matching pairs of integer div/rem ops (they have the same numerator,
/// denominator, and signedness). If they exist in different basic blocks, bring
/// them together by hoisting or replace the common division operation that is
/// implicit in the remainder:
/// X % Y <--> X - ((X / Y) * Y).
///
/// We can largely ignore the normal safety and cost constraints on speculation
/// of these ops when we find a matching pair. This is because we are already
/// guaranteed that any exceptions and most cost are already incurred by the
/// first member of the pair.
///
/// Note: This transform could be an oddball enhancement to EarlyCSE, GVN, or
/// SimplifyCFG, but it's split off on its own because it's different enough
/// that it doesn't quite match the stated objectives of those passes.
///
/// \returns true if the IR was changed.
static bool optimizeDivRem(Function &F, const TargetTransformInfo &TTI,
                           const DominatorTree &DT) {
  bool Changed = false;

  // Insert all divide and remainder instructions into maps keyed by their
  // operands and opcode (signed or unsigned).
  DenseMap<DivRemMapKey, Instruction *> DivMap, RemMap;
  for (auto &BB : F) {
    for (auto &I : BB) {
      if (I.getOpcode() == Instruction::SDiv)
        DivMap[DivRemMapKey(true, I.getOperand(0), I.getOperand(1))] = &I;
      else if (I.getOpcode() == Instruction::UDiv)
        DivMap[DivRemMapKey(false, I.getOperand(0), I.getOperand(1))] = &I;
      else if (I.getOpcode() == Instruction::SRem)
        RemMap[DivRemMapKey(true, I.getOperand(0), I.getOperand(1))] = &I;
      else if (I.getOpcode() == Instruction::URem)
        RemMap[DivRemMapKey(false, I.getOperand(0), I.getOperand(1))] = &I;
    }
  }

  // We can iterate over either map because we are only looking for matched
  // pairs. Choose remainders for efficiency because they are usually even more
  // rare than division.
  for (auto &RemPair : RemMap) {
    // Find the matching division instruction from the division map. Use find()
    // rather than operator[] so that an unmatched remainder does not insert a
    // null placeholder entry into the map.
    auto DivIt = DivMap.find(RemPair.getFirst());
    if (DivIt == DivMap.end())
      continue;
    Instruction *DivInst = DivIt->second;

    // We have a matching pair of div/rem instructions. If one dominates the
    // other, hoist and/or replace one.
    NumPairs++;
    Instruction *RemInst = RemPair.getSecond();
    bool IsSigned = DivInst->getOpcode() == Instruction::SDiv;
    bool HasDivRemOp = TTI.hasDivRemOp(DivInst->getType(), IsSigned);

    // If the target supports div+rem and the instructions are in the same block
    // already, there's nothing to do. The backend should handle this. If the
    // target does not support div+rem, then we will decompose the rem.
    if (HasDivRemOp && RemInst->getParent() == DivInst->getParent())
      continue;

    bool DivDominates = DT.dominates(DivInst, RemInst);
    if (!DivDominates && !DT.dominates(RemInst, DivInst))
      continue;

    if (HasDivRemOp) {
      // The target has a single div/rem operation. Hoist the lower instruction
      // to make the matched pair visible to the backend.
      if (DivDominates)
        RemInst->moveAfter(DivInst);
      else
        DivInst->moveAfter(RemInst);
      NumHoisted++;
    } else {
      // The target does not have a single div/rem operation. Decompose the
      // remainder calculation as:
      // X % Y --> X - ((X / Y) * Y).
      Value *X = RemInst->getOperand(0);
      Value *Y = RemInst->getOperand(1);
      Instruction *Mul = BinaryOperator::CreateMul(DivInst, Y);
      Instruction *Sub = BinaryOperator::CreateSub(X, Mul);

      // If the remainder dominates, then hoist the division up to that block:
      //
      // bb1:
      //   %rem = srem %x, %y
      // bb2:
      //   %div = sdiv %x, %y
      // -->
      // bb1:
      //   %div = sdiv %x, %y
      //   %mul = mul %div, %y
      //   %rem = sub %x, %mul
      //
      // If the division dominates, it's already in the right place. The mul+sub
      // will be in a different block because we don't assume that they are
      // cheap to speculatively execute:
      //
      // bb1:
      //   %div = sdiv %x, %y
      // bb2:
      //   %rem = srem %x, %y
      // -->
      // bb1:
      //   %div = sdiv %x, %y
      // bb2:
      //   %mul = mul %div, %y
      //   %rem = sub %x, %mul
      //
      // If the div and rem are in the same block, we do the same transform,
      // but any code movement would be within the same block.
      if (!DivDominates)
        DivInst->moveBefore(RemInst);
      Mul->insertAfter(RemInst);
      Sub->insertAfter(Mul);

      // Now kill the explicit remainder. We have replaced it with:
      // (sub X, (mul (div X, Y), Y))
      RemInst->replaceAllUsesWith(Sub);
      RemInst->eraseFromParent();
      NumDecomposed++;
    }
    Changed = true;
  }

  return Changed;
}
// Pass manager boilerplate below here.

namespace {
/// Legacy pass-manager wrapper: fetches the required analyses and forwards
/// to optimizeDivRem().
struct DivRemPairsLegacyPass : public FunctionPass {
  static char ID;
  DivRemPairsLegacyPass() : FunctionPass(ID) {
    initializeDivRemPairsLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  // Requires dominators and TTI. The transform only moves/creates ALU
  // instructions, so the CFG (and therefore the dominator tree) is preserved.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    FunctionPass::getAnalysisUsage(AU);
  }

  bool runOnFunction(Function &F) override {
    // Honor -optnone / opt-bisect.
    if (skipFunction(F))
      return false;
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    return optimizeDivRem(F, TTI, DT);
  }
};
} // end anonymous namespace
char DivRemPairsLegacyPass::ID = 0;

// Register the pass (and its dominator-tree dependency) with the legacy pass
// manager under the "div-rem-pairs" command-line name.
INITIALIZE_PASS_BEGIN(DivRemPairsLegacyPass, "div-rem-pairs",
                      "Hoist/decompose integer division and remainder", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(DivRemPairsLegacyPass, "div-rem-pairs",
                    "Hoist/decompose integer division and remainder", false,
                    false)
// Factory for the legacy pass pipeline (declared in Transforms/Scalar.h).
FunctionPass *llvm::createDivRemPairsPass() {
  return new DivRemPairsLegacyPass();
}
// New pass-manager entry point: run the transform and report which analyses
// survive it.
PreservedAnalyses DivRemPairsPass::run(Function &F,
                                       FunctionAnalysisManager &FAM) {
  TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
  DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  if (!optimizeDivRem(F, TTI, DT))
    return PreservedAnalyses::all();
  // TODO: This pass just hoists/replaces math ops - all analyses are preserved?
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  return PA;
}

View File

@ -40,6 +40,7 @@ void llvm::initializeScalarOpts(PassRegistry &Registry) {
initializeCorrelatedValuePropagationPass(Registry);
initializeDCELegacyPassPass(Registry);
initializeDeadInstEliminationPass(Registry);
initializeDivRemPairsLegacyPassPass(Registry);
initializeScalarizerPass(Registry);
initializeDSELegacyPassPass(Registry);
initializeGuardWideningLegacyPassPass(Registry);

View File

@ -205,6 +205,7 @@
; CHECK-O-NEXT: Running pass: AlignmentFromAssumptionsPass
; CHECK-O-NEXT: Running pass: LoopSinkPass
; CHECK-O-NEXT: Running pass: InstSimplifierPass
; CHECK-O-NEXT: Running pass: DivRemPairsPass
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Finished llvm::Function pass manager run.
; CHECK-O-NEXT: Running pass: GlobalDCEPass

View File

@ -193,6 +193,7 @@
; CHECK-POSTLINK-O-NEXT: Running pass: AlignmentFromAssumptionsPass
; CHECK-POSTLINK-O-NEXT: Running pass: LoopSinkPass
; CHECK-POSTLINK-O-NEXT: Running pass: InstSimplifierPass
; CHECK-POSTLINK-O-NEXT: Running pass: DivRemPairsPass
; CHECK-POSTLINK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-POSTLINK-O-NEXT: Finished llvm::Function pass manager run.
; CHECK-POSTLINK-O-NEXT: Running pass: GlobalDCEPass

View File

@ -0,0 +1,364 @@
; RUN: opt < %s -div-rem-pairs -S -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=ALL --check-prefix=X86
; RUN: opt < %s -div-rem-pairs -S -mtriple=powerpc64-unknown-unknown | FileCheck %s --check-prefix=ALL --check-prefix=PPC

declare void @foo(i32, i32)

; The srem/sdiv pair is already in one block. x86 reports a combined div/rem
; op, so the pair is left alone; PPC does not, so the srem is decomposed to
; mul+sub.
define void @decompose_illegal_srem_same_block(i32 %a, i32 %b) {
; X86-LABEL: @decompose_illegal_srem_same_block(
; X86-NEXT: [[REM:%.*]] = srem i32 %a, %b
; X86-NEXT: [[DIV:%.*]] = sdiv i32 %a, %b
; X86-NEXT: call void @foo(i32 [[REM]], i32 [[DIV]])
; X86-NEXT: ret void
;
; PPC-LABEL: @decompose_illegal_srem_same_block(
; PPC-NEXT: [[DIV:%.*]] = sdiv i32 %a, %b
; PPC-NEXT: [[TMP1:%.*]] = mul i32 [[DIV]], %b
; PPC-NEXT: [[TMP2:%.*]] = sub i32 %a, [[TMP1]]
; PPC-NEXT: call void @foo(i32 [[TMP2]], i32 [[DIV]])
; PPC-NEXT: ret void
;
  %rem = srem i32 %a, %b
  %div = sdiv i32 %a, %b
  call void @foo(i32 %rem, i32 %div)
  ret void
}

; Same as above, but with the unsigned pair (udiv/urem).
define void @decompose_illegal_urem_same_block(i32 %a, i32 %b) {
; X86-LABEL: @decompose_illegal_urem_same_block(
; X86-NEXT: [[DIV:%.*]] = udiv i32 %a, %b
; X86-NEXT: [[REM:%.*]] = urem i32 %a, %b
; X86-NEXT: call void @foo(i32 [[REM]], i32 [[DIV]])
; X86-NEXT: ret void
;
; PPC-LABEL: @decompose_illegal_urem_same_block(
; PPC-NEXT: [[DIV:%.*]] = udiv i32 %a, %b
; PPC-NEXT: [[TMP1:%.*]] = mul i32 [[DIV]], %b
; PPC-NEXT: [[TMP2:%.*]] = sub i32 %a, [[TMP1]]
; PPC-NEXT: call void @foo(i32 [[TMP2]], i32 [[DIV]])
; PPC-NEXT: ret void
;
  %div = udiv i32 %a, %b
  %rem = urem i32 %a, %b
  call void @foo(i32 %rem, i32 %div)
  ret void
}
; Hoist and optionally decompose the sdiv because it's safe and free.
; PR31028 - https://bugs.llvm.org/show_bug.cgi?id=31028
define i32 @hoist_sdiv(i32 %a, i32 %b) {
; X86-LABEL: @hoist_sdiv(
; X86-NEXT: entry:
; X86-NEXT: [[REM:%.*]] = srem i32 %a, %b
; X86-NEXT: [[DIV:%.*]] = sdiv i32 %a, %b
; X86-NEXT: [[CMP:%.*]] = icmp eq i32 [[REM]], 42
; X86-NEXT: br i1 [[CMP]], label %if, label %end
; X86: if:
; X86-NEXT: br label %end
; X86: end:
; X86-NEXT: [[RET:%.*]] = phi i32 [ [[DIV]], %if ], [ 3, %entry ]
; X86-NEXT: ret i32 [[RET]]
;
; PPC-LABEL: @hoist_sdiv(
; PPC-NEXT: entry:
; PPC-NEXT: [[DIV:%.*]] = sdiv i32 %a, %b
; PPC-NEXT: [[TMP0:%.*]] = mul i32 [[DIV]], %b
; PPC-NEXT: [[TMP1:%.*]] = sub i32 %a, [[TMP0]]
; PPC-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP1]], 42
; PPC-NEXT: br i1 [[CMP]], label %if, label %end
; PPC: if:
; PPC-NEXT: br label %end
; PPC: end:
; PPC-NEXT: [[RET:%.*]] = phi i32 [ [[DIV]], %if ], [ 3, %entry ]
; PPC-NEXT: ret i32 [[RET]]
;
entry:
  %rem = srem i32 %a, %b
  %cmp = icmp eq i32 %rem, 42
  br i1 %cmp, label %if, label %end

if:
  %div = sdiv i32 %a, %b
  br label %end

end:
  %ret = phi i32 [ %div, %if ], [ 3, %entry ]
  ret i32 %ret
}

; Hoist and optionally decompose the udiv because it's safe and free.
define i64 @hoist_udiv(i64 %a, i64 %b) {
; X86-LABEL: @hoist_udiv(
; X86-NEXT: entry:
; X86-NEXT: [[REM:%.*]] = urem i64 %a, %b
; X86-NEXT: [[DIV:%.*]] = udiv i64 %a, %b
; X86-NEXT: [[CMP:%.*]] = icmp eq i64 [[REM]], 42
; X86-NEXT: br i1 [[CMP]], label %if, label %end
; X86: if:
; X86-NEXT: br label %end
; X86: end:
; X86-NEXT: [[RET:%.*]] = phi i64 [ [[DIV]], %if ], [ 3, %entry ]
; X86-NEXT: ret i64 [[RET]]
;
; PPC-LABEL: @hoist_udiv(
; PPC-NEXT: entry:
; PPC-NEXT: [[DIV:%.*]] = udiv i64 %a, %b
; PPC-NEXT: [[TMP0:%.*]] = mul i64 [[DIV]], %b
; PPC-NEXT: [[TMP1:%.*]] = sub i64 %a, [[TMP0]]
; PPC-NEXT: [[CMP:%.*]] = icmp eq i64 [[TMP1]], 42
; PPC-NEXT: br i1 [[CMP]], label %if, label %end
; PPC: if:
; PPC-NEXT: br label %end
; PPC: end:
; PPC-NEXT: [[RET:%.*]] = phi i64 [ [[DIV]], %if ], [ 3, %entry ]
; PPC-NEXT: ret i64 [[RET]]
;
entry:
  %rem = urem i64 %a, %b
  %cmp = icmp eq i64 %rem, 42
  br i1 %cmp, label %if, label %end

if:
  %div = udiv i64 %a, %b
  br label %end

end:
  %ret = phi i64 [ %div, %if ], [ 3, %entry ]
  ret i64 %ret
}
; Hoist the srem if it's safe and free, otherwise decompose it.
define i16 @hoist_srem(i16 %a, i16 %b) {
; X86-LABEL: @hoist_srem(
; X86-NEXT: entry:
; X86-NEXT: [[DIV:%.*]] = sdiv i16 %a, %b
; X86-NEXT: [[REM:%.*]] = srem i16 %a, %b
; X86-NEXT: [[CMP:%.*]] = icmp eq i16 [[DIV]], 42
; X86-NEXT: br i1 [[CMP]], label %if, label %end
; X86: if:
; X86-NEXT: br label %end
; X86: end:
; X86-NEXT: [[RET:%.*]] = phi i16 [ [[REM]], %if ], [ 3, %entry ]
; X86-NEXT: ret i16 [[RET]]
;
; PPC-LABEL: @hoist_srem(
; PPC-NEXT: entry:
; PPC-NEXT: [[DIV:%.*]] = sdiv i16 %a, %b
; PPC-NEXT: [[CMP:%.*]] = icmp eq i16 [[DIV]], 42
; PPC-NEXT: br i1 [[CMP]], label %if, label %end
; PPC: if:
; PPC-NEXT: [[TMP0:%.*]] = mul i16 [[DIV]], %b
; PPC-NEXT: [[TMP1:%.*]] = sub i16 %a, [[TMP0]]
; PPC-NEXT: br label %end
; PPC: end:
; PPC-NEXT: [[RET:%.*]] = phi i16 [ [[TMP1]], %if ], [ 3, %entry ]
; PPC-NEXT: ret i16 [[RET]]
;
entry:
  %div = sdiv i16 %a, %b
  %cmp = icmp eq i16 %div, 42
  br i1 %cmp, label %if, label %end

if:
  %rem = srem i16 %a, %b
  br label %end

end:
  %ret = phi i16 [ %rem, %if ], [ 3, %entry ]
  ret i16 %ret
}

; Hoist the urem if it's safe and free, otherwise decompose it.
define i8 @hoist_urem(i8 %a, i8 %b) {
; X86-LABEL: @hoist_urem(
; X86-NEXT: entry:
; X86-NEXT: [[DIV:%.*]] = udiv i8 %a, %b
; X86-NEXT: [[REM:%.*]] = urem i8 %a, %b
; X86-NEXT: [[CMP:%.*]] = icmp eq i8 [[DIV]], 42
; X86-NEXT: br i1 [[CMP]], label %if, label %end
; X86: if:
; X86-NEXT: br label %end
; X86: end:
; X86-NEXT: [[RET:%.*]] = phi i8 [ [[REM]], %if ], [ 3, %entry ]
; X86-NEXT: ret i8 [[RET]]
;
; PPC-LABEL: @hoist_urem(
; PPC-NEXT: entry:
; PPC-NEXT: [[DIV:%.*]] = udiv i8 %a, %b
; PPC-NEXT: [[CMP:%.*]] = icmp eq i8 [[DIV]], 42
; PPC-NEXT: br i1 [[CMP]], label %if, label %end
; PPC: if:
; PPC-NEXT: [[TMP0:%.*]] = mul i8 [[DIV]], %b
; PPC-NEXT: [[TMP1:%.*]] = sub i8 %a, [[TMP0]]
; PPC-NEXT: br label %end
; PPC: end:
; PPC-NEXT: [[RET:%.*]] = phi i8 [ [[TMP1]], %if ], [ 3, %entry ]
; PPC-NEXT: ret i8 [[RET]]
;
entry:
  %div = udiv i8 %a, %b
  %cmp = icmp eq i8 %div, 42
  br i1 %cmp, label %if, label %end

if:
  %rem = urem i8 %a, %b
  br label %end

end:
  %ret = phi i8 [ %rem, %if ], [ 3, %entry ]
  ret i8 %ret
}
; Negative tests: a pair only matches when opcode signedness, operation kind,
; and both operands are identical. Each case below differs in exactly one of
; those, so the pass must leave the IR unchanged on all targets.

; If the ops don't match, don't do anything: signedness.
define i32 @dont_hoist_udiv(i32 %a, i32 %b) {
; ALL-LABEL: @dont_hoist_udiv(
; ALL-NEXT: entry:
; ALL-NEXT: [[REM:%.*]] = srem i32 %a, %b
; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[REM]], 42
; ALL-NEXT: br i1 [[CMP]], label %if, label %end
; ALL: if:
; ALL-NEXT: [[DIV:%.*]] = udiv i32 %a, %b
; ALL-NEXT: br label %end
; ALL: end:
; ALL-NEXT: [[RET:%.*]] = phi i32 [ [[DIV]], %if ], [ 3, %entry ]
; ALL-NEXT: ret i32 [[RET]]
;
entry:
  %rem = srem i32 %a, %b
  %cmp = icmp eq i32 %rem, 42
  br i1 %cmp, label %if, label %end

if:
  %div = udiv i32 %a, %b
  br label %end

end:
  %ret = phi i32 [ %div, %if ], [ 3, %entry ]
  ret i32 %ret
}

; If the ops don't match, don't do anything: operation.
define i32 @dont_hoist_srem(i32 %a, i32 %b) {
; ALL-LABEL: @dont_hoist_srem(
; ALL-NEXT: entry:
; ALL-NEXT: [[REM:%.*]] = urem i32 %a, %b
; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[REM]], 42
; ALL-NEXT: br i1 [[CMP]], label %if, label %end
; ALL: if:
; ALL-NEXT: [[REM2:%.*]] = srem i32 %a, %b
; ALL-NEXT: br label %end
; ALL: end:
; ALL-NEXT: [[RET:%.*]] = phi i32 [ [[REM2]], %if ], [ 3, %entry ]
; ALL-NEXT: ret i32 [[RET]]
;
entry:
  %rem = urem i32 %a, %b
  %cmp = icmp eq i32 %rem, 42
  br i1 %cmp, label %if, label %end

if:
  %rem2 = srem i32 %a, %b
  br label %end

end:
  %ret = phi i32 [ %rem2, %if ], [ 3, %entry ]
  ret i32 %ret
}

; If the ops don't match, don't do anything: operands.
define i32 @dont_hoist_sdiv(i32 %a, i32 %b, i32 %c) {
; ALL-LABEL: @dont_hoist_sdiv(
; ALL-NEXT: entry:
; ALL-NEXT: [[REM:%.*]] = srem i32 %a, %b
; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[REM]], 42
; ALL-NEXT: br i1 [[CMP]], label %if, label %end
; ALL: if:
; ALL-NEXT: [[DIV:%.*]] = sdiv i32 %a, %c
; ALL-NEXT: br label %end
; ALL: end:
; ALL-NEXT: [[RET:%.*]] = phi i32 [ [[DIV]], %if ], [ 3, %entry ]
; ALL-NEXT: ret i32 [[RET]]
;
entry:
  %rem = srem i32 %a, %b
  %cmp = icmp eq i32 %rem, 42
  br i1 %cmp, label %if, label %end

if:
  %div = sdiv i32 %a, %c
  br label %end

end:
  %ret = phi i32 [ %div, %if ], [ 3, %entry ]
  ret i32 %ret
}
; If the target doesn't have a unified div/rem op for the type, decompose rem in-place to mul+sub.
; (i128 division is not legal on either target, so both decompose.)
define i128 @dont_hoist_urem(i128 %a, i128 %b) {
; ALL-LABEL: @dont_hoist_urem(
; ALL-NEXT: entry:
; ALL-NEXT: [[DIV:%.*]] = udiv i128 %a, %b
; ALL-NEXT: [[CMP:%.*]] = icmp eq i128 [[DIV]], 42
; ALL-NEXT: br i1 [[CMP]], label %if, label %end
; ALL: if:
; ALL-NEXT: [[TMP0:%.*]] = mul i128 [[DIV]], %b
; ALL-NEXT: [[TMP1:%.*]] = sub i128 %a, [[TMP0]]
; ALL-NEXT: br label %end
; ALL: end:
; ALL-NEXT: [[RET:%.*]] = phi i128 [ [[TMP1]], %if ], [ 3, %entry ]
; ALL-NEXT: ret i128 [[RET]]
;
entry:
  %div = udiv i128 %a, %b
  %cmp = icmp eq i128 %div, 42
  br i1 %cmp, label %if, label %end

if:
  %rem = urem i128 %a, %b
  br label %end

end:
  %ret = phi i128 [ %rem, %if ], [ 3, %entry ]
  ret i128 %ret
}

; We don't hoist if one op does not dominate the other,
; but we could hoist both ops to the common predecessor block?
define i32 @no_domination(i1 %cmp, i32 %a, i32 %b) {
; ALL-LABEL: @no_domination(
; ALL-NEXT: entry:
; ALL-NEXT: br i1 %cmp, label %if, label %else
; ALL: if:
; ALL-NEXT: [[DIV:%.*]] = sdiv i32 %a, %b
; ALL-NEXT: br label %end
; ALL: else:
; ALL-NEXT: [[REM:%.*]] = srem i32 %a, %b
; ALL-NEXT: br label %end
; ALL: end:
; ALL-NEXT: [[RET:%.*]] = phi i32 [ [[DIV]], %if ], [ [[REM]], %else ]
; ALL-NEXT: ret i32 [[RET]]
;
entry:
  br i1 %cmp, label %if, label %else

if:
  %div = sdiv i32 %a, %b
  br label %end

else:
  %rem = srem i32 %a, %b
  br label %end

end:
  %ret = phi i32 [ %div, %if ], [ %rem, %else ]
  ret i32 %ret
}