//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, const Type *Ty,
Instruction::CastOps Op,
BasicBlock::iterator IP) {
// Check to see if there is already a cast!
for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
UI != E; ++UI) {
User *U = *UI;
if (U->getType() == Ty)
if (CastInst *CI = dyn_cast<CastInst>(U))
if (CI->getOpcode() == Op) {
// If the cast isn't where we want it, fix it.
if (BasicBlock::iterator(CI) != IP) {
// Create a new cast, and leave the old cast in place in case
// it is being used as an insert point. Clear its operand
// so that it doesn't hold anything live.
Instruction *NewCI = CastInst::Create(Op, V, Ty, "", IP);
NewCI->takeName(CI);
CI->replaceAllUsesWith(NewCI);
CI->setOperand(0, UndefValue::get(V->getType()));
rememberInstruction(NewCI);
return NewCI;
}
rememberInstruction(CI);
return CI;
}
}
// Create a new cast.
Instruction *I = CastInst::Create(Op, V, Ty, V->getName(), IP);
rememberInstruction(I);
return I;
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
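///
/// For example (illustrative IR; %p is hypothetical), casting an i8* value
/// %p to i64 emits or reuses a single
///   %p.cast = ptrtoint i8* %p to i64
/// rather than creating a fresh cast at every expansion site.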
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, const Type *Ty) {
Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
assert((Op == Instruction::BitCast ||
Op == Instruction::PtrToInt ||
Op == Instruction::IntToPtr) &&
"InsertNoopCastOfTo cannot perform non-noop casts!");
assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
"InsertNoopCastOfTo cannot change sizes!");
// Short-circuit unnecessary bitcasts.
if (Op == Instruction::BitCast && V->getType() == Ty)
return V;
// Short-circuit unnecessary inttoptr<->ptrtoint casts.
if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
if (CastInst *CI = dyn_cast<CastInst>(V))
if ((CI->getOpcode() == Instruction::PtrToInt ||
CI->getOpcode() == Instruction::IntToPtr) &&
SE.getTypeSizeInBits(CI->getType()) ==
SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
return CI->getOperand(0);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
if ((CE->getOpcode() == Instruction::PtrToInt ||
CE->getOpcode() == Instruction::IntToPtr) &&
SE.getTypeSizeInBits(CE->getType()) ==
SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
return CE->getOperand(0);
}
// Fold a cast of a constant.
if (Constant *C = dyn_cast<Constant>(V))
return ConstantExpr::getCast(Op, C, Ty);
// Cast the argument at the beginning of the entry block, after
// any bitcasts of other arguments.
if (Argument *A = dyn_cast<Argument>(V)) {
BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
while ((isa<BitCastInst>(IP) &&
isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
cast<BitCastInst>(IP)->getOperand(0) != A) ||
isa<DbgInfoIntrinsic>(IP))
++IP;
return ReuseOrCreateCast(A, Ty, Op, IP);
}
// Cast the instruction immediately after the instruction.
Instruction *I = cast<Instruction>(V);
BasicBlock::iterator IP = I; ++IP;
if (InvokeInst *II = dyn_cast<InvokeInst>(I))
IP = II->getNormalDest()->begin();
while (isa<PHINode>(IP) || isa<DbgInfoIntrinsic>(IP)) ++IP;
return ReuseOrCreateCast(I, Ty, Op, IP);
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
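///
/// For example (illustrative), if an equivalent "add i64 %a, %b" already
/// sits within a few instructions of the insert point, that instruction is
/// returned instead of emitting a duplicate.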
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
Value *LHS, Value *RHS) {
// Fold a binop with constant operands.
if (Constant *CLHS = dyn_cast<Constant>(LHS))
if (Constant *CRHS = dyn_cast<Constant>(RHS))
return ConstantExpr::get(Opcode, CLHS, CRHS);
// Do a quick scan to see if we have this binop nearby. If so, reuse it.
unsigned ScanLimit = 6;
BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
// Scanning starts from the last instruction before the insertion point.
BasicBlock::iterator IP = Builder.GetInsertPoint();
if (IP != BlockBegin) {
--IP;
for (; ScanLimit; --IP, --ScanLimit) {
// Don't count dbg.value against the ScanLimit, to avoid perturbing the
// generated code.
if (isa<DbgInfoIntrinsic>(IP))
ScanLimit++;
if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
IP->getOperand(1) == RHS)
return IP;
if (IP == BlockBegin) break;
}
}
// Save the original insertion point so we can restore it when we're done.
BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
// Move the insertion point out of as many loops as we can.
while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
BasicBlock *Preheader = L->getLoopPreheader();
if (!Preheader) break;
// Ok, move up a level.
Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
}
// If we haven't found this binop, insert it.
Value *BO = Builder.CreateBinOp(Opcode, LHS, RHS, "tmp");
rememberInstruction(BO);
// Restore the original insert point.
if (SaveInsertBB)
restoreInsertPoint(SaveInsertBB, SaveInsertPt);
return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
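///
/// For example (illustrative): with S = 6 and Factor = 4, S becomes 1 and
/// Remainder picks up 2; with S = (8 * %x) and Factor = 4, S becomes
/// (2 * %x); and with S = {8,+,4} and Factor = 4, S becomes {2,+,1}.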
static bool FactorOutConstant(const SCEV *&S,
const SCEV *&Remainder,
const SCEV *Factor,
ScalarEvolution &SE,
const TargetData *TD) {
// Everything is divisible by one.
if (Factor->isOne())
return true;
// x/x == 1.
if (S == Factor) {
S = SE.getConstant(S->getType(), 1);
return true;
}
// For a Constant, check for a multiple of the given factor.
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
// 0/x == 0.
if (C->isZero())
return true;
// Check for divisibility.
if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
ConstantInt *CI =
ConstantInt::get(SE.getContext(),
C->getValue()->getValue().sdiv(
FC->getValue()->getValue()));
// If the quotient is zero and the remainder is non-zero, reject
// the value at this scale. It will be considered for subsequent
// smaller scales.
if (!CI->isZero()) {
const SCEV *Div = SE.getConstant(CI);
S = Div;
Remainder =
SE.getAddExpr(Remainder,
SE.getConstant(C->getValue()->getValue().srem(
FC->getValue()->getValue())));
return true;
}
}
}
// In a Mul, check if there is a constant operand which is a multiple
// of the given factor.
if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
if (TD) {
// With TargetData, the size is known. Check if there is a constant
// operand which is a multiple of the given factor. If so, we can
// factor it.
const SCEVConstant *FC = cast<SCEVConstant>(Factor);
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
NewMulOps[0] =
SE.getConstant(C->getValue()->getValue().sdiv(
FC->getValue()->getValue()));
S = SE.getMulExpr(NewMulOps);
return true;
}
} else {
// Without TargetData, check if Factor can be factored out of any of the
// Mul's operands. If so, we can just remove it.
for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
const SCEV *SOp = M->getOperand(i);
const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
Remainder->isZero()) {
SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
NewMulOps[i] = SOp;
S = SE.getMulExpr(NewMulOps);
return true;
}
}
}
}
// In an AddRec, check if both start and step are divisible.
if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
const SCEV *Step = A->getStepRecurrence(SE);
const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
if (!FactorOutConstant(Step, StepRem, Factor, SE, TD))
return false;
if (!StepRem->isZero())
return false;
const SCEV *Start = A->getStart();
if (!FactorOutConstant(Start, Remainder, Factor, SE, TD))
return false;
S = SE.getAddRecExpr(Start, Step, A->getLoop());
return true;
}
return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
const Type *Ty,
ScalarEvolution &SE) {
unsigned NumAddRecs = 0;
for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
++NumAddRecs;
// Group Ops into non-addrecs and addrecs.
SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
// Let ScalarEvolution sort and simplify the non-addrecs list.
const SCEV *Sum = NoAddRecs.empty() ?
SE.getConstant(Ty, 0) :
SE.getAddExpr(NoAddRecs);
// If it returned an add, use the operands. Otherwise it simplified
// the sum into a single value, so just use that.
Ops.clear();
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
Ops.append(Add->op_begin(), Add->op_end());
else if (!Sum->isZero())
Ops.push_back(Sum);
// Then append the addrecs.
Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
const Type *Ty,
ScalarEvolution &SE) {
// Find the addrecs.
SmallVector<const SCEV *, 8> AddRecs;
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
const SCEV *Start = A->getStart();
if (Start->isZero()) break;
const SCEV *Zero = SE.getConstant(Ty, 0);
AddRecs.push_back(SE.getAddRecExpr(Zero,
A->getStepRecurrence(SE),
A->getLoop()));
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
Ops[i] = Zero;
Ops.append(Add->op_begin(), Add->op_end());
e += Add->getNumOperands();
} else {
Ops[i] = Start;
}
}
if (!AddRecs.empty()) {
// Add the addrecs onto the end of the list.
Ops.append(AddRecs.begin(), AddRecs.end());
// Resort the operand list, moving any constants to the front.
SimplifyAddOperands(Ops, Ty, SE);
}
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
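/// For example (illustrative IR; names hypothetical), expanding the SCEV
/// ((8 * %i) + %p), where %p is a double*, can produce
///   %scevgep = getelementptr double* %p, i64 %i
/// because the constant factor 8 matches the element size, instead of
/// ptrtoint+add+inttoptr arithmetic on the raw address.
///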
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
const SCEV *const *op_end,
const PointerType *PTy,
const Type *Ty,
Value *V) {
const Type *ElTy = PTy->getElementType();
SmallVector<Value *, 4> GepIndices;
SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
bool AnyNonZeroIndices = false;
// Split AddRecs up into parts as either of the parts may be usable
// without the other.
SplitAddRecs(Ops, Ty, SE);
// Descend down the pointer's type and attempt to convert the other
// operands into GEP indices, at each level. The first index in a GEP
// indexes into the array implied by the pointer operand; the rest of
// the indices index into the element or field type selected by the
// preceding index.
for (;;) {
// If the scale size is not 0, attempt to factor out a scale for
// array indexing.
SmallVector<const SCEV *, 8> ScaledOps;
if (ElTy->isSized()) {
const SCEV *ElSize = SE.getSizeOfExpr(ElTy);
if (!ElSize->isZero()) {
SmallVector<const SCEV *, 8> NewOps;
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
const SCEV *Op = Ops[i];
const SCEV *Remainder = SE.getConstant(Ty, 0);
if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
// Op now has ElSize factored out.
ScaledOps.push_back(Op);
if (!Remainder->isZero())
NewOps.push_back(Remainder);
AnyNonZeroIndices = true;
} else {
// The operand was not divisible, so add it to the list of operands
// we'll scan next iteration.
NewOps.push_back(Ops[i]);
}
}
// If we made any changes, update Ops.
if (!ScaledOps.empty()) {
Ops = NewOps;
SimplifyAddOperands(Ops, Ty, SE);
}
}
}
// Record the scaled array index for this level of the type. If
// we didn't find any operands that could be factored, tentatively
// assume that element zero was selected (since the zero offset
// would obviously be folded away).
Value *Scaled = ScaledOps.empty() ?
Constant::getNullValue(Ty) :
expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
GepIndices.push_back(Scaled);
// Collect struct field index operands.
while (const StructType *STy = dyn_cast<StructType>(ElTy)) {
bool FoundFieldNo = false;
// An empty struct has no fields.
if (STy->getNumElements() == 0) break;
if (SE.TD) {
// With TargetData, field offsets are known. See if a constant offset
// falls within any of the struct fields.
if (Ops.empty()) break;
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
if (SE.getTypeSizeInBits(C->getType()) <= 64) {
const StructLayout &SL = *SE.TD->getStructLayout(STy);
uint64_t FullOffset = C->getValue()->getZExtValue();
if (FullOffset < SL.getSizeInBytes()) {
unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
GepIndices.push_back(
ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
ElTy = STy->getTypeAtIndex(ElIdx);
Ops[0] =
SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
AnyNonZeroIndices = true;
FoundFieldNo = true;
}
}
} else {
// Without TargetData, just check for an offsetof expression of the
// appropriate struct type.
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
const Type *CTy;
Constant *FieldNo;
if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
GepIndices.push_back(FieldNo);
ElTy =
STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue());
Ops[i] = SE.getConstant(Ty, 0);
AnyNonZeroIndices = true;
FoundFieldNo = true;
break;
}
}
}
// If no struct field offsets were found, tentatively assume that
// field zero was selected (since the zero offset would obviously
// be folded away).
if (!FoundFieldNo) {
ElTy = STy->getTypeAtIndex(0u);
GepIndices.push_back(
Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
}
}
if (const ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
ElTy = ATy->getElementType();
else
break;
}
// If none of the operands were convertible to proper GEP indices, cast
// the base to i8* and do an ugly getelementptr with that. It's still
// better than ptrtoint+arithmetic+inttoptr at least.
if (!AnyNonZeroIndices) {
// Cast the base to i8*.
V = InsertNoopCastOfTo(V,
Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));
// Expand the operands for a plain byte offset.
Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);
// Fold a GEP with constant operands.
if (Constant *CLHS = dyn_cast<Constant>(V))
if (Constant *CRHS = dyn_cast<Constant>(Idx))
return ConstantExpr::getGetElementPtr(CLHS, &CRHS, 1);
// Do a quick scan to see if we have this GEP nearby. If so, reuse it.
unsigned ScanLimit = 6;
BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
// Scanning starts from the last instruction before the insertion point.
BasicBlock::iterator IP = Builder.GetInsertPoint();
if (IP != BlockBegin) {
--IP;
for (; ScanLimit; --IP, --ScanLimit) {
// Don't count dbg.value against the ScanLimit, to avoid perturbing the
// generated code.
if (isa<DbgInfoIntrinsic>(IP))
ScanLimit++;
if (IP->getOpcode() == Instruction::GetElementPtr &&
IP->getOperand(0) == V && IP->getOperand(1) == Idx)
return IP;
if (IP == BlockBegin) break;
}
}
// Save the original insertion point so we can restore it when we're done.
BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
// Move the insertion point out of as many loops as we can.
while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
BasicBlock *Preheader = L->getLoopPreheader();
if (!Preheader) break;
// Ok, move up a level.
Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
}
// Emit a GEP.
Value *GEP = Builder.CreateGEP(V, Idx, "uglygep");
rememberInstruction(GEP);
// Restore the original insert point.
if (SaveInsertBB)
restoreInsertPoint(SaveInsertBB, SaveInsertPt);
return GEP;
}
// Save the original insertion point so we can restore it when we're done.
BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
// Move the insertion point out of as many loops as we can.
while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
if (!L->isLoopInvariant(V)) break;
bool AnyIndexNotLoopInvariant = false;
for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(),
E = GepIndices.end(); I != E; ++I)
if (!L->isLoopInvariant(*I)) {
AnyIndexNotLoopInvariant = true;
break;
}
if (AnyIndexNotLoopInvariant)
break;
BasicBlock *Preheader = L->getLoopPreheader();
if (!Preheader) break;
// Ok, move up a level.
Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
}
// Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
// because ScalarEvolution may have changed the address arithmetic to
// compute a value which is beyond the end of the allocated object.
Value *Casted = V;
if (V->getType() != PTy)
Casted = InsertNoopCastOfTo(Casted, PTy);
Value *GEP = Builder.CreateGEP(Casted,
GepIndices.begin(),
GepIndices.end(),
"scevgep");
Ops.push_back(SE.getUnknown(GEP));
rememberInstruction(GEP);
// Restore the original insert point.
if (SaveInsertBB)
restoreInsertPoint(SaveInsertBB, SaveInsertPt);
return expand(SE.getAddExpr(Ops));
}

/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
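///
/// For example (illustrative), (-4 * %n) is a non-constant negative, while
/// a plain constant like (-4), or (3 * %n), is not.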
static bool isNonConstantNegative(const SCEV *F) {
const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(F);
if (!Mul) return false;
// If there is a constant factor, it will be first.
const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
if (!SC) return false;
// Return true if the value is negative, this matches things like (-42 * V).
return SC->getValue()->getValue().isNegative();
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the more deeply nested one. If
/// they are neighboring, pick the later one (the one whose header is dominated
/// by the other's).
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
DominatorTree &DT) {
if (!A) return B;
if (!B) return A;
if (A->contains(B)) return B;
if (B->contains(A)) return A;
if (DT.dominates(A->getHeader(), B->getHeader())) return B;
if (DT.dominates(B->getHeader(), A->getHeader())) return A;
return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
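///
/// For example (illustrative), for an expression combining {0,+,1}<%inner>
/// with a value %x defined in an enclosing loop, the relevant loop is
/// %inner, since that is the innermost point at which the whole expression
/// can be computed.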
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
// Test whether we've already computed the most relevant loop for this SCEV.
std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
RelevantLoops.insert(std::make_pair(S, static_cast<const Loop *>(0)));
if (!Pair.second)
return Pair.first->second;
if (isa<SCEVConstant>(S))
// A constant has no relevant loops.
return 0;
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
return Pair.first->second = SE.LI->getLoopFor(I->getParent());
// A non-instruction has no relevant loops.
return 0;
}
if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
const Loop *L = 0;
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
L = AR->getLoop();
for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
I != E; ++I)
L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
return RelevantLoops[N] = L;
}
if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
const Loop *Result = getRelevantLoop(C->getOperand());
return RelevantLoops[C] = Result;
}
if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
const Loop *Result =
PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
getRelevantLoop(D->getRHS()),
*SE.DT);
return RelevantLoops[D] = Result;
}
llvm_unreachable("Unexpected SCEV type!");
return 0;
}

namespace {
/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
DominatorTree &DT;
public:
explicit LoopCompare(DominatorTree &dt) : DT(dt) {}
bool operator()(std::pair<const Loop *, const SCEV *> LHS,
std::pair<const Loop *, const SCEV *> RHS) const {
// Keep pointer operands sorted at the end.
if (LHS.second->getType()->isPointerTy() !=
RHS.second->getType()->isPointerTy())
return LHS.second->getType()->isPointerTy();
// Compare loops with PickMostRelevantLoop.
if (LHS.first != RHS.first)
return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;
// If one operand is a non-constant negative and the other is not,
// put the non-constant negative on the right so that a sub can
// be used instead of a negate and add.
if (isNonConstantNegative(LHS.second)) {
if (!isNonConstantNegative(RHS.second))
return false;
} else if (isNonConstantNegative(RHS.second))
return true;
// Otherwise they are equivalent according to this comparison.
return false;
}
};
}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
const Type *Ty = SE.getEffectiveSCEVType(S->getType());
// Collect all the add operands in a loop, along with their associated loops.
// Iterate in reverse so that constants are emitted last, all else equal, and
// so that pointer operands are inserted first, which the code below relies on
// to form more involved GEPs.
SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
E(S->op_begin()); I != E; ++I)
OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));
// Sort by loop. Use a stable sort so that constants follow non-constants and
// pointer operands precede non-pointer operands.
std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));
// Emit instructions to add all the operands. Hoist as much as possible
// out of loops, and form meaningful getelementptrs where possible.
Value *Sum = 0;
for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
const Loop *CurLoop = I->first;
const SCEV *Op = I->second;
if (!Sum) {
// This is the first operand. Just expand it.
Sum = expand(Op);
++I;
} else if (const PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
// The running sum expression is a pointer. Try to form a getelementptr
// at this level with that as the base.
SmallVector<const SCEV *, 4> NewOps;
for (; I != E && I->first == CurLoop; ++I) {
// If the operand is a SCEVUnknown wrapping a non-instruction value, peek
// through it, to enable more of it to be folded into the GEP.
const SCEV *X = I->second;
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
if (!isa<Instruction>(U->getValue()))
X = SE.getSCEV(U->getValue());
NewOps.push_back(X);
}
Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
} else if (const PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
// The running sum is an integer, and there's a pointer at this level.
// Try to form a getelementptr. If the running sum is an instruction,
// wrap it in a SCEVUnknown to avoid re-analyzing it.
SmallVector<const SCEV *, 4> NewOps;
NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
SE.getSCEV(Sum));
for (++I; I != E && I->first == CurLoop; ++I)
NewOps.push_back(I->second);
Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
} else if (isNonConstantNegative(Op)) {
// Instead of doing a negate and add, just do a subtract.
Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
Sum = InsertNoopCastOfTo(Sum, Ty);
Sum = InsertBinop(Instruction::Sub, Sum, W);
++I;
} else {
// A simple add.
Value *W = expandCodeFor(Op, Ty);
Sum = InsertNoopCastOfTo(Sum, Ty);
// Canonicalize a constant to the RHS.
if (isa<Constant>(Sum)) std::swap(Sum, W);
Sum = InsertBinop(Instruction::Add, Sum, W);
++I;
}
}
return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
const Type *Ty = SE.getEffectiveSCEVType(S->getType());
// Collect all the mul operands in a loop, along with their associated loops.
// Iterate in reverse so that constants are emitted last, all else equal.
SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
E(S->op_begin()); I != E; ++I)
OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));
// Sort by loop. Use a stable sort so that constants follow non-constants.
std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));
// Emit instructions to mul all the operands. Hoist as much as possible
// out of loops.
Value *Prod = 0;
for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
const SCEV *Op = I->second;
if (!Prod) {
// This is the first operand. Just expand it.
Prod = expand(Op);
++I;
} else if (Op->isAllOnesValue()) {
// Instead of doing a multiply by negative one, just do a negate.
Prod = InsertNoopCastOfTo(Prod, Ty);
Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
++I;
} else {
// A simple mul.
Value *W = expandCodeFor(Op, Ty);
Prod = InsertNoopCastOfTo(Prod, Ty);
// Canonicalize a constant to the RHS.
if (isa<Constant>(Prod)) std::swap(Prod, W);
Prod = InsertBinop(Instruction::Mul, Prod, W);
++I;
}
}
return Prod;
}
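
/// visitUDivExpr - Expand a udiv expression. If the divisor is a constant
/// power of two, emit a logical shift right instead; e.g. an illustrative
/// (%x /u 8) becomes (%x lshr 3).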
Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
const Type *Ty = SE.getEffectiveSCEVType(S->getType());
Value *LHS = expandCodeFor(S->getLHS(), Ty);
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
const APInt &RHS = SC->getValue()->getValue();
if (RHS.isPowerOf2())
return InsertBinop(Instruction::LShr, LHS,
ConstantInt::get(Ty, RHS.logBase2()));
}
Value *RHS = expandCodeFor(S->getRHS(), Ty);
return InsertBinop(Instruction::UDiv, LHS, RHS);
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
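///
/// For example (illustrative), with Base = {(%p + 8),+,4} this peels the
/// addrec and add apart, leaving Base = %p and folding (8 + {0,+,4}) into
/// Rest, so that %p can serve as the GEP base.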
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
ScalarEvolution &SE) {
while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
Base = A->getStart();
Rest = SE.getAddExpr(Rest,
SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
A->getStepRecurrence(SE),
A->getLoop()));
}
if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
Base = A->getOperand(A->getNumOperands()-1);
SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
NewAddOps.back() = Rest;
Rest = SE.getAddExpr(NewAddOps);
ExposePointerBase(Base, Rest, SE);
}
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
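///
/// The result is a loop phi together with an increment, along the lines of
/// this illustrative IR:
///   %lsr.iv      = phi i64 [ %start, %preheader ], [ %lsr.iv.next, %latch ]
///   %lsr.iv.next = add i64 %lsr.iv, %step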
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
const Loop *L,
const Type *ExpandTy,
const Type *IntTy) {
// Reuse a previously-inserted PHI, if present.
for (BasicBlock::iterator I = L->getHeader()->begin();
PHINode *PN = dyn_cast<PHINode>(I); ++I)
if (SE.isSCEVable(PN->getType()) &&
(SE.getEffectiveSCEVType(PN->getType()) ==
SE.getEffectiveSCEVType(Normalized->getType())) &&
SE.getSCEV(PN) == Normalized)
if (BasicBlock *LatchBlock = L->getLoopLatch()) {
Instruction *IncV =
cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));
// Determine if this is a well-behaved chain of instructions leading
// back to the PHI. It probably will be, if we're scanning an inner
// loop already visited by LSR for example, but it wouldn't have
// to be.
do {
if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV)) {
IncV = 0;
break;
}
// If any of the operands don't dominate the insert position, bail.
// Addrec operands are always loop-invariant, so this can only happen
// if there are instructions which haven't been hoisted.
for (User::op_iterator OI = IncV->op_begin()+1,
OE = IncV->op_end(); OI != OE; ++OI)
if (Instruction *OInst = dyn_cast<Instruction>(OI))
if (!SE.DT->dominates(OInst, IVIncInsertPos)) {
IncV = 0;
break;
}
if (!IncV)
break;
// Advance to the next instruction.
IncV = dyn_cast<Instruction>(IncV->getOperand(0));
if (!IncV)
break;
if (IncV->mayHaveSideEffects()) {
IncV = 0;
break;
}
} while (IncV != PN);
if (IncV) {
// Ok, the add recurrence looks usable.
// Remember this PHI, even in post-inc mode.
InsertedValues.insert(PN);
// Remember the increment.
IncV = cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));
rememberInstruction(IncV);
if (L == IVIncInsertLoop)
do {
if (SE.DT->dominates(IncV, IVIncInsertPos))
break;
// Make sure the increment is where we want it. But don't move it
// down past a potential existing post-inc user.
IncV->moveBefore(IVIncInsertPos);
IVIncInsertPos = IncV;
IncV = cast<Instruction>(IncV->getOperand(0));
} while (IncV != PN);
return PN;
}
}
// Save the original insertion point so we can restore it when we're done.
BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
// Expand code for the start value.
Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
L->getHeader()->begin());
// Expand code for the step value. Insert instructions right before the
// terminator corresponding to the back-edge. Do this before creating the PHI
// so that PHI reuse code doesn't see an incomplete PHI. If the stride is
// negative, insert a sub instead of an add for the increment (unless it's a
// constant, because subtracts of constants are canonicalized to adds).
const SCEV *Step = Normalized->getStepRecurrence(SE);
bool isPointer = ExpandTy->isPointerTy();
bool isNegative = !isPointer && isNonConstantNegative(Step);
if (isNegative)
Step = SE.getNegativeSCEV(Step);
Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
// Create the PHI.
Builder.SetInsertPoint(L->getHeader(), L->getHeader()->begin());
PHINode *PN = Builder.CreatePHI(ExpandTy, "lsr.iv");
rememberInstruction(PN);
// Create the step instructions and populate the PHI.
BasicBlock *Header = L->getHeader();
for (pred_iterator HPI = pred_begin(Header), HPE = pred_end(Header);
HPI != HPE; ++HPI) {
BasicBlock *Pred = *HPI;
// Add a start value.
if (!L->contains(Pred)) {
PN->addIncoming(StartV, Pred);
continue;
}
// Create a step value and add it to the PHI. If IVIncInsertLoop is
// non-null and equal to the addrec's loop, insert the instructions
// at IVIncInsertPos.
Instruction *InsertPos = L == IVIncInsertLoop ?
IVIncInsertPos : Pred->getTerminator();
Builder.SetInsertPoint(InsertPos->getParent(), InsertPos);
Value *IncV;
// If the PHI is a pointer, use a GEP, otherwise use an add or sub.
if (isPointer) {
const PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
// If the step isn't constant, don't use an implicitly scaled GEP, because
// that would require a multiply inside the loop.
if (!isa<ConstantInt>(StepV))
GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
GEPPtrTy->getAddressSpace());
const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
if (IncV->getType() != PN->getType()) {
IncV = Builder.CreateBitCast(IncV, PN->getType(), "tmp");
rememberInstruction(IncV);
}
} else {
IncV = isNegative ?
Builder.CreateSub(PN, StepV, "lsr.iv.next") :
Builder.CreateAdd(PN, StepV, "lsr.iv.next");
rememberInstruction(IncV);
}
PN->addIncoming(IncV, Pred);
}
// Restore the original insert point.
if (SaveInsertBB)
restoreInsertPoint(SaveInsertBB, SaveInsertPt);
// Remember this PHI, even in post-inc mode.
InsertedValues.insert(PN);
return PN;
}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
const Type *STy = S->getType();
const Type *IntTy = SE.getEffectiveSCEVType(STy);
const Loop *L = S->getLoop();
// Determine a normalized form of this expression, which is the expression
// before any post-inc adjustment is made.
const SCEVAddRecExpr *Normalized = S;
if (PostIncLoops.count(L)) {
PostIncLoopSet Loops;
Loops.insert(L);
Normalized =
cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, 0, 0,
Loops, SE, *SE.DT));
}
// Strip off any non-loop-dominating component from the addrec start.
const SCEV *Start = Normalized->getStart();
const SCEV *PostLoopOffset = 0;
if (!SE.properlyDominates(Start, L->getHeader())) {
PostLoopOffset = Start;
Start = SE.getConstant(Normalized->getType(), 0);
Normalized =
cast<SCEVAddRecExpr>(SE.getAddRecExpr(Start,
Normalized->getStepRecurrence(SE),
Normalized->getLoop()));
}
// Strip off any non-loop-dominating component from the addrec step.
const SCEV *Step = Normalized->getStepRecurrence(SE);
const SCEV *PostLoopScale = 0;
if (!SE.dominates(Step, L->getHeader())) {
PostLoopScale = Step;
Step = SE.getConstant(Normalized->getType(), 1);
Normalized =
cast<SCEVAddRecExpr>(SE.getAddRecExpr(Start, Step,
Normalized->getLoop()));
}
// Expand the core addrec. If we need post-loop scaling, force it to
// expand to an integer type to avoid the need for additional casting.
const Type *ExpandTy = PostLoopScale ? IntTy : STy;
PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy);
// Accommodate post-inc mode, if necessary.
Value *Result;
if (!PostIncLoops.count(L))
Result = PN;
else {
// In PostInc mode, use the post-incremented value.
BasicBlock *LatchBlock = L->getLoopLatch();
assert(LatchBlock && "PostInc mode requires a unique loop latch!");
Result = PN->getIncomingValueForBlock(LatchBlock);
}
// Re-apply any non-loop-dominating scale.
if (PostLoopScale) {
Result = InsertNoopCastOfTo(Result, IntTy);
Result = Builder.CreateMul(Result,
expandCodeFor(PostLoopScale, IntTy));
rememberInstruction(Result);
}
// Re-apply any non-loop-dominating offset.
if (PostLoopOffset) {
if (const PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
const SCEV *const OffsetArray[1] = { PostLoopOffset };
Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
} else {
Result = InsertNoopCastOfTo(Result, IntTy);
Result = Builder.CreateAdd(Result,
expandCodeFor(PostLoopOffset, IntTy));
rememberInstruction(Result);
}
}
return Result;
}

Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
if (!CanonicalMode) return expandAddRecExprLiterally(S);
const Type *Ty = SE.getEffectiveSCEVType(S->getType());
const Loop *L = S->getLoop();
// First check for an existing canonical IV in a suitable type.
PHINode *CanonicalIV = 0;
if (PHINode *PN = L->getCanonicalInductionVariable())
if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
CanonicalIV = PN;
// Rewrite an AddRec in terms of the canonical induction variable, if
// its type is narrower.
if (CanonicalIV &&
SE.getTypeSizeInBits(CanonicalIV->getType()) >
SE.getTypeSizeInBits(Ty)) {
SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop()));
BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
BasicBlock::iterator NewInsertPt =
llvm::next(BasicBlock::iterator(cast<Instruction>(V)));
while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt))
++NewInsertPt;
V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
NewInsertPt);
restoreInsertPoint(SaveInsertBB, SaveInsertPt);
return V;
}
// {X,+,F} --> X + {0,+,F}
if (!S->getStart()->isZero()) {
SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
NewOps[0] = SE.getConstant(Ty, 0);
const SCEV *Rest = SE.getAddRecExpr(NewOps, L);
// Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
// comments on expandAddToGEP for details.
const SCEV *Base = S->getStart();
const SCEV *RestArray[1] = { Rest };
// Dig into the expression to find the pointer base for a GEP.
ExposePointerBase(Base, RestArray[0], SE);
// If we found a pointer, expand the AddRec with a GEP.
if (const PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
// Make sure the Base isn't something exotic, such as a multiplied
// or divided pointer value. In those cases, the result type isn't
// actually a pointer type.
if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
Value *StartV = expand(Base);
assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
}
}
// Just do a normal add. Pre-expand the operands to suppress folding.
return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
SE.getUnknown(expand(Rest))));
}
// If we don't yet have a canonical IV, create one.
if (!CanonicalIV) {
// Create and insert the PHI node for the induction variable in the
// specified loop.
BasicBlock *Header = L->getHeader();
CanonicalIV = PHINode::Create(Ty, "indvar", Header->begin());
rememberInstruction(CanonicalIV);
Constant *One = ConstantInt::get(Ty, 1);
for (pred_iterator HPI = pred_begin(Header), HPE = pred_end(Header);
HPI != HPE; ++HPI) {
BasicBlock *HP = *HPI;
if (L->contains(HP)) {
// Insert a unit add instruction right before the terminator
// corresponding to the back-edge.
Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
"indvar.next",
HP->getTerminator());
rememberInstruction(Add);
CanonicalIV->addIncoming(Add, HP);
} else {
CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
}
}
}
// {0,+,1} --> Insert a canonical induction variable into the loop!
if (S->isAffine() && S->getOperand(1)->isOne()) {
assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
"IVs with types different from the canonical IV should "
"already have been handled!");
return CanonicalIV;
}
// {0,+,F} --> {0,+,1} * F
// If this is a simple linear addrec, emit it now as a special case.
if (S->isAffine()) // {0,+,F} --> i*F
return
expand(SE.getTruncateOrNoop(
SE.getMulExpr(SE.getUnknown(CanonicalIV),
SE.getNoopOrAnyExtend(S->getOperand(1),
CanonicalIV->getType())),
Ty));
// If this is a chain of recurrences, turn it into a closed form, using the
// folders, then expandCodeFor the closed form. This allows the folders to
// simplify the expression without having to build a bunch of special code
// into this folder.
const SCEV *IH = SE.getUnknown(CanonicalIV); // Get I as a "symbolic" SCEV.
// Promote S up to the canonical IV type, if the cast is foldable.
const SCEV *NewS = S;
const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
if (isa<SCEVAddRecExpr>(Ext))
NewS = Ext;
const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
//cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";
// Truncate the result down to the original type, if needed.
const SCEV *T = SE.getTruncateOrNoop(V, Ty);
return expand(T);
}

Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
const Type *Ty = SE.getEffectiveSCEVType(S->getType());
Value *V = expandCodeFor(S->getOperand(),
SE.getEffectiveSCEVType(S->getOperand()->getType()));
Value *I = Builder.CreateTrunc(V, Ty, "tmp");
rememberInstruction(I);
return I;
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
const Type *Ty = SE.getEffectiveSCEVType(S->getType());
Value *V = expandCodeFor(S->getOperand(),
SE.getEffectiveSCEVType(S->getOperand()->getType()));
Value *I = Builder.CreateZExt(V, Ty, "tmp");
rememberInstruction(I);
return I;
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
const Type *Ty = SE.getEffectiveSCEVType(S->getType());
Value *V = expandCodeFor(S->getOperand(),
SE.getEffectiveSCEVType(S->getOperand()->getType()));
Value *I = Builder.CreateSExt(V, Ty, "tmp");
rememberInstruction(I);
return I;
}
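
/// visitSMaxExpr - Expand an smax as a chain of signed comparisons and
/// selects, e.g. (illustrative IR):
///   %tmp  = icmp sgt i64 %a, %b
///   %smax = select i1 %tmp, i64 %a, i64 %b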
Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
const Type *Ty = LHS->getType();
for (int i = S->getNumOperands()-2; i >= 0; --i) {
// In the case of mixed integer and pointer types, do the
// rest of the comparisons as integer.
if (S->getOperand(i)->getType() != Ty) {
Ty = SE.getEffectiveSCEVType(Ty);
LHS = InsertNoopCastOfTo(LHS, Ty);
}
Value *RHS = expandCodeFor(S->getOperand(i), Ty);
Value *ICmp = Builder.CreateICmpSGT(LHS, RHS, "tmp");
rememberInstruction(ICmp);
Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
rememberInstruction(Sel);
LHS = Sel;
}
// In the case of mixed integer and pointer types, cast the
// final result back to the pointer type.
if (LHS->getType() != S->getType())
LHS = InsertNoopCastOfTo(LHS, S->getType());
return LHS;
}

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
const Type *Ty = LHS->getType();
for (int i = S->getNumOperands()-2; i >= 0; --i) {
// In the case of mixed integer and pointer types, do the
// rest of the comparisons as integer.
if (S->getOperand(i)->getType() != Ty) {
Ty = SE.getEffectiveSCEVType(Ty);
LHS = InsertNoopCastOfTo(LHS, Ty);
}
Value *RHS = expandCodeFor(S->getOperand(i), Ty);
Value *ICmp = Builder.CreateICmpUGT(LHS, RHS, "tmp");
rememberInstruction(ICmp);
Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
rememberInstruction(Sel);
LHS = Sel;
}
// In the case of mixed integer and pointer types, cast the
// final result back to the pointer type.
if (LHS->getType() != S->getType())
LHS = InsertNoopCastOfTo(LHS, S->getType());
return LHS;
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, const Type *Ty,
Instruction *I) {
BasicBlock::iterator IP = I;
while (isInsertedInstruction(IP) || isa<DbgInfoIntrinsic>(IP))
++IP;
Builder.SetInsertPoint(IP->getParent(), IP);
return expandCodeFor(SH, Ty);
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, const Type *Ty) {
// Expand the code for this SCEV.
Value *V = expand(SH);
if (Ty) {
assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
"non-trivial casts should be done with the SCEVs directly!");
V = InsertNoopCastOfTo(V, Ty);
}
return V;
}

Value *SCEVExpander::expand(const SCEV *S) {
// Compute an insertion point for this SCEV object. Hoist the instructions
// as far out in the loop nest as possible.
Instruction *InsertPt = Builder.GetInsertPoint();
for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
L = L->getParentLoop())
if (SE.isLoopInvariant(S, L)) {
if (!L) break;
if (BasicBlock *Preheader = L->getLoopPreheader())
InsertPt = Preheader->getTerminator();
} else {
// If the SCEV is computable at this level, insert it into the header
// after the PHIs (and after any other instructions that we've inserted
// there) so that it is guaranteed to dominate any user inside the loop.
if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
InsertPt = L->getHeader()->getFirstNonPHI();
while (isInsertedInstruction(InsertPt) || isa<DbgInfoIntrinsic>(InsertPt))
InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
break;
}
// Check to see if we already expanded this here.
std::map<std::pair<const SCEV *, Instruction *>,
AssertingVH<Value> >::iterator I =
InsertedExpressions.find(std::make_pair(S, InsertPt));
if (I != InsertedExpressions.end())
return I->second;
BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
// Expand the expression into instructions.
Value *V = visit(S);
// Remember the expanded value for this SCEV at this location.
if (PostIncLoops.empty())
InsertedExpressions[std::make_pair(S, InsertPt)] = V;
restoreInsertPoint(SaveInsertBB, SaveInsertPt);
return V;
}

void SCEVExpander::rememberInstruction(Value *I) {
if (!PostIncLoops.empty())
InsertedPostIncValues.insert(I);
else
InsertedValues.insert(I);
// If we just claimed an existing instruction and that instruction had
// been the insert point, adjust the insert point forward so that
// subsequently inserted code will be dominated.
if (Builder.GetInsertPoint() == I) {
BasicBlock::iterator It = cast<Instruction>(I);
do { ++It; } while (isInsertedInstruction(It) ||
isa<DbgInfoIntrinsic>(It));
Builder.SetInsertPoint(Builder.GetInsertBlock(), It);
}
}

void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
// If we acquired more instructions since the old insert point was saved,
// advance past them.
while (isInsertedInstruction(I) || isa<DbgInfoIntrinsic>(I)) ++I;
Builder.SetInsertPoint(BB, I);
}

/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
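///
/// The emitted variable has this illustrative shape:
///   %indvar      = phi i32 [ 0, %preheader ], [ %indvar.next, %latch ]
///   %indvar.next = add i32 %indvar, 1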
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
const Type *Ty) {
assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");
// Build a SCEV for {0,+,1}<L>.
const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
SE.getConstant(Ty, 1), L);
// Emit code for it.
BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
PHINode *V = cast<PHINode>(expandCodeFor(H, 0, L->getHeader()->begin()));
if (SaveInsertBB)
restoreInsertPoint(SaveInsertBB, SaveInsertPt);
return V;
}