
[llvm] Construct SmallVector with iterator ranges (NFC)

Kazu Hirata 2021-01-04 11:42:43 -08:00
parent b6b5ea5b82
commit 08389fa62e
10 changed files with 18 additions and 28 deletions
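The change is mechanical: SmallVector has an explicit constructor taking an llvm::iterator_range, so call sites that previously spelled out a begin()/end() iterator pair can pass the corresponding range accessor instead. Below is a minimal sketch of the before/after pattern, assuming an LLVM build environment; the helper name copyOperands is illustrative and not part of this commit.

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Illustrative helper (not from this commit): snapshot an instruction's operands.
static SmallVector<Value *, 8> copyOperands(Instruction *I) {
  // Old idiom: name the container twice via an explicit iterator pair.
  //   SmallVector<Value *, 8> Ops(I->op_begin(), I->op_end());
  // New idiom: pass the iterator_range returned by operands() directly.
  SmallVector<Value *, 8> Ops(I->operands());
  return Ops;
}

Both spellings build the same vector; the range form simply avoids repeating the container expression, so the resulting code is NFC.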

View File

@@ -332,8 +332,7 @@ public:
   /// This is a helper function which calls the two-argument getUserCost
   /// with \p Operands which are the current operands U has.
   int getUserCost(const User *U, TargetCostKind CostKind) const {
-    SmallVector<const Value *, 4> Operands(U->value_op_begin(),
-                                           U->value_op_end());
+    SmallVector<const Value *, 4> Operands(U->operand_values());
     return getUserCost(U, Operands, CostKind);
   }

View File

@@ -1071,8 +1071,7 @@ public:
   }

   int getInstructionLatency(const Instruction *I) {
-    SmallVector<const Value *, 4> Operands(I->value_op_begin(),
-                                           I->value_op_end());
+    SmallVector<const Value *, 4> Operands(I->operand_values());
     if (getUserCost(I, Operands, TTI::TCK_Latency) == TTI::TCC_Free)
       return 0;

View File

@@ -240,9 +240,8 @@ class GenericDINode : public DINode {
                                 StorageType Storage, bool ShouldCreate = true);

   TempGenericDINode cloneImpl() const {
-    return getTemporary(
-        getContext(), getTag(), getHeader(),
-        SmallVector<Metadata *, 4>(dwarf_op_begin(), dwarf_op_end()));
+    return getTemporary(getContext(), getTag(), getHeader(),
+                        SmallVector<Metadata *, 4>(dwarf_operands()));
   }

 public:

View File

@@ -1128,8 +1128,7 @@ class MDTuple : public MDNode {
                           StorageType Storage, bool ShouldCreate = true);

   TempMDTuple cloneImpl() const {
-    return getTemporary(getContext(),
-                        SmallVector<Metadata *, 4>(op_begin(), op_end()));
+    return getTemporary(getContext(), SmallVector<Metadata *, 4>(operands()));
   }

 public:

View File

@@ -44,7 +44,7 @@ private:
     if (Entry)
       return Entry;

-    SmallVector<BasicBlock *, 32> PredCache(pred_begin(BB), pred_end(BB));
+    SmallVector<BasicBlock *, 32> PredCache(predecessors(BB));
     PredCache.push_back(nullptr); // null terminator.

     BlockToPredCountMap[BB] = PredCache.size() - 1;

View File

@@ -3599,7 +3599,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
       // expression GEP with the same indices and a null base pointer to see
       // what constant folding can make out of it.
       Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType());
-      SmallVector<Value *, 4> IndicesLHS(GLHS->idx_begin(), GLHS->idx_end());
+      SmallVector<Value *, 4> IndicesLHS(GLHS->indices());
       Constant *NewLHS = ConstantExpr::getGetElementPtr(
           GLHS->getSourceElementType(), Null, IndicesLHS);
@@ -5814,7 +5814,7 @@ Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
                             I->getOperand(2), Q);
     break;
   case Instruction::GetElementPtr: {
-    SmallVector<Value *, 8> Ops(I->op_begin(), I->op_end());
+    SmallVector<Value *, 8> Ops(I->operands());
     Result = SimplifyGEPInst(cast<GetElementPtrInst>(I)->getSourceElementType(),
                              Ops, Q);
     break;

View File

@@ -2602,8 +2602,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
       // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
       LIOps.push_back(AddRec->getStart());

-      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
-                                             AddRec->op_end());
+      SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
       // This follows from the fact that the no-wrap flags on the outer add
       // expression are applicable on the 0th iteration, when the add recurrence
       // will be equal to its start value.
@@ -2641,8 +2640,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
              "AddRecExprs are not sorted in reverse dominance order?");
       if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
         // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
-        SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
-                                               AddRec->op_end());
+        SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
         for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
              ++OtherIdx) {
           const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
@@ -3182,8 +3180,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
             const SCEV *Op = M->getOperand(i);
             const SCEV *Div = getUDivExpr(Op, RHSC);
             if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
-              Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
-                                                      M->op_end());
+              Operands = SmallVector<const SCEV *, 4>(M->operands());
               Operands[i] = Div;
               return getMulExpr(Operands);
             }
@@ -3368,8 +3365,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
             ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
             : (!NestedLoop->contains(L) &&
                DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
-      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
-                                                  NestedAR->op_end());
+      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands());
       Operands[0] = NestedAR->getStart();
       // AddRecs require their operands be loop-invariant with respect to their
       // loops. Don't perform this transformation if it would break this
@@ -11360,7 +11356,7 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
   // If the start is a non-zero constant, shift the range to simplify things.
   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
     if (!SC->getValue()->isZero()) {
-      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
+      SmallVector<const SCEV *, 4> Operands(operands());
       Operands[0] = SE.getZero(SC->getType());
       const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
                                              getNoWrapFlags(FlagNW));
@@ -11971,7 +11967,7 @@ void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
   // so that future queries will recompute the expressions using the new
   // value.
   Value *Old = getValPtr();
-  SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end());
+  SmallVector<User *, 16> Worklist(Old->users());
   SmallPtrSet<User *, 8> Visited;
   while (!Worklist.empty()) {
     User *U = Worklist.pop_back_val();

View File

@@ -3411,7 +3411,7 @@ Value *ConstantExpr::handleOperandChangeImpl(Value *From, Value *ToV) {
 }

 Instruction *ConstantExpr::getAsInstruction() const {
-  SmallVector<Value *, 4> ValueOperands(op_begin(), op_end());
+  SmallVector<Value *, 4> ValueOperands(operands());
   ArrayRef<Value*> Ops(ValueOperands);

   switch (getOpcode()) {

View File

@@ -2566,7 +2566,7 @@ void Verifier::visitBasicBlock(BasicBlock &BB) {
   // Check constraints that this basic block imposes on all of the PHI nodes in
   // it.
   if (isa<PHINode>(BB.front())) {
-    SmallVector<BasicBlock*, 8> Preds(pred_begin(&BB), pred_end(&BB));
+    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
     SmallVector<std::pair<BasicBlock*, Value*>, 8> Values;
     llvm::sort(Preds);
     for (const PHINode &PN : BB.phis()) {
@@ -3495,7 +3495,7 @@ void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
          "GEP base pointer is not a vector or a vector of pointers", &GEP);
   Assert(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);

-  SmallVector<Value*, 16> Idxs(GEP.idx_begin(), GEP.idx_end());
+  SmallVector<Value *, 16> Idxs(GEP.indices());
   Assert(all_of(
       Idxs, [](Value* V) { return V->getType()->isIntOrIntVectorTy(); }),
       "GEP indexes must be integers", &GEP);

View File

@@ -232,9 +232,7 @@ bool RecordRecTy::typeIsA(const RecTy *RHS) const {
 static RecordRecTy *resolveRecordTypes(RecordRecTy *T1, RecordRecTy *T2) {
   SmallVector<Record *, 4> CommonSuperClasses;
-  SmallVector<Record *, 4> Stack;
-  Stack.insert(Stack.end(), T1->classes_begin(), T1->classes_end());
+  SmallVector<Record *, 4> Stack(T1->classes_begin(), T1->classes_end());

   while (!Stack.empty()) {
     Record *R = Stack.back();