
[llvm] Construct SmallVector with iterator ranges (NFC)

Kazu Hirata 2021-01-04 11:42:43 -08:00
parent b6b5ea5b82
commit 08389fa62e
10 changed files with 18 additions and 28 deletions
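The pattern applied throughout this commit is mechanical: wherever a SmallVector was built from an explicit begin/end iterator pair, it is now built from the corresponding range accessor (operands(), users(), indices(), and so on), which returns an llvm::iterator_range that SmallVector's range constructor accepts. Below is a minimal standalone sketch of the two forms; it is not part of the commit, and it assumes an LLVM checkout whose SmallVector.h already provides the iterator_range constructor (as this tree does).

// Sketch only: illustrates the old iterator-pair form next to the new
// range form that this commit switches to. Assumes LLVM headers are on
// the include path.
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <iterator>

using namespace llvm;

int main() {
  int Src[] = {1, 2, 3, 4};

  // Old style: spell out the begin/end iterator pair.
  SmallVector<int, 4> A(std::begin(Src), std::end(Src));

  // New style: hand an iterator_range to the constructor, the same way the
  // commit passes accessors such as U->operands() or Old->users().
  SmallVector<int, 4> B(make_range(std::begin(Src), std::end(Src)));

  // Both forms copy the same elements; the change is NFC.
  assert(A.size() == B.size());
  return 0;
}

Since the element sequence is identical either way, the change is purely cosmetic (hence NFC): it removes the duplicated begin/end spelling and usually saves a wrapped line.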

View File

@@ -332,8 +332,7 @@ public:
   /// This is a helper function which calls the two-argument getUserCost
   /// with \p Operands which are the current operands U has.
   int getUserCost(const User *U, TargetCostKind CostKind) const {
-    SmallVector<const Value *, 4> Operands(U->value_op_begin(),
-                                           U->value_op_end());
+    SmallVector<const Value *, 4> Operands(U->operand_values());
     return getUserCost(U, Operands, CostKind);
   }

View File

@@ -1071,8 +1071,7 @@ public:
   }
   int getInstructionLatency(const Instruction *I) {
-    SmallVector<const Value *, 4> Operands(I->value_op_begin(),
-                                           I->value_op_end());
+    SmallVector<const Value *, 4> Operands(I->operand_values());
     if (getUserCost(I, Operands, TTI::TCK_Latency) == TTI::TCC_Free)
       return 0;
View File

@@ -240,9 +240,8 @@ class GenericDINode : public DINode {
                             StorageType Storage, bool ShouldCreate = true);
   TempGenericDINode cloneImpl() const {
-    return getTemporary(
-        getContext(), getTag(), getHeader(),
-        SmallVector<Metadata *, 4>(dwarf_op_begin(), dwarf_op_end()));
+    return getTemporary(getContext(), getTag(), getHeader(),
+                        SmallVector<Metadata *, 4>(dwarf_operands()));
   }
 public:

View File

@@ -1128,8 +1128,7 @@ class MDTuple : public MDNode {
                           StorageType Storage, bool ShouldCreate = true);
   TempMDTuple cloneImpl() const {
-    return getTemporary(getContext(),
-                        SmallVector<Metadata *, 4>(op_begin(), op_end()));
+    return getTemporary(getContext(), SmallVector<Metadata *, 4>(operands()));
   }
 public:

View File

@@ -44,7 +44,7 @@ private:
     if (Entry)
       return Entry;
-    SmallVector<BasicBlock *, 32> PredCache(pred_begin(BB), pred_end(BB));
+    SmallVector<BasicBlock *, 32> PredCache(predecessors(BB));
     PredCache.push_back(nullptr); // null terminator.
     BlockToPredCountMap[BB] = PredCache.size() - 1;

View File

@@ -3599,7 +3599,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
       // expression GEP with the same indices and a null base pointer to see
       // what constant folding can make out of it.
      Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType());
-      SmallVector<Value *, 4> IndicesLHS(GLHS->idx_begin(), GLHS->idx_end());
+      SmallVector<Value *, 4> IndicesLHS(GLHS->indices());
       Constant *NewLHS = ConstantExpr::getGetElementPtr(
           GLHS->getSourceElementType(), Null, IndicesLHS);
@@ -5814,7 +5814,7 @@ Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
                                     I->getOperand(2), Q);
     break;
   case Instruction::GetElementPtr: {
-    SmallVector<Value *, 8> Ops(I->op_begin(), I->op_end());
+    SmallVector<Value *, 8> Ops(I->operands());
     Result = SimplifyGEPInst(cast<GetElementPtrInst>(I)->getSourceElementType(),
                              Ops, Q);
     break;

View File

@@ -2602,8 +2602,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
       // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
       LIOps.push_back(AddRec->getStart());
-      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
-                                             AddRec->op_end());
+      SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
       // This follows from the fact that the no-wrap flags on the outer add
       // expression are applicable on the 0th iteration, when the add recurrence
       // will be equal to its start value.
@@ -2641,8 +2640,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
              "AddRecExprs are not sorted in reverse dominance order?");
       if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
         // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
-        SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
-                                               AddRec->op_end());
+        SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
         for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
              ++OtherIdx) {
           const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
@@ -3182,8 +3180,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
         const SCEV *Op = M->getOperand(i);
         const SCEV *Div = getUDivExpr(Op, RHSC);
         if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
-          Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
-                                                  M->op_end());
+          Operands = SmallVector<const SCEV *, 4>(M->operands());
           Operands[i] = Div;
           return getMulExpr(Operands);
         }
@@ -3368,8 +3365,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
             ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
             : (!NestedLoop->contains(L) &&
                DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
-      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
-                                                  NestedAR->op_end());
+      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands());
       Operands[0] = NestedAR->getStart();
       // AddRecs require their operands be loop-invariant with respect to their
       // loops. Don't perform this transformation if it would break this
@@ -11360,7 +11356,7 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
   // If the start is a non-zero constant, shift the range to simplify things.
   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
     if (!SC->getValue()->isZero()) {
-      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
+      SmallVector<const SCEV *, 4> Operands(operands());
      Operands[0] = SE.getZero(SC->getType());
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
                                             getNoWrapFlags(FlagNW));
@@ -11971,7 +11967,7 @@ void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
   // so that future queries will recompute the expressions using the new
   // value.
   Value *Old = getValPtr();
-  SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end());
+  SmallVector<User *, 16> Worklist(Old->users());
   SmallPtrSet<User *, 8> Visited;
   while (!Worklist.empty()) {
     User *U = Worklist.pop_back_val();

View File

@@ -3411,7 +3411,7 @@ Value *ConstantExpr::handleOperandChangeImpl(Value *From, Value *ToV) {
 }
 Instruction *ConstantExpr::getAsInstruction() const {
-  SmallVector<Value *, 4> ValueOperands(op_begin(), op_end());
+  SmallVector<Value *, 4> ValueOperands(operands());
   ArrayRef<Value*> Ops(ValueOperands);
   switch (getOpcode()) {

View File

@@ -2566,7 +2566,7 @@ void Verifier::visitBasicBlock(BasicBlock &BB) {
   // Check constraints that this basic block imposes on all of the PHI nodes in
   // it.
   if (isa<PHINode>(BB.front())) {
-    SmallVector<BasicBlock*, 8> Preds(pred_begin(&BB), pred_end(&BB));
+    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
     SmallVector<std::pair<BasicBlock*, Value*>, 8> Values;
     llvm::sort(Preds);
     for (const PHINode &PN : BB.phis()) {
@@ -3495,7 +3495,7 @@ void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
          "GEP base pointer is not a vector or a vector of pointers", &GEP);
   Assert(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
-  SmallVector<Value*, 16> Idxs(GEP.idx_begin(), GEP.idx_end());
+  SmallVector<Value *, 16> Idxs(GEP.indices());
   Assert(all_of(
       Idxs, [](Value* V) { return V->getType()->isIntOrIntVectorTy(); }),
       "GEP indexes must be integers", &GEP);

View File

@@ -232,9 +232,7 @@ bool RecordRecTy::typeIsA(const RecTy *RHS) const {
 static RecordRecTy *resolveRecordTypes(RecordRecTy *T1, RecordRecTy *T2) {
   SmallVector<Record *, 4> CommonSuperClasses;
-  SmallVector<Record *, 4> Stack;
-  Stack.insert(Stack.end(), T1->classes_begin(), T1->classes_end());
+  SmallVector<Record *, 4> Stack(T1->classes_begin(), T1->classes_end());
   while (!Stack.empty()) {
     Record *R = Stack.back();