1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-25 20:23:11 +01:00

Clean up usages of asserting vector getters in Type

Summary:
Remove usages of asserting vector getters in Type in preparation for the
VectorType refactor. The existence of these functions complicates the
refactor while adding little value.

Reviewers: sunfish, sdesmalen, efriedma

Reviewed By: efriedma

Subscribers: hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D77273
This commit is contained in:
Christopher Tetreault 2020-04-09 12:19:23 -07:00
parent ea5be969d3
commit 2fbc0bb4f3
8 changed files with 74 additions and 61 deletions

View File

@@ -63,7 +63,8 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
// Splat the constant if needed.
if (IntIdxTy->isVectorTy() && !OpC->getType()->isVectorTy())
OpC = ConstantVector::getSplat(IntIdxTy->getVectorElementCount(), OpC);
OpC = ConstantVector::getSplat(
cast<VectorType>(IntIdxTy)->getElementCount(), OpC);
Constant *Scale = ConstantInt::get(IntIdxTy, Size);
Constant *OC = ConstantExpr::getIntegerCast(OpC, IntIdxTy, true /*SExt*/);
@@ -76,7 +77,8 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
// Splat the index if needed.
if (IntIdxTy->isVectorTy() && !Op->getType()->isVectorTy())
Op = Builder->CreateVectorSplat(IntIdxTy->getVectorNumElements(), Op);
Op = Builder->CreateVectorSplat(
cast<VectorType>(IntIdxTy)->getNumElements(), Op);
// Convert to correct type.
if (Op->getType() != IntIdxTy)

View File

@@ -155,11 +155,11 @@ Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
// If the element types match, IR can fold it.
unsigned NumDstElt = DestVTy->getNumElements();
unsigned NumSrcElt = C->getType()->getVectorNumElements();
unsigned NumSrcElt = cast<VectorType>(C->getType())->getNumElements();
if (NumDstElt == NumSrcElt)
return ConstantExpr::getBitCast(C, DestTy);
Type *SrcEltTy = C->getType()->getVectorElementType();
Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
Type *DstEltTy = DestVTy->getElementType();
// Otherwise, we're changing the number of elements in a vector, which
@@ -218,7 +218,8 @@ Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
for (unsigned j = 0; j != Ratio; ++j) {
Constant *Src = C->getAggregateElement(SrcElt++);
if (Src && isa<UndefValue>(Src))
Src = Constant::getNullValue(C->getType()->getVectorElementType());
Src = Constant::getNullValue(
cast<VectorType>(C->getType())->getElementType());
else
Src = dyn_cast_or_null<ConstantInt>(Src);
if (!Src) // Reject constantexpr elements.
@@ -469,8 +470,8 @@ bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
NumElts = AT->getNumElements();
EltTy = AT->getElementType();
} else {
NumElts = C->getType()->getVectorNumElements();
EltTy = C->getType()->getVectorElementType();
NumElts = cast<VectorType>(C->getType())->getNumElements();
EltTy = cast<VectorType>(C->getType())->getElementType();
}
uint64_t EltSize = DL.getTypeAllocSize(EltTy);
uint64_t Index = ByteOffset / EltSize;
@@ -508,7 +509,7 @@ bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
const DataLayout &DL) {
// Bail out early. Not expect to load from scalable global variable.
if (LoadTy->isVectorTy() && LoadTy->getVectorIsScalable())
if (LoadTy->isVectorTy() && cast<VectorType>(LoadTy)->isScalable())
return nullptr;
auto *PTy = cast<PointerType>(C->getType());
@@ -836,7 +837,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
Type *ResElemTy = GEP->getResultElementType();
Type *ResTy = GEP->getType();
if (!SrcElemTy->isSized() ||
(SrcElemTy->isVectorTy() && SrcElemTy->getVectorIsScalable()))
(SrcElemTy->isVectorTy() && cast<VectorType>(SrcElemTy)->isScalable()))
return nullptr;
if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
@@ -2571,7 +2572,7 @@ static Constant *ConstantFoldVectorCall(StringRef Name,
// Do not iterate on scalable vector. The number of elements is unknown at
// compile-time.
if (VTy->getVectorIsScalable())
if (VTy->isScalable())
return nullptr;
if (IntrinsicID == Intrinsic::masked_load) {

View File

@@ -945,8 +945,9 @@ static Value *simplifyDivRem(Value *Op0, Value *Op1, bool IsDiv) {
// If any element of a constant divisor vector is zero or undef, the whole op
// is undef.
auto *Op1C = dyn_cast<Constant>(Op1);
if (Op1C && Ty->isVectorTy()) {
unsigned NumElts = Ty->getVectorNumElements();
auto *VTy = dyn_cast<VectorType>(Ty);
if (Op1C && VTy) {
unsigned NumElts = VTy->getNumElements();
for (unsigned i = 0; i != NumElts; ++i) {
Constant *Elt = Op1C->getAggregateElement(i);
if (Elt && (Elt->isNullValue() || isa<UndefValue>(Elt)))
@@ -1221,7 +1222,8 @@ static bool isUndefShift(Value *Amount) {
// If all lanes of a vector shift are undefined the whole shift is.
if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E; ++I)
for (unsigned I = 0, E = cast<VectorType>(C->getType())->getNumElements();
I != E; ++I)
if (!isUndefShift(C->getAggregateElement(I)))
return false;
return true;
@@ -4011,7 +4013,7 @@ static Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
Constant *TrueC, *FalseC;
if (TrueVal->getType()->isVectorTy() && match(TrueVal, m_Constant(TrueC)) &&
match(FalseVal, m_Constant(FalseC))) {
unsigned NumElts = TrueC->getType()->getVectorNumElements();
unsigned NumElts = cast<VectorType>(TrueC->getType())->getNumElements();
SmallVector<Constant *, 16> NewC;
for (unsigned i = 0; i != NumElts; ++i) {
// Bail out on incomplete vector constants.
@@ -4081,7 +4083,7 @@ static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
return UndefValue::get(GEPTy);
bool IsScalableVec =
SrcTy->isVectorTy() ? SrcTy->getVectorIsScalable() : false;
isa<VectorType>(SrcTy) && cast<VectorType>(SrcTy)->isScalable();
if (Ops.size() == 2) {
// getelementptr P, 0 -> P.
@@ -4223,8 +4225,8 @@ Value *llvm::SimplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx,
// For fixed-length vector, fold into undef if index is out of bounds.
if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
if (!Vec->getType()->getVectorIsScalable() &&
CI->uge(Vec->getType()->getVectorNumElements()))
if (!cast<VectorType>(Vec->getType())->isScalable() &&
CI->uge(cast<VectorType>(Vec->getType())->getNumElements()))
return UndefValue::get(Vec->getType());
}
@@ -4280,6 +4282,7 @@ Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
/// If not, this returns null.
static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &,
unsigned) {
auto *VecVTy = cast<VectorType>(Vec->getType());
if (auto *CVec = dyn_cast<Constant>(Vec)) {
if (auto *CIdx = dyn_cast<Constant>(Idx))
return ConstantFoldExtractElementInstruction(CVec, CIdx);
@@ -4289,16 +4292,15 @@ static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQ
return Splat;
if (isa<UndefValue>(Vec))
return UndefValue::get(Vec->getType()->getVectorElementType());
return UndefValue::get(VecVTy->getElementType());
}
// If extracting a specified index from the vector, see if we can recursively
// find a previously computed scalar that was inserted into the vector.
if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
// For fixed-length vector, fold into undef if index is out of bounds.
if (!Vec->getType()->getVectorIsScalable() &&
IdxC->getValue().uge(Vec->getType()->getVectorNumElements()))
return UndefValue::get(Vec->getType()->getVectorElementType());
if (!VecVTy->isScalable() && IdxC->getValue().uge(VecVTy->getNumElements()))
return UndefValue::get(VecVTy->getElementType());
if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
return Elt;
}
@@ -4306,7 +4308,7 @@ static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQ
// An undef extract index can be arbitrarily chosen to be an out-of-range
// index value, which would result in the instruction being undef.
if (isa<UndefValue>(Idx))
return UndefValue::get(Vec->getType()->getVectorElementType());
return UndefValue::get(VecVTy->getElementType());
return nullptr;
}
@@ -4403,7 +4405,7 @@ static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
return nullptr;
// The mask value chooses which source operand we need to look at next.
int InVecNumElts = Op0->getType()->getVectorNumElements();
int InVecNumElts = cast<VectorType>(Op0->getType())->getNumElements();
int RootElt = MaskVal;
Value *SourceOp = Op0;
if (MaskVal >= InVecNumElts) {
@@ -4446,9 +4448,9 @@ static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1,
if (all_of(Mask, [](int Elem) { return Elem == UndefMaskElem; }))
return UndefValue::get(RetTy);
Type *InVecTy = Op0->getType();
auto *InVecTy = cast<VectorType>(Op0->getType());
unsigned MaskNumElts = Mask.size();
ElementCount InVecEltCount = InVecTy->getVectorElementCount();
ElementCount InVecEltCount = InVecTy->getElementCount();
bool Scalable = InVecEltCount.Scalable;

View File

@@ -148,7 +148,8 @@ bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
const DominatorTree *DT) {
// For unsized types or scalable vectors we don't know exactly how many bytes
// are dereferenced, so bail out.
if (!Ty->isSized() || (Ty->isVectorTy() && Ty->getVectorIsScalable()))
if (!Ty->isSized() ||
(Ty->isVectorTy() && cast<VectorType>(Ty)->isScalable()))
return false;
// When dereferenceability information is provided by a dereferenceable

View File

@@ -650,7 +650,7 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
return unknown();
if (I.getAllocatedType()->isVectorTy() &&
I.getAllocatedType()->getVectorIsScalable())
cast<VectorType>(I.getAllocatedType())->isScalable())
return unknown();
APInt Size(IntTyBits, DL.getTypeAllocSize(I.getAllocatedType()));

View File

@@ -874,7 +874,7 @@ static bool matchPairwiseShuffleMask(ShuffleVectorInst *SI, bool IsLeft,
else if (!SI)
return false;
SmallVector<int, 32> Mask(SI->getType()->getVectorNumElements(), -1);
SmallVector<int, 32> Mask(SI->getType()->getNumElements(), -1);
// Build a mask of 0, 2, ... (left) or 1, 3, ... (right) depending on whether
// we look at the left or right side.
@@ -1036,8 +1036,8 @@ static ReductionKind matchPairwiseReduction(const ExtractElementInst *ReduxRoot,
if (!RD)
return RK_None;
Type *VecTy = RdxStart->getType();
unsigned NumVecElems = VecTy->getVectorNumElements();
auto *VecTy = cast<VectorType>(RdxStart->getType());
unsigned NumVecElems = VecTy->getNumElements();
if (!isPowerOf2_32(NumVecElems))
return RK_None;
@@ -1101,8 +1101,8 @@ matchVectorSplittingReduction(const ExtractElementInst *ReduxRoot,
if (!RD)
return RK_None;
Type *VecTy = ReduxRoot->getOperand(0)->getType();
unsigned NumVecElems = VecTy->getVectorNumElements();
auto *VecTy = cast<VectorType>(ReduxRoot->getOperand(0)->getType());
unsigned NumVecElems = VecTy->getNumElements();
if (!isPowerOf2_32(NumVecElems))
return RK_None;

View File

@@ -168,11 +168,12 @@ static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
APInt &DemandedLHS, APInt &DemandedRHS) {
// The length of scalable vectors is unknown at compile time, thus we
// cannot check their values
if (Shuf->getType()->getVectorElementCount().Scalable)
if (Shuf->getType()->isScalable())
return false;
int NumElts = Shuf->getOperand(0)->getType()->getVectorNumElements();
int NumMaskElts = Shuf->getType()->getVectorNumElements();
int NumElts =
cast<VectorType>(Shuf->getOperand(0)->getType())->getNumElements();
int NumMaskElts = Shuf->getType()->getNumElements();
DemandedLHS = DemandedRHS = APInt::getNullValue(NumElts);
if (DemandedElts.isNullValue())
return true;
@@ -206,8 +207,9 @@ static void computeKnownBits(const Value *V, const APInt &DemandedElts,
static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
const Query &Q) {
Type *Ty = V->getType();
APInt DemandedElts = Ty->isVectorTy()
? APInt::getAllOnesValue(Ty->getVectorNumElements())
APInt DemandedElts =
Ty->isVectorTy()
? APInt::getAllOnesValue(cast<VectorType>(Ty)->getNumElements())
: APInt(1, 1);
computeKnownBits(V, DemandedElts, Known, Depth, Q);
}
@@ -373,8 +375,9 @@ static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
const Query &Q) {
Type *Ty = V->getType();
APInt DemandedElts = Ty->isVectorTy()
? APInt::getAllOnesValue(Ty->getVectorNumElements())
APInt DemandedElts =
Ty->isVectorTy()
? APInt::getAllOnesValue(cast<VectorType>(Ty)->getNumElements())
: APInt(1, 1);
return ComputeNumSignBits(V, DemandedElts, Depth, Q);
}
@@ -1791,7 +1794,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
const Value *Vec = I->getOperand(0);
const Value *Idx = I->getOperand(1);
auto *CIdx = dyn_cast<ConstantInt>(Idx);
unsigned NumElts = Vec->getType()->getVectorNumElements();
unsigned NumElts = cast<VectorType>(Vec->getType())->getNumElements();
APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
if (CIdx && CIdx->getValue().ult(NumElts))
DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
@@ -1870,8 +1873,8 @@ void computeKnownBits(const Value *V, const APInt &DemandedElts,
Type *Ty = V->getType();
assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
"Not integer or pointer type!");
assert(((Ty->isVectorTy() &&
Ty->getVectorNumElements() == DemandedElts.getBitWidth()) ||
assert(((Ty->isVectorTy() && cast<VectorType>(Ty)->getNumElements() ==
DemandedElts.getBitWidth()) ||
(!Ty->isVectorTy() && DemandedElts == APInt(1, 1))) &&
"Unexpected vector size");
@@ -2510,7 +2513,7 @@ bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
const Value *Vec = EEI->getVectorOperand();
const Value *Idx = EEI->getIndexOperand();
auto *CIdx = dyn_cast<ConstantInt>(Idx);
unsigned NumElts = Vec->getType()->getVectorNumElements();
unsigned NumElts = cast<VectorType>(Vec->getType())->getNumElements();
APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
if (CIdx && CIdx->getValue().ult(NumElts))
DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
@@ -2524,8 +2527,9 @@ bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
Type *Ty = V->getType();
APInt DemandedElts = Ty->isVectorTy()
? APInt::getAllOnesValue(Ty->getVectorNumElements())
APInt DemandedElts =
Ty->isVectorTy()
? APInt::getAllOnesValue(cast<VectorType>(Ty)->getNumElements())
: APInt(1, 1);
return isKnownNonZero(V, DemandedElts, Depth, Q);
}
@@ -2627,7 +2631,7 @@ static unsigned computeNumSignBitsVectorConstant(const Value *V,
return 0;
unsigned MinSignBits = TyBits;
unsigned NumElts = CV->getType()->getVectorNumElements();
unsigned NumElts = cast<VectorType>(CV->getType())->getNumElements();
for (unsigned i = 0; i != NumElts; ++i) {
if (!DemandedElts[i])
continue;
@@ -2670,8 +2674,8 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
// same behavior for poison though -- that's a FIXME today.
Type *Ty = V->getType();
assert(((Ty->isVectorTy() &&
Ty->getVectorNumElements() == DemandedElts.getBitWidth()) ||
assert(((Ty->isVectorTy() && cast<VectorType>(Ty)->getNumElements() ==
DemandedElts.getBitWidth()) ||
(!Ty->isVectorTy() && DemandedElts == APInt(1, 1))) &&
"Unexpected vector size");
@@ -3246,8 +3250,8 @@ static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
// Handle vector of constants.
if (auto *CV = dyn_cast<Constant>(V)) {
if (CV->getType()->isVectorTy()) {
unsigned NumElts = CV->getType()->getVectorNumElements();
if (auto *CVVTy = dyn_cast<VectorType>(CV->getType())) {
unsigned NumElts = CVVTy->getNumElements();
for (unsigned i = 0; i != NumElts; ++i) {
auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
if (!CFP)
@@ -3423,7 +3427,7 @@ bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
return false;
// For vectors, verify that each element is not infinity.
unsigned NumElts = V->getType()->getVectorNumElements();
unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
for (unsigned i = 0; i != NumElts; ++i) {
Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
if (!Elt)
@@ -3524,7 +3528,7 @@ bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
return false;
// For vectors, verify that each element is not NaN.
unsigned NumElts = V->getType()->getVectorNumElements();
unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
for (unsigned i = 0; i != NumElts; ++i) {
Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
if (!Elt)

View File

@@ -263,7 +263,7 @@ Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
assert(V->getType()->isVectorTy() && "Not looking at a vector?");
VectorType *VTy = cast<VectorType>(V->getType());
// For fixed-length vector, return undef for out of range access.
if (!V->getType()->getVectorIsScalable()) {
if (!VTy->isScalable()) {
unsigned Width = VTy->getNumElements();
if (EltNo >= Width)
return UndefValue::get(VTy->getElementType());
@@ -289,7 +289,8 @@ Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
}
if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
unsigned LHSWidth = SVI->getOperand(0)->getType()->getVectorNumElements();
unsigned LHSWidth =
cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
int InEl = SVI->getMaskValue(EltNo);
if (InEl < 0)
return UndefValue::get(VTy->getElementType());
@@ -805,8 +806,9 @@ bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
return false;
if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
return true;
for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E;
++I) {
for (unsigned I = 0,
E = cast<VectorType>(ConstMask->getType())->getNumElements();
I != E; ++I) {
if (auto *MaskElt = ConstMask->getAggregateElement(I))
if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
continue;
@@ -822,8 +824,9 @@ bool llvm::maskIsAllOneOrUndef(Value *Mask) {
return false;
if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
return true;
for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E;
++I) {
for (unsigned I = 0,
E = cast<VectorType>(ConstMask->getType())->getNumElements();
I != E; ++I) {
if (auto *MaskElt = ConstMask->getAggregateElement(I))
if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
continue;