Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-25 12:12:47 +01:00)
Clean up usages of asserting vector getters in Type
Summary:
Remove usages of asserting vector getters in Type in preparation for the
VectorType refactor. The existence of these functions complicates the
refactor while adding little value.

Reviewers: dexonsmith, sdesmalen, efriedma

Reviewed By: efriedma

Subscribers: hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D77276
This commit is contained in:
parent d850d60536
commit 830bc11d96
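The change is mechanical: each asserting getter on Type (getVectorNumElements, getVectorElementType, getVectorElementCount, getVectorIsScalable) is replaced by an explicit cast<VectorType> at the call site (or dyn_cast<VectorType> where the operand may not be a vector), followed by the corresponding VectorType accessor. A minimal sketch of the two styles, not taken from the patch itself; the helper function and its name are hypothetical:

    #include "llvm/IR/DerivedTypes.h" // VectorType
    #include "llvm/IR/Type.h"
    #include "llvm/Support/Casting.h" // cast<>, dyn_cast<>

    using namespace llvm;

    // Hypothetical helper, for illustration only.
    // Old style (removed by this patch): Ty->getVectorNumElements()
    // asserted internally that Ty is a vector.
    // New style: make the cast explicit at the call site, or use
    // dyn_cast<VectorType> when the type may not be a vector at all.
    static unsigned getNumEltsOrZero(Type *Ty) {
      if (auto *VTy = dyn_cast<VectorType>(Ty))
        return VTy->getNumElements();
      return 0;
    }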
@ -1032,13 +1032,13 @@ public:
Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
Ptr->getType()->getPointerAddressSpace());
// Vector GEP
if (Ptr->getType()->isVectorTy()) {
ElementCount EltCount = Ptr->getType()->getVectorElementCount();
if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
ElementCount EltCount = PtrVTy->getElementCount();
return VectorType::get(PtrTy, EltCount);
}
for (Value *Index : IdxList)
if (Index->getType()->isVectorTy()) {
ElementCount EltCount = Index->getType()->getVectorElementCount();
if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
ElementCount EltCount = IndexVTy->getElementCount();
return VectorType::get(PtrTy, EltCount);
}
// Scalar GEP
@ -1991,7 +1991,8 @@ public:
/// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
/// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
bool changesLength() const {
unsigned NumSourceElts = Op<0>()->getType()->getVectorNumElements();
unsigned NumSourceElts =
cast<VectorType>(Op<0>()->getType())->getNumElements();
unsigned NumMaskElts = ShuffleMask.size();
return NumSourceElts != NumMaskElts;
}
@ -2000,7 +2001,8 @@ public:
/// elements than its source vectors.
/// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
bool increasesLength() const {
unsigned NumSourceElts = Op<0>()->getType()->getVectorNumElements();
unsigned NumSourceElts =
cast<VectorType>(Op<0>()->getType())->getNumElements();
unsigned NumMaskElts = ShuffleMask.size();
return NumSourceElts < NumMaskElts;
}
@ -2193,7 +2195,7 @@ public:

/// Return true if this shuffle mask is an extract subvector mask.
bool isExtractSubvectorMask(int &Index) const {
int NumSrcElts = Op<0>()->getType()->getVectorNumElements();
int NumSrcElts = cast<VectorType>(Op<0>()->getType())->getNumElements();
return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
}

@ -275,7 +275,7 @@ template <typename Predicate> struct cst_pred_ty : public Predicate {
|
||||
return this->isValue(CI->getValue());
|
||||
|
||||
// Non-splat vector constant: check each element for a match.
|
||||
unsigned NumElts = V->getType()->getVectorNumElements();
|
||||
unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
|
||||
assert(NumElts != 0 && "Constant vector with no elements?");
|
||||
bool HasNonUndefElements = false;
|
||||
for (unsigned i = 0; i != NumElts; ++i) {
|
||||
@ -334,7 +334,7 @@ template <typename Predicate> struct cstfp_pred_ty : public Predicate {
|
||||
return this->isValue(CF->getValueAPF());
|
||||
|
||||
// Non-splat vector constant: check each element for a match.
|
||||
unsigned NumElts = V->getType()->getVectorNumElements();
|
||||
unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
|
||||
assert(NumElts != 0 && "Constant vector with no elements?");
|
||||
bool HasNonUndefElements = false;
|
||||
for (unsigned i = 0; i != NumElts; ++i) {
|
||||
@ -2173,8 +2173,8 @@ public:
|
||||
|
||||
if (m_PtrToInt(m_OffsetGep(m_Zero(), m_SpecificInt(1))).match(V)) {
|
||||
Type *PtrTy = cast<Operator>(V)->getOperand(0)->getType();
|
||||
Type *DerefTy = PtrTy->getPointerElementType();
|
||||
if (DerefTy->isVectorTy() && DerefTy->getVectorIsScalable() &&
|
||||
auto *DerefTy = dyn_cast<VectorType>(PtrTy->getPointerElementType());
|
||||
if (DerefTy && DerefTy->isScalable() &&
|
||||
DL.getTypeAllocSizeInBits(DerefTy).getKnownMinSize() == 8)
|
||||
return true;
|
||||
}
|
||||
|
@ -464,7 +464,7 @@ static void PrintLLVMName(raw_ostream &OS, const Value *V) {
|
||||
|
||||
static void PrintShuffleMask(raw_ostream &Out, Type *Ty, ArrayRef<int> Mask) {
|
||||
Out << ", <";
|
||||
if (Ty->getVectorIsScalable())
|
||||
if (cast<VectorType>(Ty)->isScalable())
|
||||
Out << "vscale x ";
|
||||
Out << Mask.size() << " x i32> ";
|
||||
bool FirstElt = true;
|
||||
@ -1504,13 +1504,14 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
|
||||
}
|
||||
|
||||
if (isa<ConstantVector>(CV) || isa<ConstantDataVector>(CV)) {
|
||||
Type *ETy = CV->getType()->getVectorElementType();
|
||||
auto *CVVTy = cast<VectorType>(CV->getType());
|
||||
Type *ETy = CVVTy->getElementType();
|
||||
Out << '<';
|
||||
TypePrinter.print(ETy, Out);
|
||||
Out << ' ';
|
||||
WriteAsOperandInternal(Out, CV->getAggregateElement(0U), &TypePrinter,
|
||||
Machine, Context);
|
||||
for (unsigned i = 1, e = CV->getType()->getVectorNumElements(); i != e;++i){
|
||||
for (unsigned i = 1, e = CVVTy->getNumElements(); i != e; ++i) {
|
||||
Out << ", ";
|
||||
TypePrinter.print(ETy, Out);
|
||||
Out << ' ';
|
||||
|
@ -899,8 +899,8 @@ GlobalVariable *llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
|
||||
// to byte shuffles.
|
||||
static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
|
||||
Value *Op, unsigned Shift) {
|
||||
Type *ResultTy = Op->getType();
|
||||
unsigned NumElts = ResultTy->getVectorNumElements() * 8;
|
||||
auto *ResultTy = cast<VectorType>(Op->getType());
|
||||
unsigned NumElts = ResultTy->getNumElements() * 8;
|
||||
|
||||
// Bitcast from a 64-bit element type to a byte element type.
|
||||
Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
|
||||
@ -933,8 +933,8 @@ static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
|
||||
// to byte shuffles.
|
||||
static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
|
||||
unsigned Shift) {
|
||||
Type *ResultTy = Op->getType();
|
||||
unsigned NumElts = ResultTy->getVectorNumElements() * 8;
|
||||
auto *ResultTy = cast<VectorType>(Op->getType());
|
||||
unsigned NumElts = ResultTy->getNumElements() * 8;
|
||||
|
||||
// Bitcast from a 64-bit element type to a byte element type.
|
||||
Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
|
||||
@ -990,7 +990,8 @@ static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
|
||||
if (C->isAllOnesValue())
|
||||
return Op0;
|
||||
|
||||
Mask = getX86MaskVec(Builder, Mask, Op0->getType()->getVectorNumElements());
|
||||
Mask = getX86MaskVec(Builder, Mask,
|
||||
cast<VectorType>(Op0->getType())->getNumElements());
|
||||
return Builder.CreateSelect(Mask, Op0, Op1);
|
||||
}
|
||||
|
||||
@ -1018,7 +1019,7 @@ static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
|
||||
bool IsVALIGN) {
|
||||
unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();
|
||||
|
||||
unsigned NumElts = Op0->getType()->getVectorNumElements();
|
||||
unsigned NumElts = cast<VectorType>(Op0->getType())->getNumElements();
|
||||
assert((IsVALIGN || NumElts % 16 == 0) && "Illegal NumElts for PALIGNR!");
|
||||
assert((!IsVALIGN || NumElts <= 16) && "NumElts too large for VALIGN!");
|
||||
assert(isPowerOf2_32(NumElts) && "NumElts not a power of 2!");
|
||||
@ -1149,7 +1150,7 @@ static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallInst &CI,
|
||||
// Funnel shifts amounts are treated as modulo and types are all power-of-2 so
|
||||
// we only care about the lowest log2 bits anyway.
|
||||
if (Amt->getType() != Ty) {
|
||||
unsigned NumElts = Ty->getVectorNumElements();
|
||||
unsigned NumElts = cast<VectorType>(Ty)->getNumElements();
|
||||
Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
|
||||
Amt = Builder.CreateVectorSplat(NumElts, Amt);
|
||||
}
|
||||
@ -1219,7 +1220,7 @@ static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallInst &CI,
|
||||
// Funnel shifts amounts are treated as modulo and types are all power-of-2 so
|
||||
// we only care about the lowest log2 bits anyway.
|
||||
if (Amt->getType() != Ty) {
|
||||
unsigned NumElts = Ty->getVectorNumElements();
|
||||
unsigned NumElts = cast<VectorType>(Ty)->getNumElements();
|
||||
Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
|
||||
Amt = Builder.CreateVectorSplat(NumElts, Amt);
|
||||
}
|
||||
@ -1255,7 +1256,7 @@ static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
|
||||
return Builder.CreateAlignedStore(Data, Ptr, Alignment);
|
||||
|
||||
// Convert the mask from an integer type to a vector of i1.
|
||||
unsigned NumElts = Data->getType()->getVectorNumElements();
|
||||
unsigned NumElts = cast<VectorType>(Data->getType())->getNumElements();
|
||||
Mask = getX86MaskVec(Builder, Mask, NumElts);
|
||||
return Builder.CreateMaskedStore(Data, Ptr, Alignment, Mask);
|
||||
}
|
||||
@ -1276,7 +1277,7 @@ static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
|
||||
return Builder.CreateAlignedLoad(ValTy, Ptr, Alignment);
|
||||
|
||||
// Convert the mask from an integer type to a vector of i1.
|
||||
unsigned NumElts = Passthru->getType()->getVectorNumElements();
|
||||
unsigned NumElts = cast<VectorType>(Passthru->getType())->getNumElements();
|
||||
Mask = getX86MaskVec(Builder, Mask, NumElts);
|
||||
return Builder.CreateMaskedLoad(Ptr, Alignment, Mask, Passthru);
|
||||
}
|
||||
@ -1340,7 +1341,7 @@ static Value *upgradePMULDQ(IRBuilder<> &Builder, CallInst &CI, bool IsSigned) {
|
||||
// Applying mask on vector of i1's and make sure result is at least 8 bits wide.
|
||||
static Value *ApplyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
|
||||
Value *Mask) {
|
||||
unsigned NumElts = Vec->getType()->getVectorNumElements();
|
||||
unsigned NumElts = cast<VectorType>(Vec->getType())->getNumElements();
|
||||
if (Mask) {
|
||||
const auto *C = dyn_cast<Constant>(Mask);
|
||||
if (!C || !C->isAllOnesValue())
|
||||
@ -1363,7 +1364,7 @@ static Value *ApplyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
|
||||
static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallInst &CI,
|
||||
unsigned CC, bool Signed) {
|
||||
Value *Op0 = CI.getArgOperand(0);
|
||||
unsigned NumElts = Op0->getType()->getVectorNumElements();
|
||||
unsigned NumElts = cast<VectorType>(Op0->getType())->getNumElements();
|
||||
|
||||
Value *Cmp;
|
||||
if (CC == 3) {
|
||||
@ -1416,7 +1417,7 @@ static Value* upgradeMaskedMove(IRBuilder<> &Builder, CallInst &CI) {
|
||||
static Value* UpgradeMaskToInt(IRBuilder<> &Builder, CallInst &CI) {
|
||||
Value* Op = CI.getArgOperand(0);
|
||||
Type* ReturnOp = CI.getType();
|
||||
unsigned NumElts = CI.getType()->getVectorNumElements();
|
||||
unsigned NumElts = cast<VectorType>(CI.getType())->getNumElements();
|
||||
Value *Mask = getX86MaskVec(Builder, Op, NumElts);
|
||||
return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
|
||||
}
|
||||
@ -1866,7 +1867,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
|
||||
Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, Mask);
|
||||
} else if (IsX86 && (Name.startswith("avx512.mask.pbroadcast"))){
|
||||
unsigned NumElts =
|
||||
CI->getArgOperand(1)->getType()->getVectorNumElements();
|
||||
cast<VectorType>(CI->getArgOperand(1)->getType())->getNumElements();
|
||||
Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0));
|
||||
Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
|
||||
CI->getArgOperand(1));
|
||||
@ -2084,16 +2085,19 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
|
||||
Name == "sse2.cvtsi2sd" ||
|
||||
Name == "sse.cvtsi642ss" ||
|
||||
Name == "sse2.cvtsi642sd")) {
|
||||
Rep = Builder.CreateSIToFP(CI->getArgOperand(1),
|
||||
CI->getType()->getVectorElementType());
|
||||
Rep = Builder.CreateSIToFP(
|
||||
CI->getArgOperand(1),
|
||||
cast<VectorType>(CI->getType())->getElementType());
|
||||
Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
|
||||
} else if (IsX86 && Name == "avx512.cvtusi2sd") {
|
||||
Rep = Builder.CreateUIToFP(CI->getArgOperand(1),
|
||||
CI->getType()->getVectorElementType());
|
||||
Rep = Builder.CreateUIToFP(
|
||||
CI->getArgOperand(1),
|
||||
cast<VectorType>(CI->getType())->getElementType());
|
||||
Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
|
||||
} else if (IsX86 && Name == "sse2.cvtss2sd") {
|
||||
Rep = Builder.CreateExtractElement(CI->getArgOperand(1), (uint64_t)0);
|
||||
Rep = Builder.CreateFPExt(Rep, CI->getType()->getVectorElementType());
|
||||
Rep = Builder.CreateFPExt(
|
||||
Rep, cast<VectorType>(CI->getType())->getElementType());
|
||||
Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
|
||||
} else if (IsX86 && (Name == "sse2.cvtdq2pd" ||
|
||||
Name == "sse2.cvtdq2ps" ||
|
||||
@ -2113,18 +2117,18 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
|
||||
Name == "avx.cvt.ps2.pd.256" ||
|
||||
Name == "avx512.mask.cvtps2pd.128" ||
|
||||
Name == "avx512.mask.cvtps2pd.256")) {
|
||||
Type *DstTy = CI->getType();
|
||||
auto *DstTy = cast<VectorType>(CI->getType());
|
||||
Rep = CI->getArgOperand(0);
|
||||
Type *SrcTy = Rep->getType();
|
||||
auto *SrcTy = cast<VectorType>(Rep->getType());
|
||||
|
||||
unsigned NumDstElts = DstTy->getVectorNumElements();
|
||||
if (NumDstElts < SrcTy->getVectorNumElements()) {
|
||||
unsigned NumDstElts = DstTy->getNumElements();
|
||||
if (NumDstElts < SrcTy->getNumElements()) {
|
||||
assert(NumDstElts == 2 && "Unexpected vector size");
|
||||
uint32_t ShuffleMask[2] = { 0, 1 };
|
||||
Rep = Builder.CreateShuffleVector(Rep, Rep, ShuffleMask);
|
||||
}
|
||||
|
||||
bool IsPS2PD = SrcTy->getVectorElementType()->isFloatTy();
|
||||
bool IsPS2PD = SrcTy->getElementType()->isFloatTy();
|
||||
bool IsUnsigned = (StringRef::npos != Name.find("cvtu"));
|
||||
if (IsPS2PD)
|
||||
Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd");
|
||||
@ -2146,11 +2150,11 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
|
||||
CI->getArgOperand(1));
|
||||
} else if (IsX86 && (Name.startswith("avx512.mask.vcvtph2ps.") ||
|
||||
Name.startswith("vcvtph2ps."))) {
|
||||
Type *DstTy = CI->getType();
|
||||
auto *DstTy = cast<VectorType>(CI->getType());
|
||||
Rep = CI->getArgOperand(0);
|
||||
Type *SrcTy = Rep->getType();
|
||||
unsigned NumDstElts = DstTy->getVectorNumElements();
|
||||
if (NumDstElts != SrcTy->getVectorNumElements()) {
|
||||
auto *SrcTy = cast<VectorType>(Rep->getType());
|
||||
unsigned NumDstElts = DstTy->getNumElements();
|
||||
if (NumDstElts != SrcTy->getNumElements()) {
|
||||
assert(NumDstElts == 4 && "Unexpected vector size");
|
||||
uint32_t ShuffleMask[4] = {0, 1, 2, 3};
|
||||
Rep = Builder.CreateShuffleVector(Rep, Rep, ShuffleMask);
|
||||
@ -2170,30 +2174,30 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
|
||||
CI->getArgOperand(1),CI->getArgOperand(2),
|
||||
/*Aligned*/true);
|
||||
} else if (IsX86 && Name.startswith("avx512.mask.expand.load.")) {
|
||||
Type *ResultTy = CI->getType();
|
||||
Type *PtrTy = ResultTy->getVectorElementType();
|
||||
auto *ResultTy = cast<VectorType>(CI->getType());
|
||||
Type *PtrTy = ResultTy->getElementType();
|
||||
|
||||
// Cast the pointer to element type.
|
||||
Value *Ptr = Builder.CreateBitCast(CI->getOperand(0),
|
||||
llvm::PointerType::getUnqual(PtrTy));
|
||||
|
||||
Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
|
||||
ResultTy->getVectorNumElements());
|
||||
ResultTy->getNumElements());
|
||||
|
||||
Function *ELd = Intrinsic::getDeclaration(F->getParent(),
|
||||
Intrinsic::masked_expandload,
|
||||
ResultTy);
|
||||
Rep = Builder.CreateCall(ELd, { Ptr, MaskVec, CI->getOperand(1) });
|
||||
} else if (IsX86 && Name.startswith("avx512.mask.compress.store.")) {
|
||||
Type *ResultTy = CI->getArgOperand(1)->getType();
|
||||
Type *PtrTy = ResultTy->getVectorElementType();
|
||||
auto *ResultTy = cast<VectorType>(CI->getArgOperand(1)->getType());
|
||||
Type *PtrTy = ResultTy->getElementType();
|
||||
|
||||
// Cast the pointer to element type.
|
||||
Value *Ptr = Builder.CreateBitCast(CI->getOperand(0),
|
||||
llvm::PointerType::getUnqual(PtrTy));
|
||||
|
||||
Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
|
||||
ResultTy->getVectorNumElements());
|
||||
ResultTy->getNumElements());
|
||||
|
||||
Function *CSt = Intrinsic::getDeclaration(F->getParent(),
|
||||
Intrinsic::masked_compressstore,
|
||||
@ -2201,10 +2205,10 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
|
||||
Rep = Builder.CreateCall(CSt, { CI->getArgOperand(1), Ptr, MaskVec });
|
||||
} else if (IsX86 && (Name.startswith("avx512.mask.compress.") ||
|
||||
Name.startswith("avx512.mask.expand."))) {
|
||||
Type *ResultTy = CI->getType();
|
||||
auto *ResultTy = cast<VectorType>(CI->getType());
|
||||
|
||||
Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
|
||||
ResultTy->getVectorNumElements());
|
||||
ResultTy->getNumElements());
|
||||
|
||||
bool IsCompress = Name[12] == 'c';
|
||||
Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
|
||||
@ -2281,9 +2285,9 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
|
||||
} else if (IsX86 && (Name.startswith("avx.vbroadcast.s") ||
|
||||
Name.startswith("avx512.vbroadcast.s"))) {
|
||||
// Replace broadcasts with a series of insertelements.
|
||||
Type *VecTy = CI->getType();
|
||||
Type *EltTy = VecTy->getVectorElementType();
|
||||
unsigned EltNum = VecTy->getVectorNumElements();
|
||||
auto *VecTy = cast<VectorType>(CI->getType());
|
||||
Type *EltTy = VecTy->getElementType();
|
||||
unsigned EltNum = VecTy->getNumElements();
|
||||
Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0),
|
||||
EltTy->getPointerTo());
|
||||
Value *Load = Builder.CreateLoad(EltTy, Cast);
|
||||
@ -2328,7 +2332,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
|
||||
} else if (IsX86 && (Name.startswith("avx.vbroadcastf128") ||
|
||||
Name == "avx2.vbroadcasti128")) {
|
||||
// Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle.
|
||||
Type *EltTy = CI->getType()->getVectorElementType();
|
||||
Type *EltTy = cast<VectorType>(CI->getType())->getElementType();
|
||||
unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits();
|
||||
Type *VT = VectorType::get(EltTy, NumSrcElts);
|
||||
Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
|
||||
@ -2366,8 +2370,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
|
||||
}else if (IsX86 && (Name.startswith("avx512.mask.broadcastf") ||
|
||||
Name.startswith("avx512.mask.broadcasti"))) {
|
||||
unsigned NumSrcElts =
|
||||
CI->getArgOperand(0)->getType()->getVectorNumElements();
|
||||
unsigned NumDstElts = CI->getType()->getVectorNumElements();
|
||||
cast<VectorType>(CI->getArgOperand(0)->getType())->getNumElements();
|
||||
unsigned NumDstElts = cast<VectorType>(CI->getType())->getNumElements();
|
||||
|
||||
SmallVector<uint32_t, 8> ShuffleMask(NumDstElts);
|
||||
for (unsigned i = 0; i != NumDstElts; ++i)
|
||||
@ -2384,8 +2388,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
|
||||
Name.startswith("avx512.mask.broadcast.s"))) {
|
||||
// Replace vp?broadcasts with a vector shuffle.
|
||||
Value *Op = CI->getArgOperand(0);
|
||||
unsigned NumElts = CI->getType()->getVectorNumElements();
|
||||
Type *MaskTy = VectorType::get(Type::getInt32Ty(C), NumElts);
|
||||
ElementCount EC = cast<VectorType>(CI->getType())->getElementCount();
|
||||
Type *MaskTy = VectorType::get(Type::getInt32Ty(C), EC);
|
||||
Rep = Builder.CreateShuffleVector(Op, UndefValue::get(Op->getType()),
|
||||
Constant::getNullValue(MaskTy));
|
||||
|
||||
@ -2470,8 +2474,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
|
||||
Value *Op0 = CI->getArgOperand(0);
|
||||
Value *Op1 = CI->getArgOperand(1);
|
||||
unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
|
||||
unsigned DstNumElts = CI->getType()->getVectorNumElements();
|
||||
unsigned SrcNumElts = Op1->getType()->getVectorNumElements();
|
||||
unsigned DstNumElts = cast<VectorType>(CI->getType())->getNumElements();
|
||||
unsigned SrcNumElts = cast<VectorType>(Op1->getType())->getNumElements();
|
||||
unsigned Scale = DstNumElts / SrcNumElts;
|
||||
|
||||
// Mask off the high bits of the immediate value; hardware ignores those.
|
||||
@ -2514,8 +2518,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
|
||||
Name.startswith("avx512.mask.vextract"))) {
|
||||
Value *Op0 = CI->getArgOperand(0);
|
||||
unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
|
||||
unsigned DstNumElts = CI->getType()->getVectorNumElements();
|
||||
unsigned SrcNumElts = Op0->getType()->getVectorNumElements();
|
||||
unsigned DstNumElts = cast<VectorType>(CI->getType())->getNumElements();
|
||||
unsigned SrcNumElts = cast<VectorType>(Op0->getType())->getNumElements();
|
||||
unsigned Scale = SrcNumElts / DstNumElts;
|
||||
|
||||
// Mask off the high bits of the immediate value; hardware ignores those.
|
||||
@ -2562,7 +2566,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
|
||||
|
||||
uint8_t Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
|
||||
|
||||
unsigned NumElts = CI->getType()->getVectorNumElements();
|
||||
unsigned NumElts = cast<VectorType>(CI->getType())->getNumElements();
|
||||
unsigned HalfSize = NumElts / 2;
|
||||
SmallVector<uint32_t, 8> ShuffleMask(NumElts);
|
||||
|
||||
@ -2614,7 +2618,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
|
||||
Name.startswith("avx512.mask.pshufl.w."))) {
|
||||
Value *Op0 = CI->getArgOperand(0);
|
||||
unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
|
||||
unsigned NumElts = CI->getType()->getVectorNumElements();
|
||||
unsigned NumElts = cast<VectorType>(CI->getType())->getNumElements();
|
||||
|
||||
SmallVector<uint32_t, 16> Idxs(NumElts);
|
||||
for (unsigned l = 0; l != NumElts; l += 8) {
|
||||
@ -2633,7 +2637,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
|
||||
Name.startswith("avx512.mask.pshufh.w."))) {
|
||||
Value *Op0 = CI->getArgOperand(0);
|
||||
unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
|
||||
unsigned NumElts = CI->getType()->getVectorNumElements();
|
||||
unsigned NumElts = cast<VectorType>(CI->getType())->getNumElements();
|
||||
|
||||
SmallVector<uint32_t, 16> Idxs(NumElts);
|
||||
for (unsigned l = 0; l != NumElts; l += 8) {
|
||||
@ -2652,7 +2656,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
|
||||
Value *Op0 = CI->getArgOperand(0);
|
||||
Value *Op1 = CI->getArgOperand(1);
|
||||
unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
|
||||
unsigned NumElts = CI->getType()->getVectorNumElements();
|
||||
unsigned NumElts = cast<VectorType>(CI->getType())->getNumElements();
|
||||
|
||||
unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
|
||||
unsigned HalfLaneElts = NumLaneElts / 2;
|
||||
@ -2677,7 +2681,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
|
||||
Name.startswith("avx512.mask.movshdup") ||
|
||||
Name.startswith("avx512.mask.movsldup"))) {
|
||||
Value *Op0 = CI->getArgOperand(0);
|
||||
unsigned NumElts = CI->getType()->getVectorNumElements();
|
||||
unsigned NumElts = cast<VectorType>(CI->getType())->getNumElements();
|
||||
unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
|
||||
|
||||
unsigned Offset = 0;
|
||||
@ -2699,7 +2703,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
|
||||
Name.startswith("avx512.mask.unpckl."))) {
|
||||
Value *Op0 = CI->getArgOperand(0);
|
||||
Value *Op1 = CI->getArgOperand(1);
|
||||
int NumElts = CI->getType()->getVectorNumElements();
|
||||
int NumElts = cast<VectorType>(CI->getType())->getNumElements();
|
||||
int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
|
||||
|
||||
SmallVector<uint32_t, 64> Idxs(NumElts);
|
||||
@ -2715,7 +2719,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
|
||||
Name.startswith("avx512.mask.unpckh."))) {
|
||||
Value *Op0 = CI->getArgOperand(0);
|
||||
Value *Op1 = CI->getArgOperand(1);
|
||||
int NumElts = CI->getType()->getVectorNumElements();
|
||||
int NumElts = cast<VectorType>(CI->getType())->getNumElements();
|
||||
int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
|
||||
|
||||
SmallVector<uint32_t, 64> Idxs(NumElts);
|
||||
@ -3283,7 +3287,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
|
||||
Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
|
||||
Ops);
|
||||
} else {
|
||||
int NumElts = CI->getType()->getVectorNumElements();
|
||||
int NumElts = cast<VectorType>(CI->getType())->getNumElements();
|
||||
|
||||
Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
|
||||
CI->getArgOperand(2) };
|
||||
|
@ -56,13 +56,13 @@ static Constant *BitCastConstantVector(Constant *CV, VectorType *DstTy) {
|
||||
// doing so requires endianness information. This should be handled by
|
||||
// Analysis/ConstantFolding.cpp
|
||||
unsigned NumElts = DstTy->getNumElements();
|
||||
if (NumElts != CV->getType()->getVectorNumElements())
|
||||
if (NumElts != cast<VectorType>(CV->getType())->getNumElements())
|
||||
return nullptr;
|
||||
|
||||
Type *DstEltTy = DstTy->getElementType();
|
||||
// Fast path for splatted constants.
|
||||
if (Constant *Splat = CV->getSplatValue()) {
|
||||
return ConstantVector::getSplat(DstTy->getVectorElementCount(),
|
||||
return ConstantVector::getSplat(DstTy->getElementCount(),
|
||||
ConstantExpr::getBitCast(Splat, DstEltTy));
|
||||
}
|
||||
|
||||
@ -572,18 +572,20 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
|
||||
// count may be mismatched; don't attempt to handle that here.
|
||||
if ((isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) &&
|
||||
DestTy->isVectorTy() &&
|
||||
DestTy->getVectorNumElements() == V->getType()->getVectorNumElements()) {
|
||||
cast<VectorType>(DestTy)->getNumElements() ==
|
||||
cast<VectorType>(V->getType())->getNumElements()) {
|
||||
VectorType *DestVecTy = cast<VectorType>(DestTy);
|
||||
Type *DstEltTy = DestVecTy->getElementType();
|
||||
// Fast path for splatted constants.
|
||||
if (Constant *Splat = V->getSplatValue()) {
|
||||
return ConstantVector::getSplat(
|
||||
DestTy->getVectorElementCount(),
|
||||
cast<VectorType>(DestTy)->getElementCount(),
|
||||
ConstantExpr::getCast(opc, Splat, DstEltTy));
|
||||
}
|
||||
SmallVector<Constant *, 16> res;
|
||||
Type *Ty = IntegerType::get(V->getContext(), 32);
|
||||
for (unsigned i = 0, e = V->getType()->getVectorNumElements(); i != e; ++i) {
|
||||
for (unsigned i = 0, e = cast<VectorType>(V->getType())->getNumElements();
|
||||
i != e; ++i) {
|
||||
Constant *C =
|
||||
ConstantExpr::getExtractElement(V, ConstantInt::get(Ty, i));
|
||||
res.push_back(ConstantExpr::getCast(opc, C, DstEltTy));
|
||||
@ -745,9 +747,10 @@ Constant *llvm::ConstantFoldSelectInstruction(Constant *Cond,
|
||||
|
||||
// If the condition is a vector constant, fold the result elementwise.
|
||||
if (ConstantVector *CondV = dyn_cast<ConstantVector>(Cond)) {
|
||||
auto *V1VTy = CondV->getType();
|
||||
SmallVector<Constant*, 16> Result;
|
||||
Type *Ty = IntegerType::get(CondV->getContext(), 32);
|
||||
for (unsigned i = 0, e = V1->getType()->getVectorNumElements(); i != e;++i){
|
||||
for (unsigned i = 0, e = V1VTy->getNumElements(); i != e; ++i) {
|
||||
Constant *V;
|
||||
Constant *V1Element = ConstantExpr::getExtractElement(V1,
|
||||
ConstantInt::get(Ty, i));
|
||||
@ -766,7 +769,7 @@ Constant *llvm::ConstantFoldSelectInstruction(Constant *Cond,
|
||||
}
|
||||
|
||||
// If we were able to build the vector, return it.
|
||||
if (Result.size() == V1->getType()->getVectorNumElements())
|
||||
if (Result.size() == V1VTy->getNumElements())
|
||||
return ConstantVector::get(Result);
|
||||
}
|
||||
|
||||
@ -794,18 +797,20 @@ Constant *llvm::ConstantFoldSelectInstruction(Constant *Cond,
|
||||
|
||||
Constant *llvm::ConstantFoldExtractElementInstruction(Constant *Val,
|
||||
Constant *Idx) {
|
||||
auto *ValVTy = cast<VectorType>(Val->getType());
|
||||
|
||||
// extractelt undef, C -> undef
|
||||
// extractelt C, undef -> undef
|
||||
if (isa<UndefValue>(Val) || isa<UndefValue>(Idx))
|
||||
return UndefValue::get(Val->getType()->getVectorElementType());
|
||||
return UndefValue::get(ValVTy->getElementType());
|
||||
|
||||
auto *CIdx = dyn_cast<ConstantInt>(Idx);
|
||||
if (!CIdx)
|
||||
return nullptr;
|
||||
|
||||
// ee({w,x,y,z}, wrong_value) -> undef
|
||||
if (CIdx->uge(Val->getType()->getVectorNumElements()))
|
||||
return UndefValue::get(Val->getType()->getVectorElementType());
|
||||
if (CIdx->uge(ValVTy->getNumElements()))
|
||||
return UndefValue::get(ValVTy->getElementType());
|
||||
|
||||
// ee (gep (ptr, idx0, ...), idx) -> gep (ee (ptr, idx), ee (idx0, idx), ...)
|
||||
if (auto *CE = dyn_cast<ConstantExpr>(Val)) {
|
||||
@ -822,8 +827,7 @@ Constant *llvm::ConstantFoldExtractElementInstruction(Constant *Val,
|
||||
} else
|
||||
Ops.push_back(Op);
|
||||
}
|
||||
return CE->getWithOperands(Ops, CE->getType()->getVectorElementType(),
|
||||
false,
|
||||
return CE->getWithOperands(Ops, ValVTy->getElementType(), false,
|
||||
Ops[0]->getType()->getPointerElementType());
|
||||
}
|
||||
}
|
||||
@ -846,7 +850,7 @@ Constant *llvm::ConstantFoldInsertElementInstruction(Constant *Val,
|
||||
if (ValTy->isScalable())
|
||||
return nullptr;
|
||||
|
||||
unsigned NumElts = Val->getType()->getVectorNumElements();
|
||||
unsigned NumElts = cast<VectorType>(Val->getType())->getNumElements();
|
||||
if (CIdx->uge(NumElts))
|
||||
return UndefValue::get(Val->getType());
|
||||
|
||||
@ -869,10 +873,10 @@ Constant *llvm::ConstantFoldInsertElementInstruction(Constant *Val,
|
||||
|
||||
Constant *llvm::ConstantFoldShuffleVectorInstruction(Constant *V1, Constant *V2,
|
||||
ArrayRef<int> Mask) {
|
||||
auto *V1VTy = cast<VectorType>(V1->getType());
|
||||
unsigned MaskNumElts = Mask.size();
|
||||
ElementCount MaskEltCount = {MaskNumElts,
|
||||
V1->getType()->getVectorIsScalable()};
|
||||
Type *EltTy = V1->getType()->getVectorElementType();
|
||||
ElementCount MaskEltCount = {MaskNumElts, V1VTy->isScalable()};
|
||||
Type *EltTy = V1VTy->getElementType();
|
||||
|
||||
// Undefined shuffle mask -> undefined value.
|
||||
if (all_of(Mask, [](int Elt) { return Elt == UndefMaskElem; })) {
|
||||
@ -890,11 +894,10 @@ Constant *llvm::ConstantFoldShuffleVectorInstruction(Constant *V1, Constant *V2,
|
||||
}
|
||||
// Do not iterate on scalable vector. The num of elements is unknown at
|
||||
// compile-time.
|
||||
VectorType *ValTy = cast<VectorType>(V1->getType());
|
||||
if (ValTy->isScalable())
|
||||
if (V1VTy->isScalable())
|
||||
return nullptr;
|
||||
|
||||
unsigned SrcNumElts = V1->getType()->getVectorNumElements();
|
||||
unsigned SrcNumElts = V1VTy->getNumElements();
|
||||
|
||||
// Loop over the shuffle mask, evaluating each element.
|
||||
SmallVector<Constant*, 32> Result;
|
||||
@ -968,8 +971,8 @@ Constant *llvm::ConstantFoldUnaryInstruction(unsigned Opcode, Constant *C) {
|
||||
|
||||
// Handle scalar UndefValue and scalable vector UndefValue. Fixed-length
|
||||
// vectors are always evaluated per element.
|
||||
bool IsScalableVector =
|
||||
C->getType()->isVectorTy() && C->getType()->getVectorIsScalable();
|
||||
bool IsScalableVector = isa<VectorType>(C->getType()) &&
|
||||
cast<VectorType>(C->getType())->isScalable();
|
||||
bool HasScalarUndefOrScalableVectorUndef =
|
||||
(!C->getType()->isVectorTy() || IsScalableVector) && isa<UndefValue>(C);
|
||||
|
||||
@ -1042,8 +1045,8 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode, Constant *C1,
|
||||
|
||||
// Handle scalar UndefValue and scalable vector UndefValue. Fixed-length
|
||||
// vectors are always evaluated per element.
|
||||
bool IsScalableVector =
|
||||
C1->getType()->isVectorTy() && C1->getType()->getVectorIsScalable();
|
||||
bool IsScalableVector = isa<VectorType>(C1->getType()) &&
|
||||
cast<VectorType>(C1->getType())->isScalable();
|
||||
bool HasScalarUndefOrScalableVectorUndef =
|
||||
(!C1->getType()->isVectorTy() || IsScalableVector) &&
|
||||
(isa<UndefValue>(C1) || isa<UndefValue>(C2));
|
||||
@ -1375,7 +1378,7 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode, Constant *C1,
|
||||
return UndefValue::get(VTy);
|
||||
if (Constant *C1Splat = C1->getSplatValue()) {
|
||||
return ConstantVector::getSplat(
|
||||
VTy->getVectorElementCount(),
|
||||
VTy->getElementCount(),
|
||||
ConstantExpr::get(Opcode, C1Splat, C2Splat));
|
||||
}
|
||||
}
|
||||
@ -1992,16 +1995,18 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
|
||||
return ConstantInt::get(ResultTy, R==APFloat::cmpGreaterThan ||
|
||||
R==APFloat::cmpEqual);
|
||||
}
|
||||
} else if (C1->getType()->isVectorTy()) {
|
||||
} else if (auto *C1VTy = dyn_cast<VectorType>(C1->getType())) {
|
||||
|
||||
// Do not iterate on scalable vector. The number of elements is unknown at
|
||||
// compile-time.
|
||||
if (C1->getType()->getVectorIsScalable())
|
||||
if (C1VTy->isScalable())
|
||||
return nullptr;
|
||||
|
||||
// Fast path for splatted constants.
|
||||
if (Constant *C1Splat = C1->getSplatValue())
|
||||
if (Constant *C2Splat = C2->getSplatValue())
|
||||
return ConstantVector::getSplat(
|
||||
C1->getType()->getVectorElementCount(),
|
||||
C1VTy->getElementCount(),
|
||||
ConstantExpr::getCompare(pred, C1Splat, C2Splat));
|
||||
|
||||
// If we can constant fold the comparison of each element, constant fold
|
||||
@ -2009,7 +2014,7 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
|
||||
SmallVector<Constant*, 4> ResElts;
|
||||
Type *Ty = IntegerType::get(C1->getContext(), 32);
|
||||
// Compare the elements, producing an i1 result or constant expr.
|
||||
for (unsigned i = 0, e = C1->getType()->getVectorNumElements(); i != e;++i){
|
||||
for (unsigned i = 0, e = C1VTy->getNumElements(); i != e; ++i) {
|
||||
Constant *C1E =
|
||||
ConstantExpr::getExtractElement(C1, ConstantInt::get(Ty, i));
|
||||
Constant *C2E =
|
||||
@ -2262,7 +2267,8 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
|
||||
Constant *Idx0 = cast<Constant>(Idxs[0]);
|
||||
if (Idxs.size() == 1 && (Idx0->isNullValue() || isa<UndefValue>(Idx0)))
|
||||
return GEPTy->isVectorTy() && !C->getType()->isVectorTy()
|
||||
? ConstantVector::getSplat(GEPTy->getVectorElementCount(), C)
|
||||
? ConstantVector::getSplat(
|
||||
cast<VectorType>(GEPTy)->getElementCount(), C)
|
||||
: C;
|
||||
|
||||
if (C->isNullValue()) {
|
||||
@ -2494,18 +2500,19 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
|
||||
|
||||
if (!IsCurrIdxVector && IsPrevIdxVector)
|
||||
CurrIdx = ConstantDataVector::getSplat(
|
||||
PrevIdx->getType()->getVectorNumElements(), CurrIdx);
|
||||
cast<VectorType>(PrevIdx->getType())->getNumElements(), CurrIdx);
|
||||
|
||||
if (!IsPrevIdxVector && IsCurrIdxVector)
|
||||
PrevIdx = ConstantDataVector::getSplat(
|
||||
CurrIdx->getType()->getVectorNumElements(), PrevIdx);
|
||||
cast<VectorType>(CurrIdx->getType())->getNumElements(), PrevIdx);
|
||||
|
||||
Constant *Factor =
|
||||
ConstantInt::get(CurrIdx->getType()->getScalarType(), NumElements);
|
||||
if (UseVector)
|
||||
Factor = ConstantDataVector::getSplat(
|
||||
IsPrevIdxVector ? PrevIdx->getType()->getVectorNumElements()
|
||||
: CurrIdx->getType()->getVectorNumElements(),
|
||||
IsPrevIdxVector
|
||||
? cast<VectorType>(PrevIdx->getType())->getNumElements()
|
||||
: cast<VectorType>(CurrIdx->getType())->getNumElements(),
|
||||
Factor);
|
||||
|
||||
NewIdxs[i] = ConstantExpr::getSRem(CurrIdx, Factor);
|
||||
@ -2522,9 +2529,10 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
|
||||
Type *ExtendedTy = Type::getIntNTy(Div->getContext(), CommonExtendedWidth);
|
||||
if (UseVector)
|
||||
ExtendedTy = VectorType::get(
|
||||
ExtendedTy, IsPrevIdxVector
|
||||
? PrevIdx->getType()->getVectorNumElements()
|
||||
: CurrIdx->getType()->getVectorNumElements());
|
||||
ExtendedTy,
|
||||
IsPrevIdxVector
|
||||
? cast<VectorType>(PrevIdx->getType())->getNumElements()
|
||||
: cast<VectorType>(CurrIdx->getType())->getNumElements());
|
||||
|
||||
if (!PrevIdx->getType()->isIntOrIntVectorTy(CommonExtendedWidth))
|
||||
PrevIdx = ConstantExpr::getSExt(PrevIdx, ExtendedTy);
|
||||
|
@ -160,8 +160,8 @@ bool Constant::isNotOneValue() const {
|
||||
return !CFP->getValueAPF().bitcastToAPInt().isOneValue();
|
||||
|
||||
// Check that vectors don't contain 1
|
||||
if (this->getType()->isVectorTy()) {
|
||||
unsigned NumElts = this->getType()->getVectorNumElements();
|
||||
if (auto *VTy = dyn_cast<VectorType>(this->getType())) {
|
||||
unsigned NumElts = VTy->getNumElements();
|
||||
for (unsigned i = 0; i != NumElts; ++i) {
|
||||
Constant *Elt = this->getAggregateElement(i);
|
||||
if (!Elt || !Elt->isNotOneValue())
|
||||
@ -210,8 +210,8 @@ bool Constant::isNotMinSignedValue() const {
|
||||
return !CFP->getValueAPF().bitcastToAPInt().isMinSignedValue();
|
||||
|
||||
// Check that vectors don't contain INT_MIN
|
||||
if (this->getType()->isVectorTy()) {
|
||||
unsigned NumElts = this->getType()->getVectorNumElements();
|
||||
if (auto *VTy = dyn_cast<VectorType>(this->getType())) {
|
||||
unsigned NumElts = VTy->getNumElements();
|
||||
for (unsigned i = 0; i != NumElts; ++i) {
|
||||
Constant *Elt = this->getAggregateElement(i);
|
||||
if (!Elt || !Elt->isNotMinSignedValue())
|
||||
@ -227,9 +227,10 @@ bool Constant::isNotMinSignedValue() const {
|
||||
bool Constant::isFiniteNonZeroFP() const {
|
||||
if (auto *CFP = dyn_cast<ConstantFP>(this))
|
||||
return CFP->getValueAPF().isFiniteNonZero();
|
||||
if (!getType()->isVectorTy())
|
||||
auto *VTy = dyn_cast<VectorType>(getType());
|
||||
if (!VTy)
|
||||
return false;
|
||||
for (unsigned i = 0, e = getType()->getVectorNumElements(); i != e; ++i) {
|
||||
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
|
||||
auto *CFP = dyn_cast_or_null<ConstantFP>(this->getAggregateElement(i));
|
||||
if (!CFP || !CFP->getValueAPF().isFiniteNonZero())
|
||||
return false;
|
||||
@ -240,9 +241,10 @@ bool Constant::isFiniteNonZeroFP() const {
|
||||
bool Constant::isNormalFP() const {
|
||||
if (auto *CFP = dyn_cast<ConstantFP>(this))
|
||||
return CFP->getValueAPF().isNormal();
|
||||
if (!getType()->isVectorTy())
|
||||
auto *VTy = dyn_cast<VectorType>(getType());
|
||||
if (!VTy)
|
||||
return false;
|
||||
for (unsigned i = 0, e = getType()->getVectorNumElements(); i != e; ++i) {
|
||||
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
|
||||
auto *CFP = dyn_cast_or_null<ConstantFP>(this->getAggregateElement(i));
|
||||
if (!CFP || !CFP->getValueAPF().isNormal())
|
||||
return false;
|
||||
@ -253,9 +255,10 @@ bool Constant::isNormalFP() const {
|
||||
bool Constant::hasExactInverseFP() const {
|
||||
if (auto *CFP = dyn_cast<ConstantFP>(this))
|
||||
return CFP->getValueAPF().getExactInverse(nullptr);
|
||||
if (!getType()->isVectorTy())
|
||||
auto *VTy = dyn_cast<VectorType>(getType());
|
||||
if (!VTy)
|
||||
return false;
|
||||
for (unsigned i = 0, e = getType()->getVectorNumElements(); i != e; ++i) {
|
||||
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
|
||||
auto *CFP = dyn_cast_or_null<ConstantFP>(this->getAggregateElement(i));
|
||||
if (!CFP || !CFP->getValueAPF().getExactInverse(nullptr))
|
||||
return false;
|
||||
@ -266,9 +269,10 @@ bool Constant::hasExactInverseFP() const {
|
||||
bool Constant::isNaN() const {
|
||||
if (auto *CFP = dyn_cast<ConstantFP>(this))
|
||||
return CFP->isNaN();
|
||||
if (!getType()->isVectorTy())
|
||||
auto *VTy = dyn_cast<VectorType>(getType());
|
||||
if (!VTy)
|
||||
return false;
|
||||
for (unsigned i = 0, e = getType()->getVectorNumElements(); i != e; ++i) {
|
||||
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
|
||||
auto *CFP = dyn_cast_or_null<ConstantFP>(this->getAggregateElement(i));
|
||||
if (!CFP || !CFP->isNaN())
|
||||
return false;
|
||||
@ -282,18 +286,18 @@ bool Constant::isElementWiseEqual(Value *Y) const {
|
||||
return true;
|
||||
|
||||
// The input value must be a vector constant with the same type.
|
||||
Type *Ty = getType();
|
||||
if (!isa<Constant>(Y) || !Ty->isVectorTy() || Ty != Y->getType())
|
||||
auto *VTy = dyn_cast<VectorType>(getType());
|
||||
if (!isa<Constant>(Y) || !VTy || VTy != Y->getType())
|
||||
return false;
|
||||
|
||||
// TODO: Compare pointer constants?
|
||||
if (!(Ty->getVectorElementType()->isIntegerTy() ||
|
||||
Ty->getVectorElementType()->isFloatingPointTy()))
|
||||
if (!(VTy->getElementType()->isIntegerTy() ||
|
||||
VTy->getElementType()->isFloatingPointTy()))
|
||||
return false;
|
||||
|
||||
// They may still be identical element-wise (if they have `undef`s).
|
||||
// Bitcast to integer to allow exact bitwise comparison for all types.
|
||||
Type *IntTy = VectorType::getInteger(cast<VectorType>(Ty));
|
||||
Type *IntTy = VectorType::getInteger(VTy);
|
||||
Constant *C0 = ConstantExpr::getBitCast(const_cast<Constant *>(this), IntTy);
|
||||
Constant *C1 = ConstantExpr::getBitCast(cast<Constant>(Y), IntTy);
|
||||
Constant *CmpEq = ConstantExpr::getICmp(ICmpInst::ICMP_EQ, C0, C1);
|
||||
@ -301,21 +305,21 @@ bool Constant::isElementWiseEqual(Value *Y) const {
|
||||
}
|
||||
|
||||
bool Constant::containsUndefElement() const {
|
||||
if (!getType()->isVectorTy())
|
||||
return false;
|
||||
for (unsigned i = 0, e = getType()->getVectorNumElements(); i != e; ++i)
|
||||
if (isa<UndefValue>(getAggregateElement(i)))
|
||||
return true;
|
||||
if (auto *VTy = dyn_cast<VectorType>(getType())) {
|
||||
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
|
||||
if (isa<UndefValue>(getAggregateElement(i)))
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool Constant::containsConstantExpression() const {
|
||||
if (!getType()->isVectorTy())
|
||||
return false;
|
||||
for (unsigned i = 0, e = getType()->getVectorNumElements(); i != e; ++i)
|
||||
if (isa<ConstantExpr>(getAggregateElement(i)))
|
||||
return true;
|
||||
if (auto *VTy = dyn_cast<VectorType>(getType())) {
|
||||
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
|
||||
if (isa<ConstantExpr>(getAggregateElement(i)))
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
@ -639,10 +643,11 @@ Constant *Constant::replaceUndefsWith(Constant *C, Constant *Replacement) {
|
||||
}
|
||||
|
||||
// Don't know how to deal with this constant.
|
||||
if (!Ty->isVectorTy())
|
||||
auto *VTy = dyn_cast<VectorType>(Ty);
|
||||
if (!VTy)
|
||||
return C;
|
||||
|
||||
unsigned NumElts = Ty->getVectorNumElements();
|
||||
unsigned NumElts = VTy->getNumElements();
|
||||
SmallVector<Constant *, 32> NewC(NumElts);
|
||||
for (unsigned i = 0; i != NumElts; ++i) {
|
||||
Constant *EltC = C->getAggregateElement(i);
|
||||
@ -1490,7 +1495,7 @@ void ConstantVector::destroyConstantImpl() {
|
||||
Constant *Constant::getSplatValue(bool AllowUndefs) const {
|
||||
assert(this->getType()->isVectorTy() && "Only valid for vectors!");
|
||||
if (isa<ConstantAggregateZero>(this))
|
||||
return getNullValue(this->getType()->getVectorElementType());
|
||||
return getNullValue(cast<VectorType>(getType())->getElementType());
|
||||
if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this))
|
||||
return CV->getSplatValue();
|
||||
if (const ConstantVector *CV = dyn_cast<ConstantVector>(this))
|
||||
@ -1890,8 +1895,9 @@ Constant *ConstantExpr::getPtrToInt(Constant *C, Type *DstTy,
|
||||
assert(DstTy->isIntOrIntVectorTy() &&
|
||||
"PtrToInt destination must be integer or integer vector");
|
||||
assert(isa<VectorType>(C->getType()) == isa<VectorType>(DstTy));
|
||||
if (isa<VectorType>(C->getType()))
|
||||
assert(C->getType()->getVectorNumElements()==DstTy->getVectorNumElements()&&
|
||||
if (auto *CVTy = dyn_cast<VectorType>(C->getType()))
|
||||
assert(CVTy->getNumElements() ==
|
||||
cast<VectorType>(DstTy)->getNumElements() &&
|
||||
"Invalid cast between a different number of vector elements");
|
||||
return getFoldedCast(Instruction::PtrToInt, C, DstTy, OnlyIfReduced);
|
||||
}
|
||||
@ -1903,8 +1909,9 @@ Constant *ConstantExpr::getIntToPtr(Constant *C, Type *DstTy,
|
||||
assert(DstTy->isPtrOrPtrVectorTy() &&
|
||||
"IntToPtr destination must be a pointer or pointer vector");
|
||||
assert(isa<VectorType>(C->getType()) == isa<VectorType>(DstTy));
|
||||
if (isa<VectorType>(C->getType()))
|
||||
assert(C->getType()->getVectorNumElements()==DstTy->getVectorNumElements()&&
|
||||
if (auto *CVTy = dyn_cast<VectorType>(C->getType()))
|
||||
assert(CVTy->getNumElements() ==
|
||||
cast<VectorType>(DstTy)->getNumElements() &&
|
||||
"Invalid cast between a different number of vector elements");
|
||||
return getFoldedCast(Instruction::IntToPtr, C, DstTy, OnlyIfReduced);
|
||||
}
|
||||
@ -2151,9 +2158,10 @@ Constant *ConstantExpr::getGetElementPtr(Type *Ty, Constant *C,
|
||||
ArgVec.reserve(1 + Idxs.size());
|
||||
ArgVec.push_back(C);
|
||||
for (unsigned i = 0, e = Idxs.size(); i != e; ++i) {
|
||||
assert((!Idxs[i]->getType()->isVectorTy() ||
|
||||
Idxs[i]->getType()->getVectorElementCount() == EltCount) &&
|
||||
"getelementptr index type missmatch");
|
||||
assert(
|
||||
(!isa<VectorType>(Idxs[i]->getType()) ||
|
||||
cast<VectorType>(Idxs[i]->getType())->getElementCount() == EltCount) &&
|
||||
"getelementptr index type missmatch");
|
||||
|
||||
Constant *Idx = cast<Constant>(Idxs[i]);
|
||||
if (EltCount.Min != 0 && !Idxs[i]->getType()->isVectorTy())
|
||||
@ -2231,7 +2239,7 @@ Constant *ConstantExpr::getExtractElement(Constant *Val, Constant *Idx,
|
||||
if (Constant *FC = ConstantFoldExtractElementInstruction(Val, Idx))
|
||||
return FC; // Fold a few common cases.
|
||||
|
||||
Type *ReqTy = Val->getType()->getVectorElementType();
|
||||
Type *ReqTy = cast<VectorType>(Val->getType())->getElementType();
|
||||
if (OnlyIfReducedTy == ReqTy)
|
||||
return nullptr;
|
||||
|
||||
@ -2247,7 +2255,7 @@ Constant *ConstantExpr::getInsertElement(Constant *Val, Constant *Elt,
|
||||
Constant *Idx, Type *OnlyIfReducedTy) {
|
||||
assert(Val->getType()->isVectorTy() &&
|
||||
"Tried to create insertelement operation on non-vector type!");
|
||||
assert(Elt->getType() == Val->getType()->getVectorElementType() &&
|
||||
assert(Elt->getType() == cast<VectorType>(Val->getType())->getElementType() &&
|
||||
"Insertelement types must match!");
|
||||
assert(Idx->getType()->isIntegerTy() &&
|
||||
"Insertelement index must be i32 type!");
|
||||
@ -2276,8 +2284,9 @@ Constant *ConstantExpr::getShuffleVector(Constant *V1, Constant *V2,
|
||||
return FC; // Fold a few common cases.
|
||||
|
||||
unsigned NElts = Mask.size();
|
||||
Type *EltTy = V1->getType()->getVectorElementType();
|
||||
bool TypeIsScalable = V1->getType()->getVectorIsScalable();
|
||||
auto V1VTy = cast<VectorType>(V1->getType());
|
||||
Type *EltTy = V1VTy->getElementType();
|
||||
bool TypeIsScalable = V1VTy->isScalable();
|
||||
Type *ShufTy = VectorType::get(EltTy, NElts, TypeIsScalable);
|
||||
|
||||
if (OnlyIfReducedTy == ShufTy)
|
||||
@ -2569,7 +2578,7 @@ bool ConstantDataSequential::isElementTypeCompatible(Type *Ty) {
|
||||
unsigned ConstantDataSequential::getNumElements() const {
|
||||
if (ArrayType *AT = dyn_cast<ArrayType>(getType()))
|
||||
return AT->getNumElements();
|
||||
return getType()->getVectorNumElements();
|
||||
return cast<VectorType>(getType())->getNumElements();
|
||||
}
|
||||
|
||||
|
||||
|
@ -150,7 +150,8 @@ public:
|
||||
ShuffleVectorConstantExpr(Constant *C1, Constant *C2, ArrayRef<int> Mask)
|
||||
: ConstantExpr(
|
||||
VectorType::get(cast<VectorType>(C1->getType())->getElementType(),
|
||||
Mask.size(), C1->getType()->getVectorIsScalable()),
|
||||
Mask.size(),
|
||||
cast<VectorType>(C1->getType())->isScalable()),
|
||||
Instruction::ShuffleVector, &Op<0>(), 2) {
|
||||
assert(ShuffleVectorInst::isValidOperands(C1, C2, Mask) &&
|
||||
"Invalid shuffle vector instruction operands!");
|
||||
|
@ -646,8 +646,8 @@ static std::string getMangledTypeStr(Type* Ty) {
|
||||
} else if (VectorType* VTy = dyn_cast<VectorType>(Ty)) {
|
||||
if (VTy->isScalable())
|
||||
Result += "nx";
|
||||
Result += "v" + utostr(VTy->getVectorNumElements()) +
|
||||
getMangledTypeStr(VTy->getVectorElementType());
|
||||
Result += "v" + utostr(VTy->getNumElements()) +
|
||||
getMangledTypeStr(VTy->getElementType());
|
||||
} else if (Ty) {
|
||||
switch (Ty->getTypeID()) {
|
||||
default: llvm_unreachable("Unhandled type");
|
||||
@ -1055,7 +1055,7 @@ static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos,
|
||||
VectorType *VTy = dyn_cast<VectorType>(Ty);
|
||||
if (!VTy)
|
||||
llvm_unreachable("Expected an argument of Vector Type");
|
||||
Type *EltTy = VTy->getVectorElementType();
|
||||
Type *EltTy = VTy->getElementType();
|
||||
return PointerType::getUnqual(EltTy);
|
||||
}
|
||||
case IITDescriptor::VecElementArgument: {
|
||||
@ -1074,9 +1074,9 @@ static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos,
|
||||
// Return the overloaded type (which determines the pointers address space)
|
||||
return Tys[D.getOverloadArgNumber()];
|
||||
case IITDescriptor::ScalableVecArgument: {
|
||||
Type *Ty = DecodeFixedType(Infos, Tys, Context);
|
||||
return VectorType::get(Ty->getVectorElementType(),
|
||||
{ Ty->getVectorNumElements(), true });
|
||||
auto *Ty = cast<VectorType>(DecodeFixedType(Infos, Tys, Context));
|
||||
return VectorType::get(Ty->getElementType(),
|
||||
{(unsigned)Ty->getNumElements(), true});
|
||||
}
|
||||
}
|
||||
llvm_unreachable("unhandled");
|
||||
@ -1281,7 +1281,7 @@ static bool matchIntrinsicType(
|
||||
if (ReferenceType->getElementCount() !=
|
||||
ThisArgType->getElementCount())
|
||||
return true;
|
||||
EltTy = ThisArgType->getVectorElementType();
|
||||
EltTy = ThisArgType->getElementType();
|
||||
}
|
||||
return matchIntrinsicType(EltTy, Infos, ArgTys, DeferredChecks,
|
||||
IsDeferredCheck);
|
||||
@ -1326,15 +1326,13 @@ static bool matchIntrinsicType(
|
||||
VectorType *ReferenceType = dyn_cast<VectorType>(ArgTys[RefArgNumber]);
|
||||
VectorType *ThisArgVecTy = dyn_cast<VectorType>(Ty);
|
||||
if (!ThisArgVecTy || !ReferenceType ||
|
||||
(ReferenceType->getVectorNumElements() !=
|
||||
ThisArgVecTy->getVectorNumElements()))
|
||||
(ReferenceType->getNumElements() != ThisArgVecTy->getNumElements()))
|
||||
return true;
|
||||
PointerType *ThisArgEltTy =
|
||||
dyn_cast<PointerType>(ThisArgVecTy->getVectorElementType());
|
||||
dyn_cast<PointerType>(ThisArgVecTy->getElementType());
|
||||
if (!ThisArgEltTy)
|
||||
return true;
|
||||
return ThisArgEltTy->getElementType() !=
|
||||
ReferenceType->getVectorElementType();
|
||||
return ThisArgEltTy->getElementType() != ReferenceType->getElementType();
|
||||
}
|
||||
case IITDescriptor::VecElementArgument: {
|
||||
if (D.getArgumentNumber() >= ArgTys.size())
|
||||
|
@ -524,7 +524,7 @@ CallInst *IRBuilderBase::CreateMaskedGather(Value *Ptrs, Align Alignment,
|
||||
const Twine &Name) {
|
||||
auto PtrsTy = cast<VectorType>(Ptrs->getType());
|
||||
auto PtrTy = cast<PointerType>(PtrsTy->getElementType());
|
||||
unsigned NumElts = PtrsTy->getVectorNumElements();
|
||||
unsigned NumElts = PtrsTy->getNumElements();
|
||||
Type *DataTy = VectorType::get(PtrTy->getElementType(), NumElts);
|
||||
|
||||
if (!Mask)
|
||||
@ -554,11 +554,11 @@ CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
|
||||
Align Alignment, Value *Mask) {
|
||||
auto PtrsTy = cast<VectorType>(Ptrs->getType());
|
||||
auto DataTy = cast<VectorType>(Data->getType());
|
||||
unsigned NumElts = PtrsTy->getVectorNumElements();
|
||||
unsigned NumElts = PtrsTy->getNumElements();
|
||||
|
||||
#ifndef NDEBUG
|
||||
auto PtrTy = cast<PointerType>(PtrsTy->getElementType());
|
||||
assert(NumElts == DataTy->getVectorNumElements() &&
|
||||
assert(NumElts == DataTy->getNumElements() &&
|
||||
PtrTy->getElementType() == DataTy->getElementType() &&
|
||||
"Incompatible pointer and data types");
|
||||
#endif
|
||||
|
@ -1879,7 +1879,8 @@ ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
|
||||
Instruction *InsertBefore)
|
||||
: Instruction(
|
||||
VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
|
||||
Mask.size(), V1->getType()->getVectorIsScalable()),
|
||||
Mask.size(),
|
||||
cast<VectorType>(V1->getType())->isScalable()),
|
||||
ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
|
||||
OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
|
||||
assert(isValidOperands(V1, V2, Mask) &&
|
||||
@ -1894,7 +1895,8 @@ ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
|
||||
const Twine &Name, BasicBlock *InsertAtEnd)
|
||||
: Instruction(
|
||||
VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
|
||||
Mask.size(), V1->getType()->getVectorIsScalable()),
|
||||
Mask.size(),
|
||||
cast<VectorType>(V1->getType())->isScalable()),
|
||||
ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
|
||||
OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
|
||||
assert(isValidOperands(V1, V2, Mask) &&
|
||||
@ -1907,7 +1909,7 @@ ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
|
||||
}
|
||||
|
||||
void ShuffleVectorInst::commute() {
|
||||
int NumOpElts = Op<0>()->getType()->getVectorNumElements();
|
||||
int NumOpElts = cast<VectorType>(Op<0>()->getType())->getNumElements();
|
||||
int NumMaskElts = ShuffleMask.size();
|
||||
SmallVector<int, 16> NewMask(NumMaskElts);
|
||||
for (int i = 0; i != NumMaskElts; ++i) {
|
||||
@ -1936,7 +1938,7 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
|
||||
if (Elem != UndefMaskElem && Elem >= V1Size * 2)
|
||||
return false;
|
||||
|
||||
if (V1->getType()->getVectorIsScalable())
|
||||
if (cast<VectorType>(V1->getType())->isScalable())
|
||||
if ((Mask[0] != 0 && Mask[0] != UndefMaskElem) || !is_splat(Mask))
|
||||
return false;
|
||||
|
||||
@ -1952,7 +1954,7 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
|
||||
// Mask must be vector of i32.
|
||||
auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
|
||||
if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
|
||||
MaskTy->isScalable() != V1->getType()->getVectorIsScalable())
|
||||
MaskTy->isScalable() != cast<VectorType>(V1->getType())->isScalable())
|
||||
return false;
|
||||
|
||||
// Check to see if Mask is valid.
|
||||
@ -1985,7 +1987,7 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
|
||||
|
||||
void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
|
||||
SmallVectorImpl<int> &Result) {
|
||||
unsigned NumElts = Mask->getType()->getVectorElementCount().Min;
|
||||
unsigned NumElts = cast<VectorType>(Mask->getType())->getElementCount().Min;
|
||||
if (isa<ConstantAggregateZero>(Mask)) {
|
||||
Result.resize(NumElts, 0);
|
||||
return;
|
||||
@ -2010,7 +2012,7 @@ void ShuffleVectorInst::setShuffleMask(ArrayRef<int> Mask) {
|
||||
Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef<int> Mask,
|
||||
Type *ResultTy) {
|
||||
Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
|
||||
if (ResultTy->getVectorIsScalable()) {
|
||||
if (cast<VectorType>(ResultTy)->isScalable()) {
|
||||
assert(is_splat(Mask) && "Unexpected shuffle");
|
||||
Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
|
||||
if (Mask[0] == 0)
|
||||
@ -2170,8 +2172,8 @@ bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,
|
||||
}
|
||||
|
||||
bool ShuffleVectorInst::isIdentityWithPadding() const {
|
||||
int NumOpElts = Op<0>()->getType()->getVectorNumElements();
|
||||
int NumMaskElts = getType()->getVectorNumElements();
|
||||
int NumOpElts = cast<VectorType>(Op<0>()->getType())->getNumElements();
|
||||
int NumMaskElts = cast<VectorType>(getType())->getNumElements();
|
||||
if (NumMaskElts <= NumOpElts)
|
||||
return false;
|
||||
|
||||
@ -2189,8 +2191,8 @@ bool ShuffleVectorInst::isIdentityWithPadding() const {
|
||||
}
|
||||
|
||||
bool ShuffleVectorInst::isIdentityWithExtract() const {
|
||||
int NumOpElts = Op<0>()->getType()->getVectorNumElements();
|
||||
int NumMaskElts = getType()->getVectorNumElements();
|
||||
int NumOpElts = cast<VectorType>(Op<0>()->getType())->getNumElements();
|
||||
int NumMaskElts = getType()->getNumElements();
|
||||
if (NumMaskElts >= NumOpElts)
|
||||
return false;
|
||||
|
||||
@ -2202,8 +2204,8 @@ bool ShuffleVectorInst::isConcat() const {
|
||||
if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
|
||||
return false;
|
||||
|
||||
int NumOpElts = Op<0>()->getType()->getVectorNumElements();
|
||||
int NumMaskElts = getType()->getVectorNumElements();
|
||||
int NumOpElts = cast<VectorType>(Op<0>()->getType())->getNumElements();
|
||||
int NumMaskElts = getType()->getNumElements();
|
||||
if (NumMaskElts != NumOpElts * 2)
|
||||
return false;
|
||||
|
||||
@ -2944,7 +2946,8 @@ CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
|
||||
"Invalid cast");
|
||||
assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
|
||||
assert((!Ty->isVectorTy() ||
|
||||
Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) &&
|
||||
cast<VectorType>(Ty)->getNumElements() ==
|
||||
cast<VectorType>(S->getType())->getNumElements()) &&
|
||||
"Invalid cast");
|
||||
|
||||
if (Ty->isIntOrIntVectorTy())
|
||||
@ -2962,7 +2965,8 @@ CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
|
||||
"Invalid cast");
|
||||
assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
|
||||
assert((!Ty->isVectorTy() ||
|
||||
Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) &&
|
||||
cast<VectorType>(Ty)->getNumElements() ==
|
||||
cast<VectorType>(S->getType())->getNumElements()) &&
|
||||
"Invalid cast");
|
||||
|
||||
if (Ty->isIntOrIntVectorTy())
|
||||
|
@ -2825,8 +2825,9 @@ void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
|
||||
&I);
|
||||
Assert(SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace(),
|
||||
"AddrSpaceCast must be between different address spaces", &I);
|
||||
if (SrcTy->isVectorTy())
|
||||
Assert(SrcTy->getVectorNumElements() == DestTy->getVectorNumElements(),
|
||||
if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
|
||||
Assert(SrcVTy->getNumElements() ==
|
||||
cast<VectorType>(DestTy)->getNumElements(),
|
||||
"AddrSpaceCast vector pointer number of elements mismatch", &I);
|
||||
visitInstruction(I);
|
||||
}
|
||||
@ -3333,16 +3334,18 @@ void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
|
||||
GEP.getResultElementType() == ElTy,
|
||||
"GEP is not of right type for indices!", &GEP, ElTy);
|
||||
|
||||
if (GEP.getType()->isVectorTy()) {
|
||||
if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
|
||||
// Additional checks for vector GEPs.
|
||||
unsigned GEPWidth = GEP.getType()->getVectorNumElements();
|
||||
unsigned GEPWidth = GEPVTy->getNumElements();
|
||||
if (GEP.getPointerOperandType()->isVectorTy())
|
||||
Assert(GEPWidth == GEP.getPointerOperandType()->getVectorNumElements(),
|
||||
"Vector GEP result width doesn't match operand's", &GEP);
|
||||
Assert(
|
||||
GEPWidth ==
|
||||
cast<VectorType>(GEP.getPointerOperandType())->getNumElements(),
|
||||
"Vector GEP result width doesn't match operand's", &GEP);
|
||||
for (Value *Idx : Idxs) {
|
||||
Type *IndexTy = Idx->getType();
|
||||
if (IndexTy->isVectorTy()) {
|
||||
unsigned IndexWidth = IndexTy->getVectorNumElements();
|
||||
if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
|
||||
unsigned IndexWidth = IndexVTy->getNumElements();
|
||||
Assert(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
|
||||
}
|
||||
Assert(IndexTy->isIntOrIntVectorTy(),
|
||||
@ -4656,8 +4659,8 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
|
||||
"masked_load: return must match pointer type", Call);
|
||||
Assert(PassThru->getType() == DataTy,
|
||||
"masked_load: pass through and data type must match", Call);
|
||||
Assert(Mask->getType()->getVectorNumElements() ==
|
||||
DataTy->getVectorNumElements(),
|
||||
Assert(cast<VectorType>(Mask->getType())->getNumElements() ==
|
||||
cast<VectorType>(DataTy)->getNumElements(),
|
||||
"masked_load: vector mask must be same length as data", Call);
|
||||
break;
|
||||
}
|
||||
@ -4675,8 +4678,8 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
|
||||
Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
|
||||
Assert(DataTy == Val->getType(),
|
||||
"masked_store: storee must match pointer type", Call);
|
||||
Assert(Mask->getType()->getVectorNumElements() ==
|
||||
DataTy->getVectorNumElements(),
|
||||
Assert(cast<VectorType>(Mask->getType())->getNumElements() ==
|
||||
cast<VectorType>(DataTy)->getNumElements(),
|
||||
"masked_store: vector mask must be same length as data", Call);
|
||||
break;
|
||||
}
|