
[IRBuilder] Add type argument to CreateMaskedLoad/Gather

Same as other CreateLoad-style APIs, these need an explicit type
argument to support opaque pointers.

Differential Revision: https://reviews.llvm.org/D105395
Nikita Popov, 2021-07-03 14:57:41 +02:00
commit ecd2dc975e (parent b463417679)
8 changed files with 32 additions and 26 deletions
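
For callers, the migration is mechanical: the vector type that was previously derived from the pointer's pointee type is now passed as the first argument. A minimal before/after sketch (not part of the patch; Builder, Ptr, Mask, and PassThru are hypothetical values assumed to be in scope):

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    void migrate(IRBuilder<> &Builder, Value *Ptr, Value *Mask,
                 Value *PassThru) {
      // Before this patch the result type came from Ptr's pointee type:
      //   Builder.CreateMaskedLoad(Ptr, Align(16), Mask, PassThru);
      // Now it is explicit, so the call keeps working once Ptr is an
      // opaque pointer with no pointee type to inspect:
      Type *VecTy = FixedVectorType::get(Builder.getInt32Ty(), 8); // <8 x i32>
      Builder.CreateMaskedLoad(VecTy, Ptr, Align(16), Mask, PassThru);
    }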

llvm/include/llvm/IR/IRBuilder.h

@@ -752,7 +752,7 @@ public:
   CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);

   /// Create a call to Masked Load intrinsic
-  CallInst *CreateMaskedLoad(Value *Ptr, Align Alignment, Value *Mask,
+  CallInst *CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask,
                              Value *PassThru = nullptr, const Twine &Name = "");

   /// Create a call to Masked Store intrinsic
@@ -760,7 +760,7 @@ public:
                               Value *Mask);

   /// Create a call to Masked Gather intrinsic
-  CallInst *CreateMaskedGather(Value *Ptrs, Align Alignment,
+  CallInst *CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment,
                                Value *Mask = nullptr, Value *PassThru = nullptr,
                                const Twine &Name = "");
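
Continuing the sketch above, CreateMaskedGather changes the same way; a null Mask still defaults to an all-ones mask. A caller sketch (Builder and Ptrs hypothetical; Ptrs must be a vector of pointers, e.g. <4 x float*>):

    // Gather four floats; GatherTy now spells out the <4 x float> result type.
    Type *GatherTy = FixedVectorType::get(Builder.getFloatTy(), 4);
    CallInst *Gather =
        Builder.CreateMaskedGather(GatherTy, Ptrs, Align(4), /*Mask=*/nullptr,
                                   /*PassThru=*/nullptr, "gathered");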

llvm/lib/IR/AutoUpgrade.cpp

@@ -1421,10 +1421,9 @@ static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
     return Builder.CreateAlignedLoad(ValTy, Ptr, Alignment);

   // Convert the mask from an integer type to a vector of i1.
-  unsigned NumElts =
-      cast<FixedVectorType>(Passthru->getType())->getNumElements();
+  unsigned NumElts = cast<FixedVectorType>(ValTy)->getNumElements();
   Mask = getX86MaskVec(Builder, Mask, NumElts);
-  return Builder.CreateMaskedLoad(Ptr, Alignment, Mask, Passthru);
+  return Builder.CreateMaskedLoad(ValTy, Ptr, Alignment, Mask, Passthru);
 }

 static Value *upgradeAbs(IRBuilder<> &Builder, CallInst &CI) {

llvm/lib/IR/IRBuilder.cpp

@@ -493,6 +493,7 @@ Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) {
 }

 /// Create a call to a Masked Load intrinsic.
+/// \p Ty        - vector type to load
 /// \p Ptr       - base pointer for the load
 /// \p Alignment - alignment of the source location
 /// \p Mask      - vector of booleans which indicates what vector lanes should
@@ -500,16 +501,16 @@ Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) {
 /// \p PassThru  - pass-through value that is used to fill the masked-off lanes
 ///                of the result
 /// \p Name      - name of the result variable
-CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, Align Alignment,
+CallInst *IRBuilderBase::CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment,
                                           Value *Mask, Value *PassThru,
                                           const Twine &Name) {
   auto *PtrTy = cast<PointerType>(Ptr->getType());
-  Type *DataTy = PtrTy->getElementType();
-  assert(DataTy->isVectorTy() && "Ptr should point to a vector");
+  assert(Ty->isVectorTy() && "Type should be vector");
+  assert(PtrTy->isOpaqueOrPointeeTypeMatches(Ty) && "Wrong element type");
   assert(Mask && "Mask should not be all-ones (null)");
   if (!PassThru)
-    PassThru = UndefValue::get(DataTy);
-  Type *OverloadedTypes[] = { DataTy, PtrTy };
+    PassThru = UndefValue::get(Ty);
+  Type *OverloadedTypes[] = { Ty, PtrTy };
   Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
   return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
                                OverloadedTypes, Name);
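
As a reference point for the code above (an illustrative sketch, not part of the patch): with an explicit <8 x i32> type and a null PassThru, the builder fills the pass-through in as undef and emits the intrinsic overloaded on the data and pointer types.

    // Emits roughly (with typed pointers):
    //   %v = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(
    //            <8 x i32>* %ptr, i32 32, <8 x i1> %mask, <8 x i32> undef)
    static Value *emitMaskedLoad(IRBuilder<> &B, Value *Ptr, Value *Mask) {
      Type *VecTy = FixedVectorType::get(B.getInt32Ty(), 8);
      return B.CreateMaskedLoad(VecTy, Ptr, Align(32), Mask,
                                /*PassThru=*/nullptr, "v");
    }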
@@ -546,6 +547,7 @@ CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
 }

 /// Create a call to a Masked Gather intrinsic.
+/// \p Ty       - vector type to gather
 /// \p Ptrs     - vector of pointers for loading
 /// \p Align    - alignment for one element
 /// \p Mask     - vector of booleans which indicates what vector lanes should
@@ -553,22 +555,27 @@ CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
 /// \p PassThru - pass-through value that is used to fill the masked-off lanes
 ///               of the result
 /// \p Name     - name of the result variable
-CallInst *IRBuilderBase::CreateMaskedGather(Value *Ptrs, Align Alignment,
-                                            Value *Mask, Value *PassThru,
+CallInst *IRBuilderBase::CreateMaskedGather(Type *Ty, Value *Ptrs,
+                                            Align Alignment, Value *Mask,
+                                            Value *PassThru,
                                             const Twine &Name) {
+  auto *VecTy = cast<VectorType>(Ty);
+  ElementCount NumElts = VecTy->getElementCount();
   auto *PtrsTy = cast<VectorType>(Ptrs->getType());
-  auto *PtrTy = cast<PointerType>(PtrsTy->getElementType());
-  ElementCount NumElts = PtrsTy->getElementCount();
-  auto *DataTy = VectorType::get(PtrTy->getElementType(), NumElts);
+  assert(cast<PointerType>(PtrsTy->getElementType())
+             ->isOpaqueOrPointeeTypeMatches(
+                 cast<VectorType>(Ty)->getElementType()) &&
+         "Element type mismatch");
+  assert(NumElts == PtrsTy->getElementCount() && "Element count mismatch");

   if (!Mask)
     Mask = Constant::getAllOnesValue(
         VectorType::get(Type::getInt1Ty(Context), NumElts));

   if (!PassThru)
-    PassThru = UndefValue::get(DataTy);
+    PassThru = UndefValue::get(Ty);

-  Type *OverloadedTypes[] = {DataTy, PtrsTy};
+  Type *OverloadedTypes[] = {Ty, PtrsTy};
   Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};

   // We specify only one type when we create this intrinsic. Types of other
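
Note the inversion in the code above: the element count now comes from the result type Ty rather than from the pointer vector, and the two new asserts spell out the remaining consistency rules (matching element types unless the pointers are opaque, and matching element counts). A sketch of operand types that satisfy the checks, including the scalable case that ElementCount covers (a hypothetical helper, not from the patch):

    // <vscale x 2 x i64> gathered through <vscale x 2 x i64*>: both vector
    // types carry the same ElementCount, and the pointee matches i64.
    void gatherTypes(LLVMContext &Ctx) {
      ElementCount EC = ElementCount::getScalable(2);
      Type *I64 = Type::getInt64Ty(Ctx);
      Type *DataTy = VectorType::get(I64, EC);                 // result type Ty
      Type *PtrsTy = VectorType::get(I64->getPointerTo(), EC); // Ptrs type
      (void)DataTy; (void)PtrsTy; // types only; silence unused warnings
    }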

llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp

@@ -475,7 +475,7 @@ auto AlignVectors::createAlignedLoad(IRBuilder<> &Builder, Type *ValTy,
     return PassThru;
   if (Mask == ConstantInt::getTrue(Mask->getType()))
     return Builder.CreateAlignedLoad(ValTy, Ptr, Align(Alignment));
-  return Builder.CreateMaskedLoad(Ptr, Align(Alignment), Mask, PassThru);
+  return Builder.CreateMaskedLoad(ValTy, Ptr, Align(Alignment), Mask, PassThru);
 }

 auto AlignVectors::createAlignedStore(IRBuilder<> &Builder, Value *Val,

llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp

@@ -72,8 +72,8 @@ static Instruction *simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC) {
   Value *PtrCast = IC.Builder.CreateBitCast(Ptr, VecPtrTy, "castvec");

   // The pass-through vector for an x86 masked load is a zero vector.
-  CallInst *NewMaskedLoad =
-      IC.Builder.CreateMaskedLoad(PtrCast, Align(1), BoolMask, ZeroVec);
+  CallInst *NewMaskedLoad = IC.Builder.CreateMaskedLoad(
+      II.getType(), PtrCast, Align(1), BoolMask, ZeroVec);
   return IC.replaceInstUsesWith(II, NewMaskedLoad);
 }

llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp

@@ -3117,7 +3117,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     if (PropagateShadow) {
       std::tie(ShadowPtr, OriginPtr) =
           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
-      setShadow(&I, IRB.CreateMaskedLoad(ShadowPtr, Alignment, Mask,
+      setShadow(&I, IRB.CreateMaskedLoad(ShadowTy, ShadowPtr, Alignment, Mask,
                                          getShadow(PassThru), "_msmaskedld"));
     } else {
       setShadow(&I, getCleanShadow(&I));

llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

@@ -2778,7 +2778,7 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(
                        : ShuffledMask;
       }
       NewLoad =
-          Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
+          Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(),
                                    GroupMask, PoisonVec, "wide.masked.vec");
     }
     else
@@ -2990,15 +2990,15 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(
     if (CreateGatherScatter) {
       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
       Value *VectorGep = State.get(Addr, Part);
-      NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
+      NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart,
                                          nullptr, "wide.masked.gather");
       addMetadata(NewLI, LI);
     } else {
       auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
       if (isMaskRequired)
         NewLI = Builder.CreateMaskedLoad(
-            VecPtr, Alignment, BlockInMaskParts[Part], PoisonValue::get(DataTy),
-            "wide.masked.load");
+            DataTy, VecPtr, Alignment, BlockInMaskParts[Part],
+            PoisonValue::get(DataTy), "wide.masked.load");
       else
         NewLI =
             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");

llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp

@@ -5403,7 +5403,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
       for (Value *V : E->Scalars)
         CommonAlignment =
             commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign());
-      NewLI = Builder.CreateMaskedGather(VecPtr, CommonAlignment);
+      NewLI = Builder.CreateMaskedGather(VecTy, VecPtr, CommonAlignment);
     }

     Value *V = propagateMetadata(NewLI, E->Scalars);