
[Alignment][NFC] Use Align with CreateMaskedLoad

Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790
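
As a hedged sketch of the type being adopted (API as it stood around LLVM 10; see D64790 for the authoritative definition):

    #include <cstdint>
    #include "llvm/Support/Alignment.h"

    void alignSketch() {
      llvm::Align A(16);                      // must be a power of two; asserts otherwise
      uint64_t Bytes = A.value();             // 16
      llvm::Align One = llvm::Align::None();  // the 1-byte "no alignment" value
      (void)Bytes; (void)One;
    }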

Reviewers: courbet

Subscribers: hiraditya, cfe-commits, llvm-commits

Tags: #clang, #llvm

Differential Revision: https://reviews.llvm.org/D73087
Guillaume Chatelet 2020-01-21 11:21:31 +01:00
parent 6b77481d59
commit 640dc77d98
7 changed files with 29 additions and 21 deletions

llvm/include/llvm/Analysis/VectorUtils.h

@@ -509,6 +509,7 @@ public:
   bool isReverse() const { return Reverse; }
   uint32_t getFactor() const { return Factor; }
   uint32_t getAlignment() const { return Alignment.value(); }
+  Align getAlign() const { return Alignment; }
   uint32_t getNumMembers() const { return Members.size(); }

   /// Try to insert a new member \p Instr with index \p Index and

llvm/include/llvm/IR/IRBuilder.h

@@ -727,7 +727,14 @@ public:
   CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);

   /// Create a call to Masked Load intrinsic
-  CallInst *CreateMaskedLoad(Value *Ptr, unsigned Align, Value *Mask,
-                             Value *PassThru = nullptr,
-                             const Twine &Name = "");
+  LLVM_ATTRIBUTE_DEPRECATED(
+      CallInst *CreateMaskedLoad(Value *Ptr, unsigned Alignment, Value *Mask,
+                                 Value *PassThru = nullptr,
+                                 const Twine &Name = ""),
+      "Use the version that takes Align instead") {
+    return CreateMaskedLoad(Ptr, Align(Alignment), Mask, PassThru, Name);
+  }
+  CallInst *CreateMaskedLoad(Value *Ptr, Align Alignment, Value *Mask,
+                             Value *PassThru = nullptr, const Twine &Name = "");

   /// Create a call to Masked Store intrinsic
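
A hedged example of the call-site migration this enables (emitMaskedLoad is a hypothetical caller; the CreateMaskedLoad signatures are the ones declared above):

    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    static CallInst *emitMaskedLoad(IRBuilder<> &B, Value *Ptr, Value *Mask,
                                    Value *PassThru) {
      // Before: B.CreateMaskedLoad(Ptr, 16, Mask, PassThru) -- now deprecated,
      // and routed through Align(16) by the wrapper above.
      return B.CreateMaskedLoad(Ptr, Align(16), Mask, PassThru, "masked.load");
    }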

llvm/lib/IR/AutoUpgrade.cpp

@@ -1257,18 +1257,19 @@ static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
   Type *ValTy = Passthru->getType();
   // Cast the pointer to the right type.
   Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
-  unsigned Align =
-      Aligned ? cast<VectorType>(Passthru->getType())->getBitWidth() / 8 : 1;
+  const Align Alignment =
+      Aligned ? Align(cast<VectorType>(Passthru->getType())->getBitWidth() / 8)
+              : Align::None();

   // If the mask is all ones just emit a regular store.
   if (const auto *C = dyn_cast<Constant>(Mask))
     if (C->isAllOnesValue())
-      return Builder.CreateAlignedLoad(ValTy, Ptr, Align);
+      return Builder.CreateAlignedLoad(ValTy, Ptr, Alignment);

   // Convert the mask from an integer type to a vector of i1.
   unsigned NumElts = Passthru->getType()->getVectorNumElements();
   Mask = getX86MaskVec(Builder, Mask, NumElts);
-  return Builder.CreateMaskedLoad(Ptr, Align, Mask, Passthru);
+  return Builder.CreateMaskedLoad(Ptr, Alignment, Mask, Passthru);
 }

 static Value *upgradeAbs(IRBuilder<> &Builder, CallInst &CI) {
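
For instance (an illustrative helper; the name is mine, the arithmetic mirrors the hunk above):

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/Support/Alignment.h"
    using namespace llvm;

    // A 256-bit passthru vector yields Align(256 / 8) == Align(32) for the
    // aligned intrinsic variants; otherwise byte alignment is assumed.
    static Align passthruAlign(VectorType *VecTy, bool Aligned) {
      return Aligned ? Align(VecTy->getBitWidth() / 8) : Align::None();
    }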

llvm/lib/IR/IRBuilder.cpp

@@ -466,14 +466,14 @@ CallInst *IRBuilderBase::CreateAssumption(Value *Cond) {
 }

 /// Create a call to a Masked Load intrinsic.
-/// \p Ptr      - base pointer for the load
-/// \p Align    - alignment of the source location
-/// \p Mask     - vector of booleans which indicates what vector lanes should
-///               be accessed in memory
-/// \p PassThru - pass-through value that is used to fill the masked-off lanes
-///               of the result
-/// \p Name     - name of the result variable
-CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, unsigned Align,
+/// \p Ptr       - base pointer for the load
+/// \p Alignment - alignment of the source location
+/// \p Mask      - vector of booleans which indicates what vector lanes should
+///                be accessed in memory
+/// \p PassThru  - pass-through value that is used to fill the masked-off lanes
+///                of the result
+/// \p Name      - name of the result variable
+CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, Align Alignment,
                                           Value *Mask, Value *PassThru,
                                           const Twine &Name) {
   auto *PtrTy = cast<PointerType>(Ptr->getType());
@@ -483,7 +483,7 @@ CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, unsigned Align,
   if (!PassThru)
     PassThru = UndefValue::get(DataTy);
   Type *OverloadedTypes[] = { DataTy, PtrTy };
-  Value *Ops[] = { Ptr, getInt32(Align), Mask, PassThru};
+  Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
   return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
                                OverloadedTypes, Name);
 }
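
Note that the emitted IR is unchanged: the alignment still ends up as an i32 immediate operand of @llvm.masked.load. A minimal sketch of that round-trip (assuming the LLVM ~10 Align API):

    #include <cstdint>
    #include "llvm/Support/Alignment.h"

    // Align(32) encodes exactly what the old raw "32" did, e.g.
    //   call <8 x float> @llvm.masked.load.v8f32.p0v8f32(%p, i32 32, %mask, %pt)
    static uint32_t encodedAlignOperand(llvm::Align Alignment) {
      return static_cast<uint32_t>(Alignment.value());
    }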

llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp

@@ -1331,7 +1331,7 @@ static Instruction *simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC) {

   // The pass-through vector for an x86 masked load is a zero vector.
   CallInst *NewMaskedLoad =
-      IC.Builder.CreateMaskedLoad(PtrCast, 1, BoolMask, ZeroVec);
+      IC.Builder.CreateMaskedLoad(PtrCast, Align::None(), BoolMask, ZeroVec);
   return IC.replaceInstUsesWith(II, NewMaskedLoad);
 }

llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp

@@ -2945,9 +2945,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     if (PropagateShadow) {
       std::tie(ShadowPtr, OriginPtr) =
           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
-      setShadow(&I, IRB.CreateMaskedLoad(
-                        ShadowPtr, Alignment ? Alignment->value() : 0, Mask,
-                        getShadow(PassThru), "_msmaskedld"));
+      setShadow(&I, IRB.CreateMaskedLoad(ShadowPtr, *Alignment, Mask,
+                                         getShadow(PassThru), "_msmaskedld"));
     } else {
       setShadow(&I, getCleanShadow(&I));
     }
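
A minimal sketch of the MaybeAlign-to-Align step assumed here (requireAlign is a hypothetical helper; the old code mapped an absent alignment to 0, the new code asserts presence and dereferences):

    #include <cassert>
    #include "llvm/Support/Alignment.h"
    using namespace llvm;

    static Align requireAlign(MaybeAlign MA) {
      assert(MA && "expected a known alignment");
      return *MA; // asserts in debug builds rather than silently yielding 0
    }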

llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

@@ -2263,7 +2263,7 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr,
                        : ShuffledMask;
       }
       NewLoad =
-          Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlignment(),
+          Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
                                    GroupMask, UndefVec, "wide.masked.vec");
     }
     else
@@ -2475,8 +2475,8 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
     auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
     if (isMaskRequired)
       NewLI = Builder.CreateMaskedLoad(
-          VecPtr, Alignment.value(), BlockInMaskParts[Part],
-          UndefValue::get(DataTy), "wide.masked.load");
+          VecPtr, Alignment, BlockInMaskParts[Part], UndefValue::get(DataTy),
+          "wide.masked.load");
     else
       NewLI = Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment.value(),
                                         "wide.load");