Reland [AssumeBundles] Use operand bundles to encode alignment assumptions
NOTE: There is a mailing list discussion on this: http://lists.llvm.org/pipermail/llvm-dev/2019-December/137632.html

Complementary to the assumption outliner prototype in D71692, this patch shows how we could simplify the code emitted for an alignment assumption. The generated code is smaller, less fragile, and it makes it easier to recognize the additional use as an "assumption use". As mentioned in D71692 and on the mailing list, we could adopt this scheme, and similar schemes for other patterns, without adopting the assumption outlining.
This commit is contained in: parent 9b2e865f05, commit c86946593e
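To make the intent of the message concrete, here is a hedged sketch distilled from the tests updated in this diff (the value %a and the 32-byte figure are illustrative only). The expanded encoding that used to be emitted for an alignment assumption:

%ptrint = ptrtoint i32* %a to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
call void @llvm.assume(i1 %maskcond)

is replaced by a single assume carrying an "align" operand bundle, with an optional third bundle operand for an offset:

call void @llvm.assume(i1 true) [ "align"(i32* %a, i64 32) ]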
@@ -785,7 +785,11 @@ public:

/// Create an assume intrinsic call that allows the optimizer to
/// assume that the provided condition will be true.
CallInst *CreateAssumption(Value *Cond);
///
/// The optional argument \p OpBundles specifies operand bundles that are
/// added to the call instruction.
CallInst *CreateAssumption(Value *Cond,
ArrayRef<OperandBundleDef> OpBundles = llvm::None);

/// Create a call to the experimental.gc.statepoint intrinsic to
/// start a new statepoint sequence.

@@ -2513,13 +2517,11 @@ public:

private:
/// Helper function that creates an assume intrinsic call that
/// represents an alignment assumption on the provided Ptr, Mask, Type
/// and Offset. It may be sometimes useful to do some other logic
/// based on this alignment check, thus it can be stored into 'TheCheck'.
/// represents an alignment assumption on the provided pointer \p PtrValue
/// with offset \p OffsetValue and alignment value \p AlignValue.
CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
Value *PtrValue, Value *Mask,
Type *IntPtrTy, Value *OffsetValue,
Value **TheCheck);
Value *PtrValue, Value *AlignValue,
Value *OffsetValue);

public:
/// Create an assume intrinsic call that represents an alignment

@@ -2528,13 +2530,9 @@ public:
/// An optional offset can be provided, and if it is provided, the offset
/// must be subtracted from the provided pointer to get the pointer with the
/// specified alignment.
///
/// It may be sometimes useful to do some other logic
/// based on this alignment check, thus it can be stored into 'TheCheck'.
CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
unsigned Alignment,
Value *OffsetValue = nullptr,
Value **TheCheck = nullptr);
Value *OffsetValue = nullptr);

/// Create an assume intrinsic call that represents an alignment
/// assumption on the provided pointer.

@@ -2543,15 +2541,11 @@ public:
/// must be subtracted from the provided pointer to get the pointer with the
/// specified alignment.
///
/// It may be sometimes useful to do some other logic
/// based on this alignment check, thus it can be stored into 'TheCheck'.
///
/// This overload handles the condition where the Alignment is dependent
/// on an existing value rather than a static value.
CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
Value *Alignment,
Value *OffsetValue = nullptr,
Value **TheCheck = nullptr);
Value *OffsetValue = nullptr);
};

/// This provides a uniform API for creating instructions and inserting
@@ -37,9 +37,9 @@ struct AlignmentFromAssumptionsPass
ScalarEvolution *SE = nullptr;
DominatorTree *DT = nullptr;

bool extractAlignmentInfo(CallInst *I, Value *&AAPtr, const SCEV *&AlignSCEV,
const SCEV *&OffSCEV);
bool processAssumption(CallInst *I);
bool extractAlignmentInfo(CallInst *I, unsigned Idx, Value *&AAPtr,
const SCEV *&AlignSCEV, const SCEV *&OffSCEV);
bool processAssumption(CallInst *I, unsigned Idx);
};
}
@@ -108,10 +108,17 @@ llvm::getKnowledgeFromBundle(CallInst &Assume,
Result.AttrKind = Attribute::getAttrKindFromName(BOI.Tag->getKey());
if (bundleHasArgument(BOI, ABA_WasOn))
Result.WasOn = getValueFromBundleOpInfo(Assume, BOI, ABA_WasOn);
auto GetArgOr1 = [&](unsigned Idx) -> unsigned {
if (auto *ConstInt = dyn_cast<ConstantInt>(
getValueFromBundleOpInfo(Assume, BOI, ABA_Argument + Idx)))
return ConstInt->getZExtValue();
return 1;
};
if (BOI.End - BOI.Begin > ABA_Argument)
Result.ArgValue =
cast<ConstantInt>(getValueFromBundleOpInfo(Assume, BOI, ABA_Argument))
->getZExtValue();
Result.ArgValue = GetArgOr1(0);
if (Result.AttrKind == Attribute::Alignment)
if (BOI.End - BOI.Begin > ABA_Argument + 1)
Result.ArgValue = MinAlign(Result.ArgValue, GetArgOr1(1));
return Result;
}
@@ -72,8 +72,9 @@ Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) {
static CallInst *createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
IRBuilderBase *Builder,
const Twine &Name = "",
Instruction *FMFSource = nullptr) {
CallInst *CI = Builder->CreateCall(Callee, Ops, Name);
Instruction *FMFSource = nullptr,
ArrayRef<OperandBundleDef> OpBundles = {}) {
CallInst *CI = Builder->CreateCall(Callee, Ops, OpBundles, Name);
if (FMFSource)
CI->copyFastMathFlags(FMFSource);
return CI;

@@ -450,14 +451,16 @@ CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {
return createCallHelper(TheFn, Ops, this);
}

CallInst *IRBuilderBase::CreateAssumption(Value *Cond) {
CallInst *
IRBuilderBase::CreateAssumption(Value *Cond,
ArrayRef<OperandBundleDef> OpBundles) {
assert(Cond->getType() == getInt1Ty() &&
"an assumption condition must be of type i1");

Value *Ops[] = { Cond };
Module *M = BB->getParent()->getParent();
Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
return createCallHelper(FnAssume, Ops, this);
return createCallHelper(FnAssume, Ops, this, "", nullptr, OpBundles);
}

/// Create a call to a Masked Load intrinsic.

@@ -1113,63 +1116,37 @@ Value *IRBuilderBase::CreatePreserveStructAccessIndex(
return Fn;
}

CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(
const DataLayout &DL, Value *PtrValue, Value *Mask, Type *IntPtrTy,
Value *OffsetValue, Value **TheCheck) {
Value *PtrIntValue = CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");

if (OffsetValue) {
bool IsOffsetZero = false;
if (const auto *CI = dyn_cast<ConstantInt>(OffsetValue))
IsOffsetZero = CI->isZero();

if (!IsOffsetZero) {
if (OffsetValue->getType() != IntPtrTy)
OffsetValue = CreateIntCast(OffsetValue, IntPtrTy, /*isSigned*/ true,
"offsetcast");
PtrIntValue = CreateSub(PtrIntValue, OffsetValue, "offsetptr");
}
CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
Value *PtrValue,
Value *AlignValue,
Value *OffsetValue) {
SmallVector<Value *, 4> Vals({PtrValue, AlignValue});
if (OffsetValue)
Vals.push_back(OffsetValue);
OperandBundleDefT<Value *> AlignOpB("align", Vals);
return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});
}

Value *Zero = ConstantInt::get(IntPtrTy, 0);
Value *MaskedPtr = CreateAnd(PtrIntValue, Mask, "maskedptr");
Value *InvCond = CreateICmpEQ(MaskedPtr, Zero, "maskcond");
if (TheCheck)
*TheCheck = InvCond;

return CreateAssumption(InvCond);
}

CallInst *IRBuilderBase::CreateAlignmentAssumption(
const DataLayout &DL, Value *PtrValue, unsigned Alignment,
Value *OffsetValue, Value **TheCheck) {
CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
Value *PtrValue,
unsigned Alignment,
Value *OffsetValue) {
assert(isa<PointerType>(PtrValue->getType()) &&
"trying to create an alignment assumption on a non-pointer?");
assert(Alignment != 0 && "Invalid Alignment");
auto *PtrTy = cast<PointerType>(PtrValue->getType());
Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());

Value *Mask = ConstantInt::get(IntPtrTy, Alignment - 1);
return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
OffsetValue, TheCheck);
Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);
return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);
}

CallInst *IRBuilderBase::CreateAlignmentAssumption(
const DataLayout &DL, Value *PtrValue, Value *Alignment,
Value *OffsetValue, Value **TheCheck) {
CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
Value *PtrValue,
Value *Alignment,
Value *OffsetValue) {
assert(isa<PointerType>(PtrValue->getType()) &&
"trying to create an alignment assumption on a non-pointer?");
auto *PtrTy = cast<PointerType>(PtrValue->getType());
Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());

if (Alignment->getType() != IntPtrTy)
Alignment = CreateIntCast(Alignment, IntPtrTy, /*isSigned*/ false,
"alignmentcast");

Value *Mask = CreateSub(Alignment, ConstantInt::get(IntPtrTy, 1), "mask");

return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
OffsetValue, TheCheck);
return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
}

IRBuilderDefaultInserter::~IRBuilderDefaultInserter() {}
@@ -4483,21 +4483,32 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
Assert(Elem.Tag->getKey() == "ignore" ||
Attribute::isExistingAttribute(Elem.Tag->getKey()),
"tags must be valid attribute names");
Assert(Elem.End - Elem.Begin <= 2, "to many arguments");
Attribute::AttrKind Kind =
Attribute::getAttrKindFromName(Elem.Tag->getKey());
unsigned ArgCount = Elem.End - Elem.Begin;
if (Kind == Attribute::Alignment) {
Assert(ArgCount <= 3 && ArgCount >= 2,
"alignment assumptions should have 2 or 3 arguments");
Assert(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
"first argument should be a pointer");
Assert(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
"second argument should be an integer");
if (ArgCount == 3)
Assert(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
"third argument should be an integer if present");
return;
}
Assert(ArgCount <= 2, "to many arguments");
if (Kind == Attribute::None)
break;
if (Attribute::doesAttrKindHaveArgument(Kind)) {
Assert(Elem.End - Elem.Begin == 2,
"this attribute should have 2 arguments");
Assert(ArgCount == 2, "this attribute should have 2 arguments");
Assert(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
"the second argument should be a constant integral value");
} else if (isFuncOnlyAttr(Kind)) {
Assert((Elem.End - Elem.Begin) == 0, "this attribute has no argument");
Assert((ArgCount) == 0, "this attribute has no argument");
} else if (!isFuncOrArgAttr(Kind)) {
Assert((Elem.End - Elem.Begin) == 1,
"this attribute should have one argument");
Assert((ArgCount) == 1, "this attribute should have one argument");
}
}
break;
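To illustrate the bundle shapes the verifier now accepts or rejects (examples adapted from the verifier test near the end of this diff; the %P value is illustrative):

; accepted: pointer plus alignment
call void @llvm.assume(i1 true) [ "align"(i32* %P, i32 8) ]
; accepted: pointer, alignment, and offset
call void @llvm.assume(i1 true) [ "align"(i32* %P, i32 32, i32 24) ]
; rejected: "alignment assumptions should have 2 or 3 arguments"
call void @llvm.assume(i1 true) [ "align"(i32* %P, i32 8, i32 8, i32 8) ]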
@@ -1461,11 +1461,16 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
break;
case Intrinsic::assume: {
Value *IIOperand = II->getArgOperand(0);
SmallVector<OperandBundleDef, 4> OpBundles;
II->getOperandBundlesAsDefs(OpBundles);
bool HasOpBundles = !OpBundles.empty();
// Remove an assume if it is followed by an identical assume.
// TODO: Do we need this? Unless there are conflicting assumptions, the
// computeKnownBits(IIOperand) below here eliminates redundant assumes.
Instruction *Next = II->getNextNonDebugInstruction();
if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
if (HasOpBundles &&
match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))) &&
!cast<IntrinsicInst>(Next)->hasOperandBundles())
return eraseInstFromFunction(CI);

// Canonicalize assume(a && b) -> assume(a); assume(b);

@@ -1475,14 +1480,15 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
Value *AssumeIntrinsic = II->getCalledOperand();
Value *A, *B;
if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, II->getName());
Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, OpBundles,
II->getName());
Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName());
return eraseInstFromFunction(*II);
}
// assume(!(a || b)) -> assume(!a); assume(!b);
if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
Builder.CreateNot(A), II->getName());
Builder.CreateNot(A), OpBundles, II->getName());
Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
Builder.CreateNot(B), II->getName());
return eraseInstFromFunction(*II);

@@ -1498,6 +1504,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
isValidAssumeForContext(II, LHS, &DT)) {
MDNode *MD = MDNode::get(II->getContext(), None);
LHS->setMetadata(LLVMContext::MD_nonnull, MD);
if (!HasOpBundles)
return eraseInstFromFunction(*II);

// TODO: apply nonnull return attributes to calls and invokes
@@ -15,6 +15,7 @@
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instructions.h"
#include "llvm/InitializePasses.h"
#define AA_NAME "alignment-from-assumptions"
#define DEBUG_TYPE AA_NAME
@@ -203,103 +204,33 @@ static Align getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV,
}

bool AlignmentFromAssumptionsPass::extractAlignmentInfo(CallInst *I,
unsigned Idx,
Value *&AAPtr,
const SCEV *&AlignSCEV,
const SCEV *&OffSCEV) {
// An alignment assume must be a statement about the least-significant
// bits of the pointer being zero, possibly with some offset.
ICmpInst *ICI = dyn_cast<ICmpInst>(I->getArgOperand(0));
if (!ICI)
Type *Int64Ty = Type::getInt64Ty(I->getContext());
OperandBundleUse AlignOB = I->getOperandBundleAt(Idx);
if (AlignOB.getTagName() != "align")
return false;

// This must be an expression of the form: x & m == 0.
if (ICI->getPredicate() != ICmpInst::ICMP_EQ)
return false;

// Swap things around so that the RHS is 0.
Value *CmpLHS = ICI->getOperand(0);
Value *CmpRHS = ICI->getOperand(1);
const SCEV *CmpLHSSCEV = SE->getSCEV(CmpLHS);
const SCEV *CmpRHSSCEV = SE->getSCEV(CmpRHS);
if (CmpLHSSCEV->isZero())
std::swap(CmpLHS, CmpRHS);
else if (!CmpRHSSCEV->isZero())
return false;

BinaryOperator *CmpBO = dyn_cast<BinaryOperator>(CmpLHS);
if (!CmpBO || CmpBO->getOpcode() != Instruction::And)
return false;

// Swap things around so that the right operand of the and is a constant
// (the mask); we cannot deal with variable masks.
Value *AndLHS = CmpBO->getOperand(0);
Value *AndRHS = CmpBO->getOperand(1);
const SCEV *AndLHSSCEV = SE->getSCEV(AndLHS);
const SCEV *AndRHSSCEV = SE->getSCEV(AndRHS);
if (isa<SCEVConstant>(AndLHSSCEV)) {
std::swap(AndLHS, AndRHS);
std::swap(AndLHSSCEV, AndRHSSCEV);
}

const SCEVConstant *MaskSCEV = dyn_cast<SCEVConstant>(AndRHSSCEV);
if (!MaskSCEV)
return false;

// The mask must have some trailing ones (otherwise the condition is
// trivial and tells us nothing about the alignment of the left operand).
unsigned TrailingOnes = MaskSCEV->getAPInt().countTrailingOnes();
if (!TrailingOnes)
return false;

// Cap the alignment at the maximum with which LLVM can deal (and make sure
// we don't overflow the shift).
uint64_t Alignment;
TrailingOnes = std::min(TrailingOnes,
unsigned(sizeof(unsigned) * CHAR_BIT - 1));
Alignment = std::min(1u << TrailingOnes, +Value::MaximumAlignment);

Type *Int64Ty = Type::getInt64Ty(I->getParent()->getParent()->getContext());
AlignSCEV = SE->getConstant(Int64Ty, Alignment);

// The LHS might be a ptrtoint instruction, or it might be the pointer
// with an offset.
AAPtr = nullptr;
OffSCEV = nullptr;
if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(AndLHS)) {
AAPtr = PToI->getPointerOperand();
assert(AlignOB.Inputs.size() >= 2);
AAPtr = AlignOB.Inputs[0].get();
// TODO: Consider accumulating the offset to the base.
AAPtr = AAPtr->stripPointerCastsSameRepresentation();
AlignSCEV = SE->getSCEV(AlignOB.Inputs[1].get());
AlignSCEV = SE->getTruncateOrZeroExtend(AlignSCEV, Int64Ty);
if (AlignOB.Inputs.size() == 3)
OffSCEV = SE->getSCEV(AlignOB.Inputs[2].get());
else
OffSCEV = SE->getZero(Int64Ty);
} else if (const SCEVAddExpr* AndLHSAddSCEV =
dyn_cast<SCEVAddExpr>(AndLHSSCEV)) {
// Try to find the ptrtoint; subtract it and the rest is the offset.
for (SCEVAddExpr::op_iterator J = AndLHSAddSCEV->op_begin(),
JE = AndLHSAddSCEV->op_end(); J != JE; ++J)
if (const SCEVUnknown *OpUnk = dyn_cast<SCEVUnknown>(*J))
if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(OpUnk->getValue())) {
AAPtr = PToI->getPointerOperand();
OffSCEV = SE->getMinusSCEV(AndLHSAddSCEV, *J);
break;
}
}

if (!AAPtr)
return false;

// Sign extend the offset to 64 bits (so that it is like all of the other
// expressions).
unsigned OffSCEVBits = OffSCEV->getType()->getPrimitiveSizeInBits();
if (OffSCEVBits < 64)
OffSCEV = SE->getSignExtendExpr(OffSCEV, Int64Ty);
else if (OffSCEVBits > 64)
return false;

AAPtr = AAPtr->stripPointerCasts();
OffSCEV = SE->getTruncateOrZeroExtend(OffSCEV, Int64Ty);
return true;
}

bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) {
bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall,
unsigned Idx) {
Value *AAPtr;
const SCEV *AlignSCEV, *OffSCEV;
if (!extractAlignmentInfo(ACall, AAPtr, AlignSCEV, OffSCEV))
if (!extractAlignmentInfo(ACall, Idx, AAPtr, AlignSCEV, OffSCEV))
return false;

// Skip ConstantPointerNull and UndefValue. Assumptions on these shouldn't
@@ -317,13 +248,14 @@ bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) {
continue;

if (Instruction *K = dyn_cast<Instruction>(J))
if (isValidAssumeForContext(ACall, K, DT))
WorkList.push_back(K);
}

while (!WorkList.empty()) {
Instruction *J = WorkList.pop_back_val();
if (LoadInst *LI = dyn_cast<LoadInst>(J)) {
if (!isValidAssumeForContext(ACall, J, DT))
continue;
Align NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
LI->getPointerOperand(), SE);
if (NewAlignment > LI->getAlign()) {

@@ -331,6 +263,8 @@ bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) {
++NumLoadAlignChanged;
}
} else if (StoreInst *SI = dyn_cast<StoreInst>(J)) {
if (!isValidAssumeForContext(ACall, J, DT))
continue;
Align NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
SI->getPointerOperand(), SE);
if (NewAlignment > SI->getAlign()) {

@@ -338,6 +272,8 @@ bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) {
++NumStoreAlignChanged;
}
} else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(J)) {
if (!isValidAssumeForContext(ACall, J, DT))
continue;
Align NewDestAlignment =
getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MI->getDest(), SE);

@@ -369,7 +305,7 @@ bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) {
Visited.insert(J);
for (User *UJ : J->users()) {
Instruction *K = cast<Instruction>(UJ);
if (!Visited.count(K) && isValidAssumeForContext(ACall, K, DT))
if (!Visited.count(K))
WorkList.push_back(K);
}
}

@@ -396,8 +332,11 @@ bool AlignmentFromAssumptionsPass::runImpl(Function &F, AssumptionCache &AC,

bool Changed = false;
for (auto &AssumeVH : AC.assumptions())
if (AssumeVH)
Changed |= processAssumption(cast<CallInst>(AssumeVH));
if (AssumeVH) {
CallInst *Call = cast<CallInst>(AssumeVH);
for (unsigned Idx = 0; Idx < Call->getNumOperandBundles(); Idx++)
Changed |= processAssumption(Call, Idx);
}

return Changed;
}
@@ -4,10 +4,7 @@ target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"

define i32 @foo(i32* nocapture %a) nounwind uwtable readonly {
entry:
%ptrint = ptrtoint i32* %a to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
tail call void @llvm.assume(i1 true) ["align"(i32* %a, i32 32)]
%0 = load i32, i32* %a, align 4
ret i32 %0

@@ -18,11 +15,7 @@ entry:

define i32 @foo2(i32* nocapture %a) nounwind uwtable readonly {
entry:
%ptrint = ptrtoint i32* %a to i64
%offsetptr = add i64 %ptrint, 24
%maskedptr = and i64 %offsetptr, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
tail call void @llvm.assume(i1 true) ["align"(i32* %a, i32 32, i32 24)]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 2
%0 = load i32, i32* %arrayidx, align 4
ret i32 %0

@@ -34,11 +27,7 @@ entry:

define i32 @foo2a(i32* nocapture %a) nounwind uwtable readonly {
entry:
%ptrint = ptrtoint i32* %a to i64
%offsetptr = add i64 %ptrint, 28
%maskedptr = and i64 %offsetptr, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
tail call void @llvm.assume(i1 true) ["align"(i32* %a, i32 32, i32 28)]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 -1
%0 = load i32, i32* %arrayidx, align 4
ret i32 %0

@@ -50,10 +39,7 @@ entry:

define i32 @goo(i32* nocapture %a) nounwind uwtable readonly {
entry:
%ptrint = ptrtoint i32* %a to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
tail call void @llvm.assume(i1 true) ["align"(i32* %a, i32 32, i32 0)]
%0 = load i32, i32* %a, align 4
ret i32 %0

@@ -64,10 +50,7 @@ entry:

define i32 @hoo(i32* nocapture %a) nounwind uwtable readonly {
entry:
%ptrint = ptrtoint i32* %a to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
tail call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32, i32 0)]
br label %for.body

for.body: ; preds = %entry, %for.body

@@ -98,10 +81,7 @@ for.end: ; preds = %for.body
; load(a, i0+i1+i2+32)
define void @hoo2(i32* nocapture %a, i64 %id, i64 %num) nounwind uwtable readonly {
entry:
%ptrint = ptrtoint i32* %a to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
tail call void @llvm.assume(i1 true) ["align"(i32* %a, i8 32, i64 0)]
%id.mul = shl nsw i64 %id, 6
%num.mul = shl nsw i64 %num, 6
br label %for0.body

@@ -147,10 +127,7 @@ return:

define i32 @joo(i32* nocapture %a) nounwind uwtable readonly {
entry:
%ptrint = ptrtoint i32* %a to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
tail call void @llvm.assume(i1 true) ["align"(i32* %a, i8 32, i8 0)]
br label %for.body

for.body: ; preds = %entry, %for.body
@@ -175,16 +152,13 @@ for.end: ; preds = %for.body

define i32 @koo(i32* nocapture %a) nounwind uwtable readonly {
entry:
%ptrint = ptrtoint i32* %a to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
br label %for.body

for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
tail call void @llvm.assume(i1 true) ["align"(i32* %a, i8 32, i8 0)]
%0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %r.06
%indvars.iv.next = add i64 %indvars.iv, 4

@@ -203,10 +177,7 @@ for.end: ; preds = %for.body

define i32 @koo2(i32* nocapture %a) nounwind uwtable readonly {
entry:
%ptrint = ptrtoint i32* %a to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
tail call void @llvm.assume(i1 true) ["align"(i32* %a, i128 32, i128 0)]
br label %for.body

for.body: ; preds = %entry, %for.body

@@ -231,10 +202,7 @@ for.end: ; preds = %for.body

define i32 @moo(i32* nocapture %a) nounwind uwtable {
entry:
%ptrint = ptrtoint i32* %a to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
tail call void @llvm.assume(i1 true) ["align"(i32* %a, i16 32)]
%0 = bitcast i32* %a to i8*
tail call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 64, i1 false)
ret i32 undef

@@ -246,15 +214,9 @@ entry:

define i32 @moo2(i32* nocapture %a, i32* nocapture %b) nounwind uwtable {
entry:
%ptrint = ptrtoint i32* %a to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
%ptrint1 = ptrtoint i32* %b to i64
%maskedptr3 = and i64 %ptrint1, 127
%maskcond4 = icmp eq i64 %maskedptr3, 0
tail call void @llvm.assume(i1 %maskcond4)
tail call void @llvm.assume(i1 true) ["align"(i32* %b, i32 128)]
%0 = bitcast i32* %a to i8*
tail call void @llvm.assume(i1 true) ["align"(i8* %0, i16 32)]
%1 = bitcast i32* %b to i8*
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 64, i1 false)
ret i32 undef

@@ -264,6 +226,19 @@ entry:
; CHECK: ret i32 undef
}

define i32 @moo3(i32* nocapture %a, i32* nocapture %b) nounwind uwtable {
entry:
%0 = bitcast i32* %a to i8*
tail call void @llvm.assume(i1 true) ["align"(i8* %0, i16 32), "align"(i32* %b, i32 128)]
%1 = bitcast i32* %b to i8*
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 64, i1 false)
ret i32 undef

; CHECK-LABEL: @moo3
; CHECK: @llvm.memcpy.p0i8.p0i8.i64(i8* align 32 %0, i8* align 128 %1, i64 64, i1 false)
; CHECK: ret i32 undef
}

declare void @llvm.assume(i1) nounwind

declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
@@ -7,18 +7,12 @@ define i32 @foo(i32* nocapture %a) nounwind uwtable readonly {
; CHECK-LABEL: define {{[^@]+}}@foo
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
; CHECK-NEXT: entry:
; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 32
; CHECK-NEXT: ret i32 [[TMP0]]
;
entry:
%ptrint = ptrtoint i32* %a to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]
%0 = load i32, i32* %a, align 4
ret i32 %0

@@ -28,21 +22,13 @@ define i32 @foo2(i32* nocapture %a) nounwind uwtable readonly {
; CHECK-LABEL: define {{[^@]+}}@foo2
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
; CHECK-NEXT: entry:
; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
; CHECK-NEXT: [[OFFSETPTR:%.*]] = add i64 [[PTRINT]], 24
; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[OFFSETPTR]], 31
; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32, i64 24) ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 16
; CHECK-NEXT: ret i32 [[TMP0]]
;
entry:
%ptrint = ptrtoint i32* %a to i64
%offsetptr = add i64 %ptrint, 24
%maskedptr = and i64 %offsetptr, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32, i64 24)]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 2
%0 = load i32, i32* %arrayidx, align 4
ret i32 %0

@@ -53,21 +39,13 @@ define i32 @foo2a(i32* nocapture %a) nounwind uwtable readonly {
; CHECK-LABEL: define {{[^@]+}}@foo2a
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
; CHECK-NEXT: entry:
; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
; CHECK-NEXT: [[OFFSETPTR:%.*]] = add i64 [[PTRINT]], 28
; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[OFFSETPTR]], 31
; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32, i64 28) ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 -1
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 32
; CHECK-NEXT: ret i32 [[TMP0]]
;
entry:
%ptrint = ptrtoint i32* %a to i64
%offsetptr = add i64 %ptrint, 28
%maskedptr = and i64 %offsetptr, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32, i64 28)]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 -1
%0 = load i32, i32* %arrayidx, align 4
ret i32 %0

@@ -78,18 +56,12 @@ define i32 @goo(i32* nocapture %a) nounwind uwtable readonly {
; CHECK-LABEL: define {{[^@]+}}@goo
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
; CHECK-NEXT: entry:
; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 32
; CHECK-NEXT: ret i32 [[TMP0]]
;
entry:
%ptrint = ptrtoint i32* %a to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]
%0 = load i32, i32* %a, align 4
ret i32 %0
@@ -99,10 +71,7 @@ define i32 @hoo(i32* nocapture %a) nounwind uwtable readonly {
; CHECK-LABEL: define {{[^@]+}}@hoo
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
; CHECK-NEXT: entry:
; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]

@@ -119,10 +88,7 @@ define i32 @hoo(i32* nocapture %a) nounwind uwtable readonly {
; CHECK-NEXT: ret i32 [[ADD_LCSSA]]
;
entry:
%ptrint = ptrtoint i32* %a to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]
br label %for.body

for.body: ; preds = %entry, %for.body

@@ -146,10 +112,7 @@ define i32 @joo(i32* nocapture %a) nounwind uwtable readonly {
; CHECK-LABEL: define {{[^@]+}}@joo
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
; CHECK-NEXT: entry:
; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 4, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]

@@ -166,10 +129,7 @@ define i32 @joo(i32* nocapture %a) nounwind uwtable readonly {
; CHECK-NEXT: ret i32 [[ADD_LCSSA]]
;
entry:
%ptrint = ptrtoint i32* %a to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]
br label %for.body

for.body: ; preds = %entry, %for.body

@@ -193,10 +153,7 @@ define i32 @koo(i32* nocapture %a) nounwind uwtable readonly {
; CHECK-LABEL: define {{[^@]+}}@koo
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
; CHECK-NEXT: entry:
; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]

@@ -213,10 +170,7 @@ define i32 @koo(i32* nocapture %a) nounwind uwtable readonly {
; CHECK-NEXT: ret i32 [[ADD_LCSSA]]
;
entry:
%ptrint = ptrtoint i32* %a to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]
br label %for.body

for.body: ; preds = %entry, %for.body

@@ -240,10 +194,7 @@ define i32 @koo2(i32* nocapture %a) nounwind uwtable readonly {
; CHECK-LABEL: define {{[^@]+}}@koo2
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
; CHECK-NEXT: entry:
; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ -4, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]

@@ -260,10 +211,7 @@ define i32 @koo2(i32* nocapture %a) nounwind uwtable readonly {
; CHECK-NEXT: ret i32 [[ADD_LCSSA]]
;
entry:
%ptrint = ptrtoint i32* %a to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]
br label %for.body

for.body: ; preds = %entry, %for.body
@@ -287,19 +235,13 @@ define i32 @moo(i32* nocapture %a) nounwind uwtable {
; CHECK-LABEL: define {{[^@]+}}@moo
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #1
; CHECK-NEXT: entry:
; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[A]] to i8*
; CHECK-NEXT: tail call void @llvm.memset.p0i8.i64(i8* align 32 [[TMP0]], i8 0, i64 64, i1 false)
; CHECK-NEXT: ret i32 undef
;
entry:
%ptrint = ptrtoint i32* %a to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]
%0 = bitcast i32* %a to i8*
tail call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 64, i1 false)
ret i32 undef

@@ -310,28 +252,16 @@ define i32 @moo2(i32* nocapture %a, i32* nocapture %b) nounwind uwtable {
; CHECK-LABEL: define {{[^@]+}}@moo2
; CHECK-SAME: (i32* nocapture [[A:%.*]], i32* nocapture [[B:%.*]]) #1
; CHECK-NEXT: entry:
; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: [[PTRINT1:%.*]] = ptrtoint i32* [[B]] to i64
; CHECK-NEXT: [[MASKEDPTR3:%.*]] = and i64 [[PTRINT1]], 127
; CHECK-NEXT: [[MASKCOND4:%.*]] = icmp eq i64 [[MASKEDPTR3]], 0
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND4]])
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[B]], i64 128) ]
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[A]] to i8*
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[B]] to i8*
; CHECK-NEXT: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 32 [[TMP0]], i8* align 128 [[TMP1]], i64 64, i1 false)
; CHECK-NEXT: ret i32 undef
;
entry:
%ptrint = ptrtoint i32* %a to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
%ptrint1 = ptrtoint i32* %b to i64
%maskedptr3 = and i64 %ptrint1, 127
%maskcond4 = icmp eq i64 %maskedptr3, 0
tail call void @llvm.assume(i1 %maskcond4)
call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]
call void @llvm.assume(i1 true) ["align"(i32* %b, i64 128)]
%0 = bitcast i32* %a to i8*
%1 = bitcast i32* %b to i8*
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 64, i1 false)
@@ -23,10 +23,7 @@ define void @foo(float* nocapture %a, float* nocapture readonly %c) #0 {
; CHECK-LABEL: define {{[^@]+}}@foo
; CHECK-SAME: (float* nocapture [[A:%.*]], float* nocapture readonly [[C:%.*]]) #0
; CHECK-NEXT: entry:
; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint float* [[A]] to i64
; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 127
; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[A]], i64 128) ]
; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[C]], align 4
; CHECK-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
; CHECK-NEXT: store float [[TMP0]], float* [[ARRAYIDX_I]], align 4

@@ -87,14 +84,8 @@ define void @foo2(float* nocapture %a, float* nocapture %b, float* nocapture rea
; CHECK-LABEL: define {{[^@]+}}@foo2
; CHECK-SAME: (float* nocapture [[A:%.*]], float* nocapture [[B:%.*]], float* nocapture readonly [[C:%.*]]) #0
; CHECK-NEXT: entry:
; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint float* [[A]] to i64
; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 127
; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: [[PTRINT1:%.*]] = ptrtoint float* [[B]] to i64
; CHECK-NEXT: [[MASKEDPTR2:%.*]] = and i64 [[PTRINT1]], 127
; CHECK-NEXT: [[MASKCOND3:%.*]] = icmp eq i64 [[MASKEDPTR2]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND3]])
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[A]], i64 128) ]
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[B]], i64 128) ]
; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[C]], align 4
; CHECK-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
; CHECK-NEXT: store float [[TMP0]], float* [[ARRAYIDX_I]], align 4
@@ -8,7 +8,7 @@ target triple = "x86_64-unknown-linux-gnu"
; should be inserted.
define void @byref_callee(float* align(128) byref(float) nocapture %a, float* %b) #0 {
; CHECK-LABEL: define {{[^@]+}}@byref_callee
; CHECK-SAME: (float* nocapture byref(float) align 128 [[A:%.*]], float* [[B:%.*]]) #0
; CHECK-SAME: (float* nocapture byref(float) align 128 [[A:%.*]], float* [[B:%.*]]) [[ATTR0:#.*]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LOAD:%.*]] = load float, float* [[A]], align 4
; CHECK-NEXT: [[B_IDX:%.*]] = getelementptr inbounds float, float* [[B]], i64 8

@@ -26,12 +26,9 @@ entry:

define void @byref_caller(float* nocapture align 64 %a, float* %b) #0 {
; CHECK-LABEL: define {{[^@]+}}@byref_caller
; CHECK-SAME: (float* nocapture align 64 [[A:%.*]], float* [[B:%.*]]) #0
; CHECK-SAME: (float* nocapture align 64 [[A:%.*]], float* [[B:%.*]]) [[ATTR0]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint float* [[A]] to i64
; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 127
; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[A]], i64 128) ]
; CHECK-NEXT: [[LOAD_I:%.*]] = load float, float* [[A]], align 4
; CHECK-NEXT: [[B_IDX_I:%.*]] = getelementptr inbounds float, float* [[B]], i64 8
; CHECK-NEXT: [[ADD_I:%.*]] = fadd float [[LOAD_I]], 2.000000e+00
@@ -346,6 +346,7 @@ define i32 @assumption_conflicts_with_known_bits(i32 %a, i32 %b) {
define void @debug_interference(i8 %x) {
; CHECK-LABEL: @debug_interference(
; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i8 [[X:%.*]], 0
; CHECK-NEXT: tail call void @llvm.assume(i1 false)
; CHECK-NEXT: tail call void @llvm.dbg.value(metadata i32 5, [[META7:metadata !.*]], metadata !DIExpression()), [[DBG9:!dbg !.*]]
; CHECK-NEXT: tail call void @llvm.assume(i1 false)
; CHECK-NEXT: tail call void @llvm.dbg.value(metadata i32 5, [[META7]], metadata !DIExpression()), [[DBG9]]
@@ -41,10 +41,7 @@ define void @caller1(i1 %c, i64* align 1 %ptr) {
; ASSUMPTIONS-ON-NEXT: br i1 [[C:%.*]], label [[TRUE2_CRITEDGE:%.*]], label [[FALSE1:%.*]]
; ASSUMPTIONS-ON: false1:
; ASSUMPTIONS-ON-NEXT: store volatile i64 1, i64* [[PTR:%.*]], align 8
; ASSUMPTIONS-ON-NEXT: [[PTRINT:%.*]] = ptrtoint i64* [[PTR]] to i64
; ASSUMPTIONS-ON-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 7
; ASSUMPTIONS-ON-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; ASSUMPTIONS-ON-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; ASSUMPTIONS-ON-NEXT: call void @llvm.assume(i1 true) [ "align"(i64* [[PTR]], i64 8) ]
; ASSUMPTIONS-ON-NEXT: store volatile i64 0, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8

@@ -54,10 +51,7 @@ define void @caller1(i1 %c, i64* align 1 %ptr) {
; ASSUMPTIONS-ON-NEXT: store volatile i64 3, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: ret void
; ASSUMPTIONS-ON: true2.critedge:
; ASSUMPTIONS-ON-NEXT: [[PTRINT_C:%.*]] = ptrtoint i64* [[PTR]] to i64
; ASSUMPTIONS-ON-NEXT: [[MASKEDPTR_C:%.*]] = and i64 [[PTRINT_C]], 7
; ASSUMPTIONS-ON-NEXT: [[MASKCOND_C:%.*]] = icmp eq i64 [[MASKEDPTR_C]], 0
; ASSUMPTIONS-ON-NEXT: tail call void @llvm.assume(i1 [[MASKCOND_C]])
; ASSUMPTIONS-ON-NEXT: call void @llvm.assume(i1 true) [ "align"(i64* [[PTR]], i64 8) ]
; ASSUMPTIONS-ON-NEXT: store volatile i64 0, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8

@@ -94,26 +88,17 @@ false2:
; This test checks that alignment assumptions do not prevent SROA.
; See PR45763.

define internal void @callee2(i64* noalias sret align 8 %arg) {
define internal void @callee2(i64* noalias sret align 32 %arg) {
store i64 0, i64* %arg, align 8
ret void
}

define amdgpu_kernel void @caller2() {
; ASSUMPTIONS-OFF-LABEL: @caller2(
; ASSUMPTIONS-OFF-NEXT: ret void
;
; ASSUMPTIONS-ON-LABEL: @caller2(
; ASSUMPTIONS-ON-NEXT: [[ALLOCA:%.*]] = alloca i64, align 8, addrspace(5)
; ASSUMPTIONS-ON-NEXT: [[CAST:%.*]] = addrspacecast i64 addrspace(5)* [[ALLOCA]] to i64*
; ASSUMPTIONS-ON-NEXT: [[PTRINT:%.*]] = ptrtoint i64* [[CAST]] to i64
; ASSUMPTIONS-ON-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 7
; ASSUMPTIONS-ON-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; ASSUMPTIONS-ON-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
; ASSUMPTIONS-ON-NEXT: ret void
; CHECK-LABEL: @caller2(
; CHECK-NEXT: ret void
;
%alloca = alloca i64, align 8, addrspace(5)
%cast = addrspacecast i64 addrspace(5)* %alloca to i64*
call void @callee2(i64* sret align 8 %cast)
call void @callee2(i64* sret align 32 %cast)
ret void
}
@@ -1,3 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: not opt -verify < %s 2>&1 | FileCheck %s

declare void @llvm.assume(i1)

@@ -6,14 +7,21 @@ define void @func(i32* %P, i32 %P1, i32* %P2, i32* %P3) {
; CHECK: tags must be valid attribute names
call void @llvm.assume(i1 true) ["adazdazd"()]
; CHECK: the second argument should be a constant integral value
call void @llvm.assume(i1 true) ["align"(i32* %P, i32 %P1)]
call void @llvm.assume(i1 true) ["dereferenceable"(i32* %P, i32 %P1)]
; CHECK: to many arguments
call void @llvm.assume(i1 true) ["align"(i32* %P, i32 8, i32 8)]
call void @llvm.assume(i1 true) ["dereferenceable"(i32* %P, i32 8, i32 8)]
; CHECK: this attribute should have 2 arguments
call void @llvm.assume(i1 true) ["align"(i32* %P)]
call void @llvm.assume(i1 true) ["dereferenceable"(i32* %P)]
; CHECK: this attribute has no argument
call void @llvm.assume(i1 true) ["align"(i32* %P, i32 4), "cold"(i32* %P)]
call void @llvm.assume(i1 true) ["dereferenceable"(i32* %P, i32 4), "cold"(i32* %P)]
; CHECK: this attribute should have one argument
call void @llvm.assume(i1 true) ["noalias"()]
call void @llvm.assume(i1 true) ["align"(i32* %P, i32 %P1, i32 4)]
; CHECK: alignment assumptions should have 2 or 3 arguments
call void @llvm.assume(i1 true) ["align"(i32* %P, i32 %P1, i32 4, i32 4)]
; CHECK: second argument should be an integer
call void @llvm.assume(i1 true) ["align"(i32* %P, i32* %P2)]
; CHECK: third argument should be an integer if present
call void @llvm.assume(i1 true) ["align"(i32* %P, i32 %P1, i32* %P2)]
ret void
}
@@ -546,3 +546,41 @@ TEST(AssumeQueryAPI, AssumptionCache) {
ASSERT_EQ(AR[0].Index, 1u);
ASSERT_EQ(AR[0].Assume, &*First);
}

TEST(AssumeQueryAPI, Alignment) {
LLVMContext C;
SMDiagnostic Err;
std::unique_ptr<Module> Mod = parseAssemblyString(
"declare void @llvm.assume(i1)\n"
"define void @test(i32* %P, i32* %P1, i32* %P2, i32 %I3, i1 %B) {\n"
"call void @llvm.assume(i1 true) [\"align\"(i32* %P, i32 8, i32 %I3)]\n"
"call void @llvm.assume(i1 true) [\"align\"(i32* %P1, i32 %I3, i32 "
"%I3)]\n"
"call void @llvm.assume(i1 true) [\"align\"(i32* %P2, i32 16, i32 8)]\n"
"ret void\n}\n",
Err, C);
if (!Mod)
Err.print("AssumeQueryAPI", errs());

Function *F = Mod->getFunction("test");
BasicBlock::iterator Start = F->begin()->begin();
IntrinsicInst *II;
RetainedKnowledge RK;
II = cast<IntrinsicInst>(&*Start);
RK = getKnowledgeFromBundle(*II, II->bundle_op_info_begin()[0]);
ASSERT_EQ(RK.AttrKind, Attribute::Alignment);
ASSERT_EQ(RK.WasOn, F->getArg(0));
ASSERT_EQ(RK.ArgValue, 1u);
Start++;
II = cast<IntrinsicInst>(&*Start);
RK = getKnowledgeFromBundle(*II, II->bundle_op_info_begin()[0]);
ASSERT_EQ(RK.AttrKind, Attribute::Alignment);
ASSERT_EQ(RK.WasOn, F->getArg(1));
ASSERT_EQ(RK.ArgValue, 1u);
Start++;
II = cast<IntrinsicInst>(&*Start);
RK = getKnowledgeFromBundle(*II, II->bundle_op_info_begin()[0]);
ASSERT_EQ(RK.AttrKind, Attribute::Alignment);
ASSERT_EQ(RK.WasOn, F->getArg(2));
ASSERT_EQ(RK.ArgValue, 8u);
}