IR: Introduce inrange attribute on getelementptr indices.
If the inrange keyword is present before any index, loading from or storing to any pointer derived from the getelementptr has undefined behavior if the load or store would access memory outside of the bounds of the element selected by the index marked as inrange.

This can be used, e.g. for alias analysis or to split globals at element boundaries where beneficial.

As previously proposed on llvm-dev:
http://lists.llvm.org/pipermail/llvm-dev/2016-July/102472.html

Differential Revision: https://reviews.llvm.org/D22793

llvm-svn: 286514
Commit fbb7ea5270 (parent cba269ed0b)
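To make the new syntax concrete before the diff, here is a minimal hedged sketch in textual IR (the global @vt mirrors the one used in the new test added below; @vt_slot1 is a hypothetical name, not part of the commit):

  @vt = external global [3 x i8*]
  ; inrange marks the first index: pointers derived from this expression may only
  ; be used to access the [3 x i8*] object selected by that index, i.e. @vt itself.
  @vt_slot1 = global i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, inrange i64 0, i64 1)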
@@ -7450,9 +7450,9 @@ Syntax:

::

<result> = getelementptr <ty>, <ty>* <ptrval>{, <ty> <idx>}*
<result> = getelementptr inbounds <ty>, <ty>* <ptrval>{, <ty> <idx>}*
<result> = getelementptr <ty>, <ptr vector> <ptrval>, <vector index type> <idx>
<result> = getelementptr <ty>, <ty>* <ptrval>{, [inrange] <ty> <idx>}*
<result> = getelementptr inbounds <ty>, <ty>* <ptrval>{, [inrange] <ty> <idx>}*
<result> = getelementptr <ty>, <ptr vector> <ptrval>, [inrange] <vector index type> <idx>

Overview:
"""""""""

@@ -7569,6 +7569,18 @@ though, even if it happens to point into allocated storage. See the
:ref:`Pointer Aliasing Rules <pointeraliasing>` section for more
information.

If the ``inrange`` keyword is present before any index, loading from or
storing to any pointer derived from the ``getelementptr`` has undefined
behavior if the load or store would access memory outside of the bounds of
the element selected by the index marked as ``inrange``. The result of a
pointer comparison or ``ptrtoint`` (including ``ptrtoint``-like operations
involving memory) involving a pointer derived from a ``getelementptr`` with
the ``inrange`` keyword is undefined, with the exception of comparisons
in the case where both operands are in the range of the element selected
by the ``inrange`` keyword, inclusive of the address one past the end of
that element. Note that the ``inrange`` keyword is currently only allowed
in constant ``getelementptr`` expressions.

The getelementptr instruction is often confusing. For some more insight
into how it works, see :doc:`the getelementptr FAQ <GetElementPtr>`.
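A hedged example of the rule above, using a hypothetical two-table global shaped like the constexpr test added later in this patch:

  @vt = external global { [4 x i8*], [4 x i8*] }

  define i8* @second_table_slot() {
    ; OK: the access stays inside the element marked inrange (the second [4 x i8*]).
    %p = load i8*, i8** getelementptr inbounds ({ [4 x i8*], [4 x i8*] }, { [4 x i8*], [4 x i8*] }* @vt, i32 0, inrange i32 1, i32 2)
    ret i8* %p
  }

A load or store through any pointer derived from that getelementptr which falls outside the second [4 x i8*] (for example, one offset back into the first array) is undefined behavior, and the result of ptrtoint or an out-of-range comparison on such a pointer is undefined.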
@@ -280,8 +280,9 @@ enum ConstantsCodes {
CST_CODE_CE_INBOUNDS_GEP = 20, // INBOUNDS_GEP: [n x operands]
CST_CODE_BLOCKADDRESS = 21, // CST_CODE_BLOCKADDRESS [fnty, fnval, bb#]
CST_CODE_DATA = 22, // DATA: [n x elements]
CST_CODE_INLINEASM = 23 // INLINEASM: [sideeffect|alignstack|
CST_CODE_INLINEASM = 23, // INLINEASM: [sideeffect|alignstack|
// asmdialect,asmstr,conststr]
CST_CODE_CE_GEP_WITH_INRANGE_INDEX = 24, // [opty, flags, n x operands]
};

/// CastOpcodes - These are values used in the bitcode files to encode which
@@ -24,6 +24,7 @@
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/OperandTraits.h"

@@ -1071,26 +1072,31 @@ public:
/// Getelementptr form. Value* is only accepted for convenience;
/// all elements must be Constants.
///
/// \param InRangeIndex the inrange index if present or None.
/// \param OnlyIfReducedTy see \a getWithOperands() docs.
static Constant *getGetElementPtr(Type *Ty, Constant *C,
ArrayRef<Constant *> IdxList,
bool InBounds = false,
Optional<unsigned> InRangeIndex = None,
Type *OnlyIfReducedTy = nullptr) {
return getGetElementPtr(
Ty, C, makeArrayRef((Value * const *)IdxList.data(), IdxList.size()),
InBounds, OnlyIfReducedTy);
InBounds, InRangeIndex, OnlyIfReducedTy);
}
static Constant *getGetElementPtr(Type *Ty, Constant *C, Constant *Idx,
bool InBounds = false,
Optional<unsigned> InRangeIndex = None,
Type *OnlyIfReducedTy = nullptr) {
// This form of the function only exists to avoid ambiguous overload
// warnings about whether to convert Idx to ArrayRef<Constant *> or
// ArrayRef<Value *>.
return getGetElementPtr(Ty, C, cast<Value>(Idx), InBounds, OnlyIfReducedTy);
return getGetElementPtr(Ty, C, cast<Value>(Idx), InBounds, InRangeIndex,
OnlyIfReducedTy);
}
static Constant *getGetElementPtr(Type *Ty, Constant *C,
ArrayRef<Value *> IdxList,
bool InBounds = false,
Optional<unsigned> InRangeIndex = None,
Type *OnlyIfReducedTy = nullptr);

/// Create an "inbounds" getelementptr. See the documentation for the
@@ -364,7 +364,8 @@ class ZExtOperator : public ConcreteOperator<Operator, Instruction::ZExt> {};
class GEPOperator
: public ConcreteOperator<Operator, Instruction::GetElementPtr> {
enum {
IsInBounds = (1 << 0)
IsInBounds = (1 << 0),
// InRangeIndex: bits 1-6
};

friend class GetElementPtrInst;

@@ -379,6 +380,12 @@ public:
bool isInBounds() const {
return SubclassOptionalData & IsInBounds;
}
/// Returns the offset of the index with an inrange attachment, or None if
/// none.
Optional<unsigned> getInRangeIndex() const {
if (SubclassOptionalData >> 1 == 0) return None;
return (SubclassOptionalData >> 1) - 1;
}

inline op_iterator idx_begin() { return op_begin()+1; }
inline const_op_iterator idx_begin() const { return op_begin()+1; }
@@ -718,8 +718,8 @@ Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly casted by the getelementptr.
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
Type *ResultTy, const DataLayout &DL,
const TargetLibraryInfo *TLI) {
Type *ResultTy, Optional<unsigned> InRangeIndex,
const DataLayout &DL, const TargetLibraryInfo *TLI) {
Type *IntPtrTy = DL.getIntPtrType(ResultTy);

bool Any = false;

@@ -742,7 +742,8 @@ Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
if (!Any)
return nullptr;

Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ops[0], NewIdxs);
Constant *C = ConstantExpr::getGetElementPtr(
SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
if (Constant *Folded = ConstantFoldConstant(C, DL, TLI))
C = Folded;

@@ -771,13 +772,16 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
ArrayRef<Constant *> Ops,
const DataLayout &DL,
const TargetLibraryInfo *TLI) {
const GEPOperator *InnermostGEP = GEP;

Type *SrcElemTy = GEP->getSourceElementType();
Type *ResElemTy = GEP->getResultElementType();
Type *ResTy = GEP->getType();
if (!SrcElemTy->isSized())
return nullptr;

if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy, DL, TLI))
if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
GEP->getInRangeIndex(), DL, TLI))
return C;

Constant *Ptr = Ops[0];

@@ -820,6 +824,8 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,

// If this is a GEP of a GEP, fold it all into a single GEP.
while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
InnermostGEP = GEP;

SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());

// Do not try to incorporate the sub-GEP if some index is not a number.

@@ -925,8 +931,23 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
if (Offset != 0)
return nullptr;

// Preserve the inrange index from the innermost GEP if possible. We must
// have calculated the same indices up to and including the inrange index.
Optional<unsigned> InRangeIndex;
if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
if (SrcElemTy == InnermostGEP->getSourceElementType() &&
NewIdxs.size() > *LastIRIndex) {
InRangeIndex = LastIRIndex;
for (unsigned I = 0; I <= *LastIRIndex; ++I)
if (NewIdxs[I] != InnermostGEP->getOperand(I + 1)) {
InRangeIndex = None;
break;
}
}

// Create a GEP.
Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs);
Constant *C = ConstantExpr::getGetElementPtr(
SrcElemTy, Ptr, NewIdxs, /*InBounds=*/false, InRangeIndex);
assert(C->getType()->getPointerElementType() == Ty &&
"Computed GetElementPtr has unexpected type!");

@@ -944,8 +965,8 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
///
/// TODO: This function neither utilizes nor preserves nsw/nuw/inbounds/etc
/// information, due to only being passed an opcode and operands. Constant
/// TODO: This function neither utilizes nor preserves nsw/nuw/inbounds/inrange
/// etc information, due to only being passed an opcode and operands. Constant
/// folding using this function strips this information.
///
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,

@@ -965,8 +986,9 @@ Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
return C;

return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(),
Ops[0], Ops.slice(1));
return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
Ops.slice(1), GEP->isInBounds(),
GEP->getInRangeIndex());
}

if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
@@ -551,6 +551,7 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(nsw);
KEYWORD(exact);
KEYWORD(inbounds);
KEYWORD(inrange);
KEYWORD(align);
KEYWORD(addrspace);
KEYWORD(section);
@@ -3175,7 +3175,9 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
return true;
}

if (ParseGlobalValueVector(Elts) ||
Optional<unsigned> InRangeOp;
if (ParseGlobalValueVector(
Elts, Opc == Instruction::GetElementPtr ? &InRangeOp : nullptr) ||
ParseToken(lltok::rparen, "expected ')' in constantexpr"))
return true;

@@ -3214,8 +3216,16 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {

if (!GetElementPtrInst::getIndexedType(Ty, Indices))
return Error(ID.Loc, "invalid getelementptr indices");
ID.ConstantVal =
ConstantExpr::getGetElementPtr(Ty, Elts[0], Indices, InBounds);

if (InRangeOp) {
if (*InRangeOp == 0)
return Error(ID.Loc,
"inrange keyword may not appear on pointer operand");
--*InRangeOp;
}

ID.ConstantVal = ConstantExpr::getGetElementPtr(Ty, Elts[0], Indices,
InBounds, InRangeOp);
} else if (Opc == Instruction::Select) {
if (Elts.size() != 3)
return Error(ID.Loc, "expected three operands to select");
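As a hedged illustration of the check above (hypothetical globals; the error string is the one introduced in this hunk), inrange must precede an index operand rather than the pointer operand:

  @g = external global [3 x i8*]
  ; rejected: "inrange keyword may not appear on pointer operand"
  @bad = global i8** getelementptr ([3 x i8*], inrange [3 x i8*]* @g, i64 0, i64 1)
  ; accepted: the keyword precedes an index
  @ok = global i8** getelementptr ([3 x i8*], [3 x i8*]* @g, inrange i64 0, i64 1)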
@@ -3298,8 +3308,9 @@ bool LLParser::parseOptionalComdat(StringRef GlobalName, Comdat *&C) {

/// ParseGlobalValueVector
/// ::= /*empty*/
/// ::= TypeAndValue (',' TypeAndValue)*
bool LLParser::ParseGlobalValueVector(SmallVectorImpl<Constant *> &Elts) {
/// ::= [inrange] TypeAndValue (',' [inrange] TypeAndValue)*
bool LLParser::ParseGlobalValueVector(SmallVectorImpl<Constant *> &Elts,
Optional<unsigned> *InRangeOp) {
// Empty list.
if (Lex.getKind() == lltok::rbrace ||
Lex.getKind() == lltok::rsquare ||

@@ -3307,14 +3318,14 @@ bool LLParser::ParseGlobalValueVector(SmallVectorImpl<Constant *> &Elts) {
Lex.getKind() == lltok::rparen)
return false;

Constant *C;
if (ParseGlobalTypeAndValue(C)) return true;
Elts.push_back(C);
do {
if (InRangeOp && !*InRangeOp && EatIfPresent(lltok::kw_inrange))
*InRangeOp = Elts.size();

while (EatIfPresent(lltok::comma)) {
Constant *C;
if (ParseGlobalTypeAndValue(C)) return true;
Elts.push_back(C);
}
} while (EatIfPresent(lltok::comma));

return false;
}
@@ -411,7 +411,8 @@ namespace llvm {
bool ParseValID(ValID &ID, PerFunctionState *PFS = nullptr);
bool ParseGlobalValue(Type *Ty, Constant *&V);
bool ParseGlobalTypeAndValue(Constant *&V);
bool ParseGlobalValueVector(SmallVectorImpl<Constant *> &Elts);
bool ParseGlobalValueVector(SmallVectorImpl<Constant *> &Elts,
Optional<unsigned> *InRangeOp = nullptr);
bool parseOptionalComdat(StringRef GlobalName, Comdat *&C);
bool ParseMetadataAsValue(Value *&V, PerFunctionState &PFS);
bool ParseValueAsMetadata(Metadata *&MD, const Twine &TypeMsg,
@@ -103,6 +103,7 @@ enum Kind {
kw_nsw,
kw_exact,
kw_inbounds,
kw_inrange,
kw_align,
kw_addrspace,
kw_section,
@@ -3268,12 +3268,25 @@ Error BitcodeReader::parseConstants() {
}
break;
}
case bitc::CST_CODE_CE_INBOUNDS_GEP:
case bitc::CST_CODE_CE_GEP: { // CE_GEP: [n x operands]
case bitc::CST_CODE_CE_INBOUNDS_GEP: // [ty, n x operands]
case bitc::CST_CODE_CE_GEP: // [ty, n x operands]
case bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX: { // [ty, flags, n x
// operands]
unsigned OpNum = 0;
Type *PointeeType = nullptr;
if (Record.size() % 2)
if (BitCode == bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX ||
Record.size() % 2)
PointeeType = getTypeByID(Record[OpNum++]);

bool InBounds = false;
Optional<unsigned> InRangeIndex;
if (BitCode == bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX) {
uint64_t Op = Record[OpNum++];
InBounds = Op & 1;
InRangeIndex = Op >> 1;
} else if (BitCode == bitc::CST_CODE_CE_INBOUNDS_GEP)
InBounds = true;

SmallVector<Constant*, 16> Elts;
while (OpNum != Record.size()) {
Type *ElTy = getTypeByID(Record[OpNum++]);

@@ -3294,8 +3307,7 @@ Error BitcodeReader::parseConstants() {

ArrayRef<Constant *> Indices(Elts.begin() + 1, Elts.end());
V = ConstantExpr::getGetElementPtr(PointeeType, Elts[0], Indices,
BitCode ==
bitc::CST_CODE_CE_INBOUNDS_GEP);
InBounds, InRangeIndex);
break;
}
case bitc::CST_CODE_CE_SELECT: { // CE_SELECT: [opval#, opval#, opval#]
@@ -2217,9 +2217,12 @@ void ModuleBitcodeWriter::writeConstants(unsigned FirstVal, unsigned LastVal,
case Instruction::GetElementPtr: {
Code = bitc::CST_CODE_CE_GEP;
const auto *GO = cast<GEPOperator>(C);
if (GO->isInBounds())
Code = bitc::CST_CODE_CE_INBOUNDS_GEP;
Record.push_back(VE.getTypeID(GO->getSourceElementType()));
if (Optional<unsigned> Idx = GO->getInRangeIndex()) {
Code = bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX;
Record.push_back((*Idx << 1) | GO->isInBounds());
} else if (GO->isInBounds())
Code = bitc::CST_CODE_CE_INBOUNDS_GEP;
for (unsigned i = 0, e = CE->getNumOperands(); i != e; ++i) {
Record.push_back(VE.getTypeID(C->getOperand(i)->getType()));
Record.push_back(VE.getValueID(C->getOperand(i)));
@@ -1320,12 +1320,18 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
static_cast<CmpInst::Predicate>(CE->getPredicate()));
Out << " (";

Optional<unsigned> InRangeOp;
if (const GEPOperator *GEP = dyn_cast<GEPOperator>(CE)) {
TypePrinter.print(GEP->getSourceElementType(), Out);
Out << ", ";
InRangeOp = GEP->getInRangeIndex();
if (InRangeOp)
++*InRangeOp;
}

for (User::const_op_iterator OI=CE->op_begin(); OI != CE->op_end(); ++OI) {
if (InRangeOp && (OI - CE->op_begin()) == *InRangeOp)
Out << "inrange ";
TypePrinter.print((*OI)->getType(), Out);
Out << ' ';
WriteAsOperandInternal(Out, *OI, &TypePrinter, Machine, Context);
@@ -545,7 +545,10 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
} else if (CE->getOpcode() == Instruction::GetElementPtr &&
// Do not fold addrspacecast (gep 0, .., 0). It might make the
// addrspacecast uncanonicalized.
opc != Instruction::AddrSpaceCast) {
opc != Instruction::AddrSpaceCast &&
// Do not fold bitcast (gep) with inrange index, as this loses
// information.
!cast<GEPOperator>(CE)->getInRangeIndex().hasValue()) {
// If all of the indexes in the GEP are null values, there is no pointer
// adjustment going on. We might as well cast the source pointer.
bool isAllNull = true;

@@ -2046,10 +2049,10 @@ static bool isIndexInRangeOfSequentialType(SequentialType *STy,
return true;
}

template<typename IndexTy>
static Constant *ConstantFoldGetElementPtrImpl(Type *PointeeTy, Constant *C,
bool inBounds,
ArrayRef<IndexTy> Idxs) {
Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
bool InBounds,
Optional<unsigned> InRangeIndex,
ArrayRef<Value *> Idxs) {
if (Idxs.empty()) return C;
Constant *Idx0 = cast<Constant>(Idxs[0]);
if ((Idxs.size() == 1 && Idx0->isNullValue()))

@@ -2146,9 +2149,18 @@ static Constant *ConstantFoldGetElementPtrImpl(Type *PointeeTy, Constant *C,

NewIndices.push_back(Combined);
NewIndices.append(Idxs.begin() + 1, Idxs.end());

// The combined GEP normally inherits its index inrange attribute from
// the inner GEP, but if the inner GEP's last index was adjusted by the
// outer GEP, any inrange attribute on that index is invalidated.
Optional<unsigned> IRIndex = cast<GEPOperator>(CE)->getInRangeIndex();
if (IRIndex && *IRIndex == CE->getNumOperands() - 2 && !Idx0->isNullValue())
IRIndex = None;

return ConstantExpr::getGetElementPtr(
cast<GEPOperator>(CE)->getSourceElementType(), CE->getOperand(0),
NewIndices, inBounds && cast<GEPOperator>(CE)->isInBounds());
NewIndices, InBounds && cast<GEPOperator>(CE)->isInBounds(),
IRIndex);
}
}

@@ -2173,8 +2185,9 @@ static Constant *ConstantFoldGetElementPtrImpl(Type *PointeeTy, Constant *C,
if (SrcArrayTy && DstArrayTy
&& SrcArrayTy->getElementType() == DstArrayTy->getElementType()
&& SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
return ConstantExpr::getGetElementPtr(
SrcArrayTy, (Constant *)CE->getOperand(0), Idxs, inBounds);
return ConstantExpr::getGetElementPtr(SrcArrayTy,
(Constant *)CE->getOperand(0),
Idxs, InBounds, InRangeIndex);
}
}
}

@@ -2194,6 +2207,12 @@ static Constant *ConstantFoldGetElementPtrImpl(Type *PointeeTy, Constant *C,
Unknown = true;
continue;
}
if (InRangeIndex && i == *InRangeIndex + 1) {
// If an index is marked inrange, we cannot apply this canonicalization to
// the following index, as that will cause the inrange index to point to
// the wrong element.
continue;
}
if (isa<StructType>(Ty)) {
// The verify makes sure that GEPs into a struct are in range.
continue;

@@ -2256,27 +2275,17 @@ static Constant *ConstantFoldGetElementPtrImpl(Type *PointeeTy, Constant *C,
if (!NewIdxs.empty()) {
for (unsigned i = 0, e = Idxs.size(); i != e; ++i)
if (!NewIdxs[i]) NewIdxs[i] = cast<Constant>(Idxs[i]);
return ConstantExpr::getGetElementPtr(PointeeTy, C, NewIdxs, inBounds);
return ConstantExpr::getGetElementPtr(PointeeTy, C, NewIdxs, InBounds,
InRangeIndex);
}

// If all indices are known integers and normalized, we can do a simple
// check for the "inbounds" property.
if (!Unknown && !inBounds)
if (!Unknown && !InBounds)
if (auto *GV = dyn_cast<GlobalVariable>(C))
if (!GV->hasExternalWeakLinkage() && isInBoundsIndices(Idxs))
return ConstantExpr::getInBoundsGetElementPtr(PointeeTy, C, Idxs);
return ConstantExpr::getGetElementPtr(PointeeTy, C, Idxs,
/*InBounds=*/true, InRangeIndex);

return nullptr;
}

Constant *llvm::ConstantFoldGetElementPtr(Type *Ty, Constant *C,
bool inBounds,
ArrayRef<Constant *> Idxs) {
return ConstantFoldGetElementPtrImpl(Ty, C, inBounds, Idxs);
}

Constant *llvm::ConstantFoldGetElementPtr(Type *Ty, Constant *C,
bool inBounds,
ArrayRef<Value *> Idxs) {
return ConstantFoldGetElementPtrImpl(Ty, C, inBounds, Idxs);
}
@@ -19,6 +19,8 @@
#ifndef LLVM_LIB_IR_CONSTANTFOLD_H
#define LLVM_LIB_IR_CONSTANTFOLD_H

#include "llvm/ADT/Optional.h"

namespace llvm {
template <typename T> class ArrayRef;
class Value;

@@ -46,9 +48,8 @@ template <typename T> class ArrayRef;
Constant *V2);
Constant *ConstantFoldCompareInstruction(unsigned short predicate,
Constant *C1, Constant *C2);
Constant *ConstantFoldGetElementPtr(Type *Ty, Constant *C, bool inBounds,
ArrayRef<Constant *> Idxs);
Constant *ConstantFoldGetElementPtr(Type *Ty, Constant *C, bool inBounds,
Constant *ConstantFoldGetElementPtr(Type *Ty, Constant *C, bool InBounds,
Optional<unsigned> InRangeIndex,
ArrayRef<Value *> Idxs);
} // End llvm namespace
@@ -1167,7 +1167,7 @@ Constant *ConstantExpr::getWithOperands(ArrayRef<Constant *> Ops, Type *Ty,
assert(SrcTy || (Ops[0]->getType() == getOperand(0)->getType()));
return ConstantExpr::getGetElementPtr(
SrcTy ? SrcTy : GEPO->getSourceElementType(), Ops[0], Ops.slice(1),
GEPO->isInBounds(), OnlyIfReducedTy);
GEPO->isInBounds(), GEPO->getInRangeIndex(), OnlyIfReducedTy);
}
case Instruction::ICmp:
case Instruction::FCmp:

@@ -1893,6 +1893,7 @@ Constant *ConstantExpr::getSelect(Constant *C, Constant *V1, Constant *V2,

Constant *ConstantExpr::getGetElementPtr(Type *Ty, Constant *C,
ArrayRef<Value *> Idxs, bool InBounds,
Optional<unsigned> InRangeIndex,
Type *OnlyIfReducedTy) {
if (!Ty)
Ty = cast<PointerType>(C->getType()->getScalarType())->getElementType();

@@ -1901,7 +1902,8 @@ Constant *ConstantExpr::getGetElementPtr(Type *Ty, Constant *C,
Ty ==
cast<PointerType>(C->getType()->getScalarType())->getContainedType(0u));

if (Constant *FC = ConstantFoldGetElementPtr(Ty, C, InBounds, Idxs))
if (Constant *FC =
ConstantFoldGetElementPtr(Ty, C, InBounds, InRangeIndex, Idxs))
return FC; // Fold a few common cases.

// Get the result type of the getelementptr!

@@ -1937,9 +1939,12 @@ Constant *ConstantExpr::getGetElementPtr(Type *Ty, Constant *C,
Idx = ConstantVector::getSplat(NumVecElts, Idx);
ArgVec.push_back(Idx);
}

unsigned SubClassOptionalData = InBounds ? GEPOperator::IsInBounds : 0;
if (InRangeIndex && *InRangeIndex < 63)
SubClassOptionalData |= (*InRangeIndex + 1) << 1;
const ConstantExprKeyType Key(Instruction::GetElementPtr, ArgVec, 0,
InBounds ? GEPOperator::IsInBounds : 0, None,
Ty);
SubClassOptionalData, None, Ty);

LLVMContextImpl *pImpl = C->getContext().pImpl;
return pImpl->ExprConstants.getOrCreate(ReqTy, Key);
test/Analysis/ConstantFolding/gep.ll (new file, 30 lines)

@@ -0,0 +1,30 @@
; RUN: opt -instcombine -S -o - %s | FileCheck %s
; Tests that we preserve the inrange attribute on indices where possible.

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

%struct.A = type { i32 (...)** }

@vt = external global [3 x i8*]

; CHECK: define i32 (...)* @f0()
define i32 (...)* @f0() {
; CHECK-NEXT: load i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, inrange i64 0, i64 2) to i32 (...)**)
%load = load i32 (...)*, i32 (...)** getelementptr (i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, inrange i64 0, i64 1) to i32 (...)**), i64 1)
ret i32 (...)* %load
}

; CHECK: define i32 (...)* @f1()
define i32 (...)* @f1() {
; CHECK-NEXT: load i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, i64 0, i64 2) to i32 (...)**)
%load = load i32 (...)*, i32 (...)** getelementptr (i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, i64 0, inrange i64 1) to i32 (...)**), i64 1)
ret i32 (...)* %load
}

; CHECK: define i32 (...)* @f2()
define i32 (...)* @f2() {
; CHECK-NEXT: load i32 (...)*, i32 (...)** bitcast (i8** getelementptr ([3 x i8*], [3 x i8*]* @vt, i64 1, i64 1) to i32 (...)**)
%load = load i32 (...)*, i32 (...)** getelementptr (i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, i64 0, inrange i64 1) to i32 (...)**), i64 3)
ret i32 (...)* %load
}
@@ -23,6 +23,25 @@
@PR23753_b = global i8* getelementptr (i8, i8* @PR23753_a, i64 ptrtoint (i8* @PR23753_a to i64))
; CHECK: @PR23753_b = global i8* getelementptr (i8, i8* @PR23753_a, i64 ptrtoint (i8* @PR23753_a to i64))

; Verify that inrange on an index inhibits over-indexed getelementptr folding.

@nestedarray = global [2 x [4 x i8*]] zeroinitializer

; CHECK: @nestedarray.1 = alias i8*, getelementptr inbounds ([2 x [4 x i8*]], [2 x [4 x i8*]]* @nestedarray, inrange i32 0, i64 1, i32 0)
@nestedarray.1 = alias i8*, getelementptr inbounds ([2 x [4 x i8*]], [2 x [4 x i8*]]* @nestedarray, inrange i32 0, i32 0, i32 4)

; CHECK: @nestedarray.2 = alias i8*, getelementptr inbounds ([2 x [4 x i8*]], [2 x [4 x i8*]]* @nestedarray, i32 0, inrange i32 0, i32 4)
@nestedarray.2 = alias i8*, getelementptr inbounds ([2 x [4 x i8*]], [2 x [4 x i8*]]* @nestedarray, i32 0, inrange i32 0, i32 4)

; CHECK: @nestedarray.3 = alias i8*, getelementptr inbounds ([2 x [4 x i8*]], [2 x [4 x i8*]]* @nestedarray, i32 0, inrange i32 0, i32 0)
@nestedarray.3 = alias i8*, getelementptr inbounds ([4 x i8*], [4 x i8*]* getelementptr inbounds ([2 x [4 x i8*]], [2 x [4 x i8*]]* @nestedarray, i32 0, inrange i32 0), i32 0, i32 0)

; CHECK: @nestedarray.4 = alias i8*, getelementptr inbounds ([2 x [4 x i8*]], [2 x [4 x i8*]]* @nestedarray, i32 0, i32 1, i32 0)
@nestedarray.4 = alias i8*, getelementptr inbounds ([4 x i8*], [4 x i8*]* getelementptr inbounds ([2 x [4 x i8*]], [2 x [4 x i8*]]* @nestedarray, i32 0, inrange i32 0), i32 1, i32 0)

; CHECK: @nestedarray.5 = alias i8*, getelementptr inbounds ([2 x [4 x i8*]], [2 x [4 x i8*]]* @nestedarray, inrange i32 0, i32 1, i32 0)
@nestedarray.5 = alias i8*, getelementptr inbounds ([4 x i8*], [4 x i8*]* getelementptr inbounds ([2 x [4 x i8*]], [2 x [4 x i8*]]* @nestedarray, inrange i32 0, i32 0), i32 1, i32 0)

; See if i92 indices work too.
define i32 *@test({i32, i32}* %t, i92 %n) {
; CHECK: @test
@@ -1611,6 +1611,13 @@ normal:
declare void @f.writeonly() writeonly
; CHECK: declare void @f.writeonly() #39

;; Constant Expressions

define i8** @constexpr() {
; CHECK: ret i8** getelementptr inbounds ({ [4 x i8*], [4 x i8*] }, { [4 x i8*], [4 x i8*] }* null, i32 0, inrange i32 1, i32 2)
ret i8** getelementptr inbounds ({ [4 x i8*], [4 x i8*] }, { [4 x i8*], [4 x i8*] }* null, i32 0, inrange i32 1, i32 2)
}

; CHECK: attributes #0 = { alignstack=4 }
; CHECK: attributes #1 = { alignstack=8 }
; CHECK: attributes #2 = { alwaysinline }