mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-22 10:42:39 +01:00)
land David Blaikie's patch to de-constify Type, with a few tweaks.
llvm-svn: 135375
commit e1fe7061ce
parent 568b96b828
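The change in the diff below is mechanical but wide: every "const Type *" parameter, return type, and member in these headers becomes a plain "Type *". LLVM types are immutable and uniqued by their LLVMContext, so the const qualifier added no real guarantee and mostly forced const_casts at API boundaries (visible in the wrap() hunk below). A minimal caller-side sketch of the effect, assuming the post-patch headers; the helper name is illustrative, not part of the commit:

#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"

using namespace llvm;

// Illustrative helper (not from the patch): builds [N x i32]* with the
// de-constified factories. Before this commit several factories took
// 'const Type*' while handing back non-const results, so mixed code often
// needed const_cast; afterwards a plain 'Type*' flows through unchanged.
static PointerType *makeArrayPtr(LLVMContext &Ctx, uint64_t N) {
  Type *I32 = Type::getInt32Ty(Ctx);        // no 'const' anywhere
  ArrayType *Arr = ArrayType::get(I32, N);  // accepts Type* after the patch
  return PointerType::getUnqual(Arr);       // generic address space (0)
}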
@@ -1136,7 +1136,7 @@ namespace llvm {
     return reinterpret_cast<Type**>(Tys);
   }

-  inline LLVMTypeRef *wrap(const Type **Tys) {
+  inline LLVMTypeRef *wrap(Type **Tys) {
     return reinterpret_cast<LLVMTypeRef*>(const_cast<Type**>(Tys));
   }
@@ -88,7 +88,7 @@ public:
   /// getTypeStoreSize - Return the TargetData store size for the given type,
   /// if known, or a conservative value otherwise.
   ///
-  uint64_t getTypeStoreSize(const Type *Ty);
+  uint64_t getTypeStoreSize(Type *Ty);

   //===--------------------------------------------------------------------===//
   /// Alias Queries...
@@ -47,7 +47,7 @@ Constant *ConstantFoldConstantExpression(const ConstantExpr *CE,
 /// fold instructions like loads and stores, which have no constant expression
 /// form.
 ///
-Constant *ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
+Constant *ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
                                    Constant *const *Ops, unsigned NumOps,
                                    const TargetData *TD = 0);
@@ -23,7 +23,7 @@ class Type;
 class Value;

 class FindUsedTypes : public ModulePass {
-  SetVector<const Type *> UsedTypes;
+  SetVector<Type *> UsedTypes;
 public:
   static char ID; // Pass identification, replacement for typeid
   FindUsedTypes() : ModulePass(ID) {
@@ -33,7 +33,7 @@ public:
   /// getTypes - After the pass has been run, return the set containing all of
   /// the types used in the module.
   ///
-  const SetVector<const Type *> &getTypes() const { return UsedTypes; }
+  const SetVector<Type *> &getTypes() const { return UsedTypes; }

   /// Print the types found in the module. If the optional Module parameter is
   /// passed in, then the types are printed symbolically if possible, using the
@@ -45,7 +45,7 @@ private:
   /// IncorporateType - Incorporate one type and all of its subtypes into the
   /// collection of used types.
   ///
-  void IncorporateType(const Type *Ty);
+  void IncorporateType(Type *Ty);

   /// IncorporateValue - Incorporate all of the types used by this value.
   ///
@@ -51,14 +51,14 @@ const CallInst *isArrayMalloc(const Value *I, const TargetData *TD);
 /// 0: PointerType is the malloc calls' return type.
 /// 1: PointerType is the bitcast's result type.
 /// >1: Unique PointerType cannot be determined, return NULL.
-const PointerType *getMallocType(const CallInst *CI);
+PointerType *getMallocType(const CallInst *CI);

 /// getMallocAllocatedType - Returns the Type allocated by malloc call.
 /// The Type depends on the number of bitcast uses of the malloc call:
 /// 0: PointerType is the malloc calls' return type.
 /// 1: PointerType is the bitcast's result type.
 /// >1: Unique PointerType cannot be determined, return NULL.
-const Type *getMallocAllocatedType(const CallInst *CI);
+Type *getMallocAllocatedType(const CallInst *CI);

 /// getMallocArraySize - Returns the array size of a malloc call. If the
 /// argument passed to malloc is a multiple of the size of the malloced type,
@@ -103,7 +103,7 @@ namespace llvm {

     /// getType - Return the LLVM type of this SCEV expression.
     ///
-    const Type *getType() const;
+    Type *getType() const;

     /// isZero - Return true if the expression is a constant zero.
     ///
@@ -479,17 +479,17 @@ namespace llvm {
     /// the SCEV framework. This primarily includes integer types, and it
     /// can optionally include pointer types if the ScalarEvolution class
     /// has access to target-specific information.
-    bool isSCEVable(const Type *Ty) const;
+    bool isSCEVable(Type *Ty) const;

     /// getTypeSizeInBits - Return the size in bits of the specified type,
     /// for which isSCEVable must return true.
-    uint64_t getTypeSizeInBits(const Type *Ty) const;
+    uint64_t getTypeSizeInBits(Type *Ty) const;

     /// getEffectiveSCEVType - Return a type with the same bitwidth as
     /// the given type and which represents how SCEV will treat the given
     /// type, for which isSCEVable must return true. For pointer types,
     /// this is the pointer-sized integer type.
-    const Type *getEffectiveSCEVType(const Type *Ty) const;
+    Type *getEffectiveSCEVType(Type *Ty) const;

     /// getSCEV - Return a SCEV expression for the full generality of the
     /// specified expression.
@@ -497,11 +497,11 @@ namespace llvm {

     const SCEV *getConstant(ConstantInt *V);
     const SCEV *getConstant(const APInt& Val);
-    const SCEV *getConstant(const Type *Ty, uint64_t V, bool isSigned = false);
-    const SCEV *getTruncateExpr(const SCEV *Op, const Type *Ty);
-    const SCEV *getZeroExtendExpr(const SCEV *Op, const Type *Ty);
-    const SCEV *getSignExtendExpr(const SCEV *Op, const Type *Ty);
-    const SCEV *getAnyExtendExpr(const SCEV *Op, const Type *Ty);
+    const SCEV *getConstant(Type *Ty, uint64_t V, bool isSigned = false);
+    const SCEV *getTruncateExpr(const SCEV *Op, Type *Ty);
+    const SCEV *getZeroExtendExpr(const SCEV *Op, Type *Ty);
+    const SCEV *getSignExtendExpr(const SCEV *Op, Type *Ty);
+    const SCEV *getAnyExtendExpr(const SCEV *Op, Type *Ty);
     const SCEV *getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                            SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
     const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS,
@@ -550,19 +550,19 @@ namespace llvm {

     /// getSizeOfExpr - Return an expression for sizeof on the given type.
     ///
-    const SCEV *getSizeOfExpr(const Type *AllocTy);
+    const SCEV *getSizeOfExpr(Type *AllocTy);

     /// getAlignOfExpr - Return an expression for alignof on the given type.
     ///
-    const SCEV *getAlignOfExpr(const Type *AllocTy);
+    const SCEV *getAlignOfExpr(Type *AllocTy);

     /// getOffsetOfExpr - Return an expression for offsetof on the given field.
     ///
-    const SCEV *getOffsetOfExpr(const StructType *STy, unsigned FieldNo);
+    const SCEV *getOffsetOfExpr(StructType *STy, unsigned FieldNo);

     /// getOffsetOfExpr - Return an expression for offsetof on the given field.
     ///
-    const SCEV *getOffsetOfExpr(const Type *CTy, Constant *FieldNo);
+    const SCEV *getOffsetOfExpr(Type *CTy, Constant *FieldNo);

     /// getNegativeSCEV - Return the SCEV object corresponding to -V.
     ///
@@ -579,33 +579,33 @@ namespace llvm {
     /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion
     /// of the input value to the specified type. If the type must be
     /// extended, it is zero extended.
-    const SCEV *getTruncateOrZeroExtend(const SCEV *V, const Type *Ty);
+    const SCEV *getTruncateOrZeroExtend(const SCEV *V, Type *Ty);

     /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion
     /// of the input value to the specified type. If the type must be
     /// extended, it is sign extended.
-    const SCEV *getTruncateOrSignExtend(const SCEV *V, const Type *Ty);
+    const SCEV *getTruncateOrSignExtend(const SCEV *V, Type *Ty);

     /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of
     /// the input value to the specified type. If the type must be extended,
     /// it is zero extended. The conversion must not be narrowing.
-    const SCEV *getNoopOrZeroExtend(const SCEV *V, const Type *Ty);
+    const SCEV *getNoopOrZeroExtend(const SCEV *V, Type *Ty);

     /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of
     /// the input value to the specified type. If the type must be extended,
     /// it is sign extended. The conversion must not be narrowing.
-    const SCEV *getNoopOrSignExtend(const SCEV *V, const Type *Ty);
+    const SCEV *getNoopOrSignExtend(const SCEV *V, Type *Ty);

     /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
     /// the input value to the specified type. If the type must be extended,
     /// it is extended with unspecified bits. The conversion must not be
     /// narrowing.
-    const SCEV *getNoopOrAnyExtend(const SCEV *V, const Type *Ty);
+    const SCEV *getNoopOrAnyExtend(const SCEV *V, Type *Ty);

     /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
     /// input value to the specified type. The conversion must not be
     /// widening.
-    const SCEV *getTruncateOrNoop(const SCEV *V, const Type *Ty);
+    const SCEV *getTruncateOrNoop(const SCEV *V, Type *Ty);

     /// getUMaxFromMismatchedTypes - Promote the operands to the wider of
     /// the types using zero-extension, and then perform a umax operation
@@ -89,12 +89,12 @@ namespace llvm {
     /// loop (inserting one if there is none). A canonical induction variable
     /// starts at zero and steps by one on each iteration.
     PHINode *getOrInsertCanonicalInductionVariable(const Loop *L,
-                                                   const Type *Ty);
+                                                   Type *Ty);

     /// expandCodeFor - Insert code to directly compute the specified SCEV
     /// expression into the program. The inserted code is inserted into the
     /// specified block.
-    Value *expandCodeFor(const SCEV *SH, const Type *Ty, Instruction *I);
+    Value *expandCodeFor(const SCEV *SH, Type *Ty, Instruction *I);

     /// setIVIncInsertPos - Set the current IV increment loop and position.
     void setIVIncInsertPos(const Loop *L, Instruction *Pos) {
@@ -145,20 +145,20 @@ namespace llvm {
     /// reusing an existing cast if a suitable one exists, moving an existing
     /// cast if a suitable one exists but isn't in the right place, or
     /// or creating a new one.
-    Value *ReuseOrCreateCast(Value *V, const Type *Ty,
+    Value *ReuseOrCreateCast(Value *V, Type *Ty,
                              Instruction::CastOps Op,
                              BasicBlock::iterator IP);

     /// InsertNoopCastOfTo - Insert a cast of V to the specified type,
     /// which must be possible with a noop cast, doing what we can to
     /// share the casts.
-    Value *InsertNoopCastOfTo(Value *V, const Type *Ty);
+    Value *InsertNoopCastOfTo(Value *V, Type *Ty);

     /// expandAddToGEP - Expand a SCEVAddExpr with a pointer type into a GEP
     /// instead of using ptrtoint+arithmetic+inttoptr.
     Value *expandAddToGEP(const SCEV *const *op_begin,
                           const SCEV *const *op_end,
-                          const PointerType *PTy, const Type *Ty, Value *V);
+                          PointerType *PTy, Type *Ty, Value *V);

     Value *expand(const SCEV *S);

@@ -166,7 +166,7 @@ namespace llvm {
     /// expression into the program. The inserted code is inserted into the
     /// SCEVExpander's current insertion point. If a type is specified, the
     /// result will be expanded to have that type, with a cast if necessary.
-    Value *expandCodeFor(const SCEV *SH, const Type *Ty = 0);
+    Value *expandCodeFor(const SCEV *SH, Type *Ty = 0);

     /// isInsertedInstruction - Return true if the specified instruction was
     /// inserted by the code rewriter. If so, the client should not modify the
@@ -211,8 +211,8 @@ namespace llvm {
     Value *expandAddRecExprLiterally(const SCEVAddRecExpr *);
     PHINode *getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
-                                       const Type *ExpandTy,
-                                       const Type *IntTy);
+                                       Type *ExpandTy,
+                                       Type *IntTy);
   };
 }

@@ -42,7 +42,7 @@ namespace llvm {
   public:
     ConstantInt *getValue() const { return V; }

-    const Type *getType() const { return V->getType(); }
+    Type *getType() const { return V->getType(); }

     /// Methods for support type inquiry through isa, cast, and dyn_cast:
     static inline bool classof(const SCEVConstant *S) { return true; }
@@ -57,14 +57,14 @@ namespace llvm {
   class SCEVCastExpr : public SCEV {
   protected:
     const SCEV *Op;
-    const Type *Ty;
+    Type *Ty;

     SCEVCastExpr(const FoldingSetNodeIDRef ID,
-                 unsigned SCEVTy, const SCEV *op, const Type *ty);
+                 unsigned SCEVTy, const SCEV *op, Type *ty);

   public:
     const SCEV *getOperand() const { return Op; }
-    const Type *getType() const { return Ty; }
+    Type *getType() const { return Ty; }

     /// Methods for support type inquiry through isa, cast, and dyn_cast:
     static inline bool classof(const SCEVCastExpr *S) { return true; }
@@ -83,7 +83,7 @@ namespace llvm {
     friend class ScalarEvolution;

     SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
-                     const SCEV *op, const Type *ty);
+                     const SCEV *op, Type *ty);

   public:
     /// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -101,7 +101,7 @@ namespace llvm {
     friend class ScalarEvolution;

     SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
-                       const SCEV *op, const Type *ty);
+                       const SCEV *op, Type *ty);

   public:
     /// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -119,7 +119,7 @@ namespace llvm {
     friend class ScalarEvolution;

     SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
-                       const SCEV *op, const Type *ty);
+                       const SCEV *op, Type *ty);

   public:
     /// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -158,7 +158,7 @@ namespace llvm {
     op_iterator op_begin() const { return Operands; }
     op_iterator op_end() const { return Operands + NumOperands; }

-    const Type *getType() const { return getOperand(0)->getType(); }
+    Type *getType() const { return getOperand(0)->getType(); }

     NoWrapFlags getNoWrapFlags(NoWrapFlags Mask = NoWrapMask) const {
       return (NoWrapFlags)(SubclassData & Mask);
@@ -214,7 +214,7 @@ namespace llvm {
     }

   public:
-    const Type *getType() const {
+    Type *getType() const {
       // Use the type of the last operand, which is likely to be a pointer
       // type, if there is one. This doesn't usually matter, but it can help
       // reduce casts when the expressions are expanded.
@@ -263,7 +263,7 @@ namespace llvm {
     const SCEV *getLHS() const { return LHS; }
     const SCEV *getRHS() const { return RHS; }

-    const Type *getType() const {
+    Type *getType() const {
       // In most cases the types of LHS and RHS will be the same, but in some
       // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
       // depend on the type for correctness, but handling types carefully can
@@ -441,11 +441,11 @@ namespace llvm {
     /// folded with other operations into something unrecognizable. This
     /// is mainly only useful for pretty-printing and other situations
     /// where it isn't absolutely required for these to succeed.
-    bool isSizeOf(const Type *&AllocTy) const;
-    bool isAlignOf(const Type *&AllocTy) const;
-    bool isOffsetOf(const Type *&STy, Constant *&FieldNo) const;
+    bool isSizeOf(Type *&AllocTy) const;
+    bool isAlignOf(Type *&AllocTy) const;
+    bool isOffsetOf(Type *&STy, Constant *&FieldNo) const;

-    const Type *getType() const { return getValPtr()->getType(); }
+    Type *getType() const { return getValPtr()->getType(); }

     /// Methods for support type inquiry through isa, cast, and dyn_cast:
     static inline bool classof(const SCEVUnknown *S) { return true; }
@@ -39,7 +39,7 @@ public:
   /// Argument ctor - If Function argument is specified, this argument is
   /// inserted at the end of the argument list for the function.
   ///
-  explicit Argument(const Type *Ty, const Twine &Name = "", Function *F = 0);
+  explicit Argument(Type *Ty, const Twine &Name = "", Function *F = 0);

   inline const Function *getParent() const { return Parent; }
   inline Function *getParent() { return Parent; }
@@ -107,7 +107,7 @@ const Attributes MutuallyIncompatible[4] = {
 };

 /// @brief Which attributes cannot be applied to a type.
-Attributes typeIncompatible(const Type *Ty);
+Attributes typeIncompatible(Type *Ty);

 /// This turns an int alignment (a power of 2, normally) into the
 /// form used internally in Attributes.
@@ -33,12 +33,12 @@ class SelectionDAG;
 /// of insertvalue or extractvalue indices that identify a member, return
 /// the linearized index of the start of the member.
 ///
-unsigned ComputeLinearIndex(const Type *Ty,
+unsigned ComputeLinearIndex(Type *Ty,
                             const unsigned *Indices,
                             const unsigned *IndicesEnd,
                             unsigned CurIndex = 0);

-inline unsigned ComputeLinearIndex(const Type *Ty,
+inline unsigned ComputeLinearIndex(Type *Ty,
                                    ArrayRef<unsigned> Indices,
                                    unsigned CurIndex = 0) {
   return ComputeLinearIndex(Ty, Indices.begin(), Indices.end(), CurIndex);
@@ -51,7 +51,7 @@ inline unsigned ComputeLinearIndex(const Type *Ty,
 /// If Offsets is non-null, it points to a vector to be filled in
 /// with the in-memory offsets of each of the individual values.
 ///
-void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
+void ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
                      SmallVectorImpl<EVT> &ValueVTs,
                      SmallVectorImpl<uint64_t> *Offsets = 0,
                      uint64_t StartingOffset = 0);
@@ -139,7 +139,7 @@ public:

   unsigned CreateReg(EVT VT);

-  unsigned CreateRegs(const Type *Ty);
+  unsigned CreateRegs(Type *Ty);

   unsigned InitializeRegForValue(const Value *V) {
     unsigned &R = ValueMap[V];
@@ -34,15 +34,15 @@ class raw_ostream;
 /// Abstract base class for all machine specific constantpool value subclasses.
 ///
 class MachineConstantPoolValue {
-  const Type *Ty;
+  Type *Ty;

 public:
-  explicit MachineConstantPoolValue(const Type *ty) : Ty(ty) {}
+  explicit MachineConstantPoolValue(Type *ty) : Ty(ty) {}
   virtual ~MachineConstantPoolValue() {}

   /// getType - get type of this MachineConstantPoolValue.
   ///
-  const Type *getType() const { return Ty; }
+  Type *getType() const { return Ty; }

   /// getRelocationInfo - This method classifies the entry according to
@@ -104,7 +104,7 @@ public:
     return Alignment & ~(1 << (sizeof(unsigned)*CHAR_BIT-1));
   }

-  const Type *getType() const;
+  Type *getType() const;

   /// getRelocationInfo - This method classifies the entry according to
   /// whether or not it may generate a relocation entry. This must be
@@ -1291,7 +1291,7 @@ public:
   unsigned getAlignment() const { return Alignment; }
   unsigned char getTargetFlags() const { return TargetFlags; }

-  const Type *getType() const;
+  Type *getType() const;

   static bool classof(const ConstantPoolSDNode *) { return true; }
   static bool classof(const SDNode *N) {
@@ -380,7 +380,7 @@ namespace llvm {
   struct EVT {
   private:
     MVT V;
-    const Type *LLVMTy;
+    Type *LLVMTy;

   public:
     EVT() : V((MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE)),
@@ -645,12 +645,12 @@ namespace llvm {
     /// getTypeForEVT - This method returns an LLVM type corresponding to the
     /// specified EVT. For integer types, this returns an unsigned type. Note
     /// that this will abort for types that cannot be represented.
-    const Type *getTypeForEVT(LLVMContext &Context) const;
+    Type *getTypeForEVT(LLVMContext &Context) const;

     /// getEVT - Return the value type corresponding to the specified type.
     /// This returns all pointers as iPTR. If HandleUnknown is true, unknown
     /// types are returned as Other, otherwise they are invalid.
-    static EVT getEVT(const Type *Ty, bool HandleUnknown = false);
+    static EVT getEVT(Type *Ty, bool HandleUnknown = false);

     intptr_t getRawBits() {
       if (isSimple())
@@ -43,7 +43,7 @@ class Constant : public User {
   Constant(const Constant &); // Do not implement

 protected:
-  Constant(const Type *ty, ValueTy vty, Use *Ops, unsigned NumOps)
+  Constant(Type *ty, ValueTy vty, Use *Ops, unsigned NumOps)
     : User(ty, vty, Ops, NumOps) {}

   void destroyConstantImpl();
@@ -128,16 +128,16 @@ public:
     assert(0 && "Constants that do not have operands cannot be using 'From'!");
   }

-  static Constant *getNullValue(const Type* Ty);
+  static Constant *getNullValue(Type* Ty);

   /// @returns the value for an integer constant of the given type that has all
   /// its bits set to true.
   /// @brief Get the all ones value
-  static Constant *getAllOnesValue(const Type* Ty);
+  static Constant *getAllOnesValue(Type* Ty);

   /// getIntegerValue - Return the value for an integer or pointer constant,
   /// or a vector thereof, with the given scalar value.
-  static Constant *getIntegerValue(const Type* Ty, const APInt &V);
+  static Constant *getIntegerValue(Type* Ty, const APInt &V);

   /// removeDeadConstantUsers - If there are any dead constant users dangling
   /// off of this constant, remove them. This method is useful for clients
@@ -47,7 +47,7 @@ struct ConvertConstantType;
 class ConstantInt : public Constant {
   void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
   ConstantInt(const ConstantInt &); // DO NOT IMPLEMENT
-  ConstantInt(const IntegerType *Ty, const APInt& V);
+  ConstantInt(IntegerType *Ty, const APInt& V);
   APInt Val;
 protected:
   // allocate space for exactly zero operands
@@ -57,12 +57,12 @@ protected:
 public:
   static ConstantInt *getTrue(LLVMContext &Context);
   static ConstantInt *getFalse(LLVMContext &Context);
-  static Constant *getTrue(const Type *Ty);
-  static Constant *getFalse(const Type *Ty);
+  static Constant *getTrue(Type *Ty);
+  static Constant *getFalse(Type *Ty);

   /// If Ty is a vector type, return a Constant with a splat of the given
   /// value. Otherwise return a ConstantInt for the given value.
-  static Constant *get(const Type *Ty, uint64_t V, bool isSigned = false);
+  static Constant *get(Type *Ty, uint64_t V, bool isSigned = false);

   /// Return a ConstantInt with the specified integer value for the specified
   /// type. If the type is wider than 64 bits, the value will be zero-extended
@@ -70,7 +70,7 @@ public:
   /// be interpreted as a 64-bit signed integer and sign-extended to fit
   /// the type.
   /// @brief Get a ConstantInt for a specific value.
-  static ConstantInt *get(const IntegerType *Ty, uint64_t V,
+  static ConstantInt *get(IntegerType *Ty, uint64_t V,
                           bool isSigned = false);

   /// Return a ConstantInt with the specified value for the specified type. The
@@ -78,8 +78,8 @@ public:
   /// either getSExtValue() or getZExtValue() will yield a correctly sized and
   /// signed value for the type Ty.
   /// @brief Get a ConstantInt for a specific signed value.
-  static ConstantInt *getSigned(const IntegerType *Ty, int64_t V);
-  static Constant *getSigned(const Type *Ty, int64_t V);
+  static ConstantInt *getSigned(IntegerType *Ty, int64_t V);
+  static Constant *getSigned(Type *Ty, int64_t V);

   /// Return a ConstantInt with the specified value and an implied Type. The
   /// type is the integer type that corresponds to the bit width of the value.
@@ -87,12 +87,12 @@ public:

   /// Return a ConstantInt constructed from the string strStart with the given
   /// radix.
-  static ConstantInt *get(const IntegerType *Ty, StringRef Str,
+  static ConstantInt *get(IntegerType *Ty, StringRef Str,
                           uint8_t radix);

   /// If Ty is a vector type, return a Constant with a splat of the given
   /// value. Otherwise return a ConstantInt for the given value.
-  static Constant *get(const Type* Ty, const APInt& V);
+  static Constant *get(Type* Ty, const APInt& V);

   /// Return the constant as an APInt value reference. This allows clients to
   /// obtain a copy of the value, with all its precision in tact.
@@ -133,8 +133,8 @@ public:
   /// getType - Specialize the getType() method to always return an IntegerType,
   /// which reduces the amount of casting needed in parts of the compiler.
   ///
-  inline const IntegerType *getType() const {
-    return reinterpret_cast<const IntegerType*>(Value::getType());
+  inline IntegerType *getType() const {
+    return reinterpret_cast<IntegerType*>(Value::getType());
   }

   /// This static method returns true if the type Ty is big enough to
@@ -146,8 +146,8 @@ public:
   /// to the appropriate unsigned type before calling the method.
   /// @returns true if V is a valid value for type Ty
   /// @brief Determine if the value is in range for the given type.
-  static bool isValueValidForType(const Type *Ty, uint64_t V);
-  static bool isValueValidForType(const Type *Ty, int64_t V);
+  static bool isValueValidForType(Type *Ty, uint64_t V);
+  static bool isValueValidForType(Type *Ty, int64_t V);

   bool isNegative() const { return Val.isNegative(); }

@@ -233,7 +233,7 @@ class ConstantFP : public Constant {
   ConstantFP(const ConstantFP &); // DO NOT IMPLEMENT
   friend class LLVMContextImpl;
 protected:
-  ConstantFP(const Type *Ty, const APFloat& V);
+  ConstantFP(Type *Ty, const APFloat& V);
 protected:
   // allocate space for exactly zero operands
   void *operator new(size_t s) {
@@ -243,20 +243,20 @@ public:
   /// Floating point negation must be implemented with f(x) = -0.0 - x. This
   /// method returns the negative zero constant for floating point or vector
   /// floating point types; for all other types, it returns the null value.
-  static Constant *getZeroValueForNegation(const Type *Ty);
+  static Constant *getZeroValueForNegation(Type *Ty);

   /// get() - This returns a ConstantFP, or a vector containing a splat of a
   /// ConstantFP, for the specified value in the specified type. This should
   /// only be used for simple constant values like 2.0/1.0 etc, that are
   /// known-valid both as host double and as the target format.
-  static Constant *get(const Type* Ty, double V);
-  static Constant *get(const Type* Ty, StringRef Str);
+  static Constant *get(Type* Ty, double V);
+  static Constant *get(Type* Ty, StringRef Str);
   static ConstantFP *get(LLVMContext &Context, const APFloat &V);
-  static ConstantFP *getNegativeZero(const Type* Ty);
-  static ConstantFP *getInfinity(const Type *Ty, bool Negative = false);
+  static ConstantFP *getNegativeZero(Type* Ty);
+  static ConstantFP *getInfinity(Type *Ty, bool Negative = false);

   /// isValueValidForType - return true if Ty is big enough to represent V.
-  static bool isValueValidForType(const Type *Ty, const APFloat &V);
+  static bool isValueValidForType(Type *Ty, const APFloat &V);
   inline const APFloat &getValueAPF() const { return Val; }

   /// isZero - Return true if the value is positive or negative zero.
@@ -300,7 +300,7 @@ class ConstantAggregateZero : public Constant {
   void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
   ConstantAggregateZero(const ConstantAggregateZero &); // DO NOT IMPLEMENT
 protected:
-  explicit ConstantAggregateZero(const Type *ty)
+  explicit ConstantAggregateZero(Type *ty)
     : Constant(ty, ConstantAggregateZeroVal, 0, 0) {}
 protected:
   // allocate space for exactly zero operands
@@ -308,7 +308,7 @@ protected:
     return User::operator new(s, 0);
   }
 public:
-  static ConstantAggregateZero* get(const Type *Ty);
+  static ConstantAggregateZero* get(Type *Ty);

   virtual void destroyConstant();

@@ -329,10 +329,10 @@ class ConstantArray : public Constant {
                             std::vector<Constant*> >;
   ConstantArray(const ConstantArray &); // DO NOT IMPLEMENT
 protected:
-  ConstantArray(const ArrayType *T, const std::vector<Constant*> &Val);
+  ConstantArray(ArrayType *T, const std::vector<Constant*> &Val);
 public:
   // ConstantArray accessors
-  static Constant *get(const ArrayType *T, ArrayRef<Constant*> V);
+  static Constant *get(ArrayType *T, ArrayRef<Constant*> V);

   /// This method constructs a ConstantArray and initializes it with a text
   /// string. The default behavior (AddNull==true) causes a null terminator to
@@ -349,8 +349,8 @@ public:
   /// getType - Specialize the getType() method to always return an ArrayType,
   /// which reduces the amount of casting needed in parts of the compiler.
   ///
-  inline const ArrayType *getType() const {
-    return reinterpret_cast<const ArrayType*>(Value::getType());
+  inline ArrayType *getType() const {
+    return reinterpret_cast<ArrayType*>(Value::getType());
   }

   /// isString - This method returns true if the array is an array of i8 and
@@ -400,11 +400,11 @@ class ConstantStruct : public Constant {
                             std::vector<Constant*> >;
   ConstantStruct(const ConstantStruct &); // DO NOT IMPLEMENT
 protected:
-  ConstantStruct(const StructType *T, const std::vector<Constant*> &Val);
+  ConstantStruct(StructType *T, const std::vector<Constant*> &Val);
 public:
   // ConstantStruct accessors
-  static Constant *get(const StructType *T, ArrayRef<Constant*> V);
-  static Constant *get(const StructType *T, ...) END_WITH_NULL;
+  static Constant *get(StructType *T, ArrayRef<Constant*> V);
+  static Constant *get(StructType *T, ...) END_WITH_NULL;

   /// getAnon - Return an anonymous struct that has the specified
   /// elements. If the struct is possibly empty, then you must specify a
@@ -431,8 +431,8 @@ public:

   /// getType() specialization - Reduce amount of casting...
   ///
-  inline const StructType *getType() const {
-    return reinterpret_cast<const StructType*>(Value::getType());
+  inline StructType *getType() const {
+    return reinterpret_cast<StructType*>(Value::getType());
   }

   virtual void destroyConstant();
@@ -461,7 +461,7 @@ class ConstantVector : public Constant {
                             std::vector<Constant*> >;
   ConstantVector(const ConstantVector &); // DO NOT IMPLEMENT
 protected:
-  ConstantVector(const VectorType *T, const std::vector<Constant*> &Val);
+  ConstantVector(VectorType *T, const std::vector<Constant*> &Val);
 public:
   // ConstantVector accessors
   static Constant *get(ArrayRef<Constant*> V);
@@ -472,8 +472,8 @@ public:
   /// getType - Specialize the getType() method to always return a VectorType,
   /// which reduces the amount of casting needed in parts of the compiler.
   ///
-  inline const VectorType *getType() const {
-    return reinterpret_cast<const VectorType*>(Value::getType());
+  inline VectorType *getType() const {
+    return reinterpret_cast<VectorType*>(Value::getType());
   }

   /// This function will return true iff every element in this vector constant
@@ -511,8 +511,8 @@ class ConstantPointerNull : public Constant {
   void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
   ConstantPointerNull(const ConstantPointerNull &); // DO NOT IMPLEMENT
 protected:
-  explicit ConstantPointerNull(const PointerType *T)
-    : Constant(reinterpret_cast<const Type*>(T),
+  explicit ConstantPointerNull(PointerType *T)
+    : Constant(reinterpret_cast<Type*>(T),
                Value::ConstantPointerNullVal, 0, 0) {}

 protected:
@@ -522,15 +522,15 @@ protected:
   }
 public:
   /// get() - Static factory methods - Return objects of the specified value
-  static ConstantPointerNull *get(const PointerType *T);
+  static ConstantPointerNull *get(PointerType *T);

   virtual void destroyConstant();

   /// getType - Specialize the getType() method to always return an PointerType,
   /// which reduces the amount of casting needed in parts of the compiler.
   ///
-  inline const PointerType *getType() const {
-    return reinterpret_cast<const PointerType*>(Value::getType());
+  inline PointerType *getType() const {
+    return reinterpret_cast<PointerType*>(Value::getType());
   }

   /// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -591,7 +591,7 @@ class ConstantExpr : public Constant {
   friend struct ConvertConstantType<ConstantExpr, Type>;

 protected:
-  ConstantExpr(const Type *ty, unsigned Opcode, Use *Ops, unsigned NumOps)
+  ConstantExpr(Type *ty, unsigned Opcode, Use *Ops, unsigned NumOps)
     : Constant(ty, ConstantExprVal, Ops, NumOps) {
     // Operation type (an Instruction opcode) is stored as the SubclassData.
     setValueSubclassData(Opcode);
@@ -605,23 +605,23 @@ public:

   /// getAlignOf constant expr - computes the alignment of a type in a target
   /// independent way (Note: the return type is an i64).
-  static Constant *getAlignOf(const Type *Ty);
+  static Constant *getAlignOf(Type *Ty);

   /// getSizeOf constant expr - computes the (alloc) size of a type (in
   /// address-units, not bits) in a target independent way (Note: the return
   /// type is an i64).
   ///
-  static Constant *getSizeOf(const Type *Ty);
+  static Constant *getSizeOf(Type *Ty);

   /// getOffsetOf constant expr - computes the offset of a struct field in a
   /// target independent way (Note: the return type is an i64).
   ///
-  static Constant *getOffsetOf(const StructType *STy, unsigned FieldNo);
+  static Constant *getOffsetOf(StructType *STy, unsigned FieldNo);

   /// getOffsetOf constant expr - This is a generalized form of getOffsetOf,
   /// which supports any aggregate type, and any Constant index.
   ///
-  static Constant *getOffsetOf(const Type *Ty, Constant *FieldNo);
+  static Constant *getOffsetOf(Type *Ty, Constant *FieldNo);

   static Constant *getNeg(Constant *C, bool HasNUW = false, bool HasNSW =false);
   static Constant *getFNeg(Constant *C);
@@ -648,18 +648,18 @@ public:
                             bool HasNUW = false, bool HasNSW = false);
   static Constant *getLShr(Constant *C1, Constant *C2, bool isExact = false);
   static Constant *getAShr(Constant *C1, Constant *C2, bool isExact = false);
-  static Constant *getTrunc   (Constant *C, const Type *Ty);
-  static Constant *getSExt    (Constant *C, const Type *Ty);
-  static Constant *getZExt    (Constant *C, const Type *Ty);
-  static Constant *getFPTrunc (Constant *C, const Type *Ty);
-  static Constant *getFPExtend(Constant *C, const Type *Ty);
-  static Constant *getUIToFP  (Constant *C, const Type *Ty);
-  static Constant *getSIToFP  (Constant *C, const Type *Ty);
-  static Constant *getFPToUI  (Constant *C, const Type *Ty);
-  static Constant *getFPToSI  (Constant *C, const Type *Ty);
-  static Constant *getPtrToInt(Constant *C, const Type *Ty);
-  static Constant *getIntToPtr(Constant *C, const Type *Ty);
-  static Constant *getBitCast (Constant *C, const Type *Ty);
+  static Constant *getTrunc   (Constant *C, Type *Ty);
+  static Constant *getSExt    (Constant *C, Type *Ty);
+  static Constant *getZExt    (Constant *C, Type *Ty);
+  static Constant *getFPTrunc (Constant *C, Type *Ty);
+  static Constant *getFPExtend(Constant *C, Type *Ty);
+  static Constant *getUIToFP  (Constant *C, Type *Ty);
+  static Constant *getSIToFP  (Constant *C, Type *Ty);
+  static Constant *getFPToUI  (Constant *C, Type *Ty);
+  static Constant *getFPToSI  (Constant *C, Type *Ty);
+  static Constant *getPtrToInt(Constant *C, Type *Ty);
+  static Constant *getIntToPtr(Constant *C, Type *Ty);
+  static Constant *getBitCast (Constant *C, Type *Ty);

   static Constant *getNSWNeg(Constant *C) { return getNeg(C, false, true); }
   static Constant *getNUWNeg(Constant *C) { return getNeg(C, true, false); }
@@ -708,44 +708,44 @@ public:
   static Constant *getCast(
     unsigned ops, ///< The opcode for the conversion
     Constant *C, ///< The constant to be converted
-    const Type *Ty ///< The type to which the constant is converted
+    Type *Ty ///< The type to which the constant is converted
   );

   // @brief Create a ZExt or BitCast cast constant expression
   static Constant *getZExtOrBitCast(
     Constant *C, ///< The constant to zext or bitcast
-    const Type *Ty ///< The type to zext or bitcast C to
+    Type *Ty ///< The type to zext or bitcast C to
   );

   // @brief Create a SExt or BitCast cast constant expression
   static Constant *getSExtOrBitCast(
     Constant *C, ///< The constant to sext or bitcast
-    const Type *Ty ///< The type to sext or bitcast C to
+    Type *Ty ///< The type to sext or bitcast C to
   );

   // @brief Create a Trunc or BitCast cast constant expression
   static Constant *getTruncOrBitCast(
     Constant *C, ///< The constant to trunc or bitcast
-    const Type *Ty ///< The type to trunc or bitcast C to
+    Type *Ty ///< The type to trunc or bitcast C to
   );

   /// @brief Create a BitCast or a PtrToInt cast constant expression
   static Constant *getPointerCast(
     Constant *C, ///< The pointer value to be casted (operand 0)
-    const Type *Ty ///< The type to which cast should be made
+    Type *Ty ///< The type to which cast should be made
   );

   /// @brief Create a ZExt, Bitcast or Trunc for integer -> integer casts
   static Constant *getIntegerCast(
     Constant *C, ///< The integer constant to be casted
-    const Type *Ty, ///< The integer type to cast to
+    Type *Ty, ///< The integer type to cast to
     bool isSigned ///< Whether C should be treated as signed or not
   );

   /// @brief Create a FPExt, Bitcast or FPTrunc for fp -> fp casts
   static Constant *getFPCast(
     Constant *C, ///< The integer constant to be casted
-    const Type *Ty ///< The integer type to cast to
+    Type *Ty ///< The integer type to cast to
   );

   /// @brief Return true if this is a convert constant expression
@@ -845,7 +845,7 @@ public:
   /// operands replaced with the specified values and with the specified result
   /// type. The specified array must have the same number of operands as our
   /// current one.
-  Constant *getWithOperands(ArrayRef<Constant*> Ops, const Type *Ty) const;
+  Constant *getWithOperands(ArrayRef<Constant*> Ops, Type *Ty) const;

   virtual void destroyConstant();
   virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U);
@@ -886,7 +886,7 @@ class UndefValue : public Constant {
   void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
   UndefValue(const UndefValue &); // DO NOT IMPLEMENT
 protected:
-  explicit UndefValue(const Type *T) : Constant(T, UndefValueVal, 0, 0) {}
+  explicit UndefValue(Type *T) : Constant(T, UndefValueVal, 0, 0) {}
 protected:
   // allocate space for exactly zero operands
   void *operator new(size_t s) {
@@ -896,7 +896,7 @@ public:
   /// get() - Static factory methods - Return an 'undef' object of the specified
   /// type.
   ///
-  static UndefValue *get(const Type *T);
+  static UndefValue *get(Type *T);

   virtual void destroyConstant();
@@ -96,26 +96,26 @@ public:
 class FunctionType : public Type {
   FunctionType(const FunctionType &); // Do not implement
   const FunctionType &operator=(const FunctionType &); // Do not implement
-  FunctionType(const Type *Result, ArrayRef<Type*> Params, bool IsVarArgs);
+  FunctionType(Type *Result, ArrayRef<Type*> Params, bool IsVarArgs);

 public:
   /// FunctionType::get - This static method is the primary way of constructing
   /// a FunctionType.
   ///
-  static FunctionType *get(const Type *Result,
+  static FunctionType *get(Type *Result,
                            ArrayRef<Type*> Params, bool isVarArg);

   /// FunctionType::get - Create a FunctionType taking no parameters.
   ///
-  static FunctionType *get(const Type *Result, bool isVarArg);
+  static FunctionType *get(Type *Result, bool isVarArg);

   /// isValidReturnType - Return true if the specified type is valid as a return
   /// type.
-  static bool isValidReturnType(const Type *RetTy);
+  static bool isValidReturnType(Type *RetTy);

   /// isValidArgumentType - Return true if the specified type is valid as an
   /// argument type.
-  static bool isValidArgumentType(const Type *ArgTy);
+  static bool isValidArgumentType(Type *ArgTy);

   bool isVarArg() const { return getSubclassData(); }
   Type *getReturnType() const { return ContainedTys[0]; }
@@ -150,8 +150,8 @@ public:
   /// getTypeAtIndex - Given an index value into the type, return the type of
   /// the element.
   ///
-  Type *getTypeAtIndex(const Value *V) const;
-  Type *getTypeAtIndex(unsigned Idx) const;
+  Type *getTypeAtIndex(const Value *V);
+  Type *getTypeAtIndex(unsigned Idx);
   bool indexValid(const Value *V) const;
   bool indexValid(unsigned Idx) const;

@@ -250,7 +250,7 @@ public:

   /// isValidElementType - Return true if the specified type is valid as a
   /// element type.
-  static bool isValidElementType(const Type *ElemTy);
+  static bool isValidElementType(Type *ElemTy);

   // Iterator access to the elements.
@@ -260,7 +260,7 @@ public:

   /// isLayoutIdentical - Return true if this is layout identical to the
   /// specified struct.
-  bool isLayoutIdentical(const StructType *Other) const;
+  bool isLayoutIdentical(StructType *Other) const;

   // Random access to the elements
   unsigned getNumElements() const { return NumContainedTys; }
@@ -321,11 +321,11 @@ public:
   /// ArrayType::get - This static method is the primary way to construct an
   /// ArrayType
   ///
-  static ArrayType *get(const Type *ElementType, uint64_t NumElements);
+  static ArrayType *get(Type *ElementType, uint64_t NumElements);

   /// isValidElementType - Return true if the specified type is valid as a
   /// element type.
-  static bool isValidElementType(const Type *ElemTy);
+  static bool isValidElementType(Type *ElemTy);

   uint64_t getNumElements() const { return NumElements; }

@@ -348,13 +348,13 @@ public:
   /// VectorType::get - This static method is the primary way to construct an
   /// VectorType.
   ///
-  static VectorType *get(const Type *ElementType, unsigned NumElements);
+  static VectorType *get(Type *ElementType, unsigned NumElements);

   /// VectorType::getInteger - This static method gets a VectorType with the
   /// same number of elements as the input type, and the element type is an
   /// integer type of the same width as the input element type.
   ///
-  static VectorType *getInteger(const VectorType *VTy) {
+  static VectorType *getInteger(VectorType *VTy) {
     unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
     Type *EltTy = IntegerType::get(VTy->getContext(), EltBits);
     return VectorType::get(EltTy, VTy->getNumElements());
@@ -364,7 +364,7 @@ public:
   /// getInteger except that the element types are twice as wide as the
   /// elements in the input type.
   ///
-  static VectorType *getExtendedElementVectorType(const VectorType *VTy) {
+  static VectorType *getExtendedElementVectorType(VectorType *VTy) {
     unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
     Type *EltTy = IntegerType::get(VTy->getContext(), EltBits * 2);
     return VectorType::get(EltTy, VTy->getNumElements());
@@ -374,7 +374,7 @@ public:
   /// getInteger except that the element types are half as wide as the
   /// elements in the input type.
   ///
-  static VectorType *getTruncatedElementVectorType(const VectorType *VTy) {
+  static VectorType *getTruncatedElementVectorType(VectorType *VTy) {
     unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
     assert((EltBits & 1) == 0 &&
            "Cannot truncate vector element with odd bit-width");
@@ -384,7 +384,7 @@ public:

   /// isValidElementType - Return true if the specified type is valid as a
   /// element type.
-  static bool isValidElementType(const Type *ElemTy);
+  static bool isValidElementType(Type *ElemTy);

   /// @brief Return the number of elements in the Vector type.
   unsigned getNumElements() const { return NumElements; }
@@ -411,17 +411,17 @@ class PointerType : public SequentialType {
 public:
   /// PointerType::get - This constructs a pointer to an object of the specified
   /// type in a numbered address space.
-  static PointerType *get(const Type *ElementType, unsigned AddressSpace);
+  static PointerType *get(Type *ElementType, unsigned AddressSpace);

   /// PointerType::getUnqual - This constructs a pointer to an object of the
   /// specified type in the generic address space (address space zero).
-  static PointerType *getUnqual(const Type *ElementType) {
+  static PointerType *getUnqual(Type *ElementType) {
     return PointerType::get(ElementType, 0);
   }

   /// isValidElementType - Return true if the specified type is valid as a
   /// element type.
-  static bool isValidElementType(const Type *ElemTy);
+  static bool isValidElementType(Type *ElemTy);

   /// @brief Return the address space of the Pointer type.
   inline unsigned getAddressSpace() const { return getSubclassData(); }
@@ -314,7 +314,7 @@ public:
   /// GenericValue *. It is not a pointer to a GenericValue containing the
   /// address at which to store Val.
   void StoreValueToMemory(const GenericValue &Val, GenericValue *Ptr,
-                          const Type *Ty);
+                          Type *Ty);

   void InitializeMemory(const Constant *Init, void *Addr);

@@ -440,7 +440,7 @@ protected:

   GenericValue getConstantValue(const Constant *C);
   void LoadValueFromMemory(GenericValue &Result, GenericValue *Ptr,
-                           const Type *Ty);
+                           Type *Ty);
 };

 namespace EngineKind {

@@ -117,11 +117,11 @@ private:
   /// function is automatically inserted into the end of the function list for
   /// the module.
   ///
-  Function(const FunctionType *Ty, LinkageTypes Linkage,
+  Function(FunctionType *Ty, LinkageTypes Linkage,
            const Twine &N = "", Module *M = 0);

 public:
-  static Function *Create(const FunctionType *Ty, LinkageTypes Linkage,
+  static Function *Create(FunctionType *Ty, LinkageTypes Linkage,
                           const Twine &N = "", Module *M = 0) {
     return new(0) Function(Ty, Linkage, N, M);
   }

@@ -41,7 +41,7 @@ public:
   }
   /// GlobalAlias ctor - If a parent module is specified, the alias is
   /// automatically inserted into the end of the specified module's alias list.
-  GlobalAlias(const Type *Ty, LinkageTypes Linkage, const Twine &Name = "",
+  GlobalAlias(Type *Ty, LinkageTypes Linkage, const Twine &Name = "",
               Constant* Aliasee = 0, Module *Parent = 0);

   /// Provide fast operand accessors

@@ -57,7 +57,7 @@ public:
   };

 protected:
-  GlobalValue(const Type *ty, ValueTy vty, Use *Ops, unsigned NumOps,
+  GlobalValue(Type *ty, ValueTy vty, Use *Ops, unsigned NumOps,
               LinkageTypes linkage, const Twine &Name)
     : Constant(ty, vty, Ops, NumOps), Parent(0),
       Linkage(linkage), Visibility(DefaultVisibility), Alignment(0),

@@ -50,12 +50,12 @@ public:
   }
   /// GlobalVariable ctor - If a parent module is specified, the global is
   /// automatically inserted into the end of the specified modules global list.
-  GlobalVariable(const Type *Ty, bool isConstant, LinkageTypes Linkage,
+  GlobalVariable(Type *Ty, bool isConstant, LinkageTypes Linkage,
                  Constant *Initializer = 0, const Twine &Name = "",
                  bool ThreadLocal = false, unsigned AddressSpace = 0);
   /// GlobalVariable ctor - This creates a global and inserts it before the
   /// specified other global.
-  GlobalVariable(Module &M, const Type *Ty, bool isConstant,
+  GlobalVariable(Module &M, Type *Ty, bool isConstant,
                  LinkageTypes Linkage, Constant *Initializer,
                  const Twine &Name,
                  GlobalVariable *InsertBefore = 0, bool ThreadLocal = false,

@@ -43,7 +43,7 @@ class InlineAsm : public Value {
   bool HasSideEffects;
   bool IsAlignStack;

-  InlineAsm(const PointerType *Ty, const std::string &AsmString,
+  InlineAsm(PointerType *Ty, const std::string &AsmString,
             const std::string &Constraints, bool hasSideEffects,
             bool isAlignStack);
   virtual ~InlineAsm();
@@ -55,7 +55,7 @@ public:

   /// InlineAsm::get - Return the specified uniqued inline asm string.
   ///
-  static InlineAsm *get(const FunctionType *Ty, StringRef AsmString,
+  static InlineAsm *get(FunctionType *Ty, StringRef AsmString,
                         StringRef Constraints, bool hasSideEffects,
                         bool isAlignStack = false);

@@ -79,7 +79,7 @@ public:
   /// the specified constraint string is legal for the type. This returns true
   /// if legal, false if not.
   ///
-  static bool Verify(const FunctionType *Ty, StringRef Constraints);
+  static bool Verify(FunctionType *Ty, StringRef Constraints);

   // Constraint String Parsing
   enum ConstraintPrefix {

@@ -34,12 +34,12 @@ class LLVMContext;
 ///
 class TerminatorInst : public Instruction {
 protected:
-  TerminatorInst(const Type *Ty, Instruction::TermOps iType,
+  TerminatorInst(Type *Ty, Instruction::TermOps iType,
                  Use *Ops, unsigned NumOps,
                  Instruction *InsertBefore = 0)
     : Instruction(Ty, iType, Ops, NumOps, InsertBefore) {}

-  TerminatorInst(const Type *Ty, Instruction::TermOps iType,
+  TerminatorInst(Type *Ty, Instruction::TermOps iType,
                  Use *Ops, unsigned NumOps, BasicBlock *InsertAtEnd)
     : Instruction(Ty, iType, Ops, NumOps, InsertAtEnd) {}

@@ -91,12 +91,12 @@ class UnaryInstruction : public Instruction {
   void *operator new(size_t, unsigned); // Do not implement

 protected:
-  UnaryInstruction(const Type *Ty, unsigned iType, Value *V,
+  UnaryInstruction(Type *Ty, unsigned iType, Value *V,
                    Instruction *IB = 0)
     : Instruction(Ty, iType, &Op<0>(), 1, IB) {
     Op<0>() = V;
   }
-  UnaryInstruction(const Type *Ty, unsigned iType, Value *V, BasicBlock *IAE)
+  UnaryInstruction(Type *Ty, unsigned iType, Value *V, BasicBlock *IAE)
     : Instruction(Ty, iType, &Op<0>(), 1, IAE) {
     Op<0>() = V;
   }
@@ -141,9 +141,9 @@ class BinaryOperator : public Instruction {
   void *operator new(size_t, unsigned); // Do not implement
 protected:
   void init(BinaryOps iType);
-  BinaryOperator(BinaryOps iType, Value *S1, Value *S2, const Type *Ty,
+  BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
                  const Twine &Name, Instruction *InsertBefore);
-  BinaryOperator(BinaryOps iType, Value *S1, Value *S2, const Type *Ty,
+  BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
                  const Twine &Name, BasicBlock *InsertAtEnd);
   virtual BinaryOperator *clone_impl() const;
 public:
@@ -390,13 +390,13 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryOperator, Value)
 class CastInst : public UnaryInstruction {
 protected:
   /// @brief Constructor with insert-before-instruction semantics for subclasses
-  CastInst(const Type *Ty, unsigned iType, Value *S,
+  CastInst(Type *Ty, unsigned iType, Value *S,
            const Twine &NameStr = "", Instruction *InsertBefore = 0)
     : UnaryInstruction(Ty, iType, S, InsertBefore) {
     setName(NameStr);
   }
   /// @brief Constructor with insert-at-end-of-block semantics for subclasses
-  CastInst(const Type *Ty, unsigned iType, Value *S,
+  CastInst(Type *Ty, unsigned iType, Value *S,
            const Twine &NameStr, BasicBlock *InsertAtEnd)
     : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
     setName(NameStr);
@@ -411,7 +411,7 @@ public:
   static CastInst *Create(
     Instruction::CastOps, ///< The opcode of the cast instruction
    Value *S, ///< The value to be casted (operand 0)
-    const Type *Ty, ///< The type to which cast should be made
+    Type *Ty, ///< The type to which cast should be made
     const Twine &Name = "", ///< Name for the instruction
     Instruction *InsertBefore = 0 ///< Place to insert the instruction
   );
@@ -424,7 +424,7 @@ public:
   static CastInst *Create(
     Instruction::CastOps, ///< The opcode for the cast instruction
     Value *S, ///< The value to be casted (operand 0)
-    const Type *Ty, ///< The type to which operand is casted
+    Type *Ty, ///< The type to which operand is casted
     const Twine &Name, ///< The name for the instruction
     BasicBlock *InsertAtEnd ///< The block to insert the instruction into
   );
@@ -432,7 +432,7 @@ public:
   /// @brief Create a ZExt or BitCast cast instruction
   static CastInst *CreateZExtOrBitCast(
     Value *S, ///< The value to be casted (operand 0)
-    const Type *Ty, ///< The type to which cast should be made
+    Type *Ty, ///< The type to which cast should be made
     const Twine &Name = "", ///< Name for the instruction
     Instruction *InsertBefore = 0 ///< Place to insert the instruction
   );
@@ -440,7 +440,7 @@ public:
   /// @brief Create a ZExt or BitCast cast instruction
   static CastInst *CreateZExtOrBitCast(
     Value *S, ///< The value to be casted (operand 0)
-    const Type *Ty, ///< The type to which operand is casted
+    Type *Ty, ///< The type to which operand is casted
     const Twine &Name, ///< The name for the instruction
     BasicBlock *InsertAtEnd ///< The block to insert the instruction into
   );
@@ -448,7 +448,7 @@ public:
   /// @brief Create a SExt or BitCast cast instruction
   static CastInst *CreateSExtOrBitCast(
     Value *S, ///< The value to be casted (operand 0)
-    const Type *Ty, ///< The type to which cast should be made
+    Type *Ty, ///< The type to which cast should be made
     const Twine &Name = "", ///< Name for the instruction
     Instruction *InsertBefore = 0 ///< Place to insert the instruction
   );
@@ -456,7 +456,7 @@ public:
   /// @brief Create a SExt or BitCast cast instruction
   static CastInst *CreateSExtOrBitCast(
     Value *S, ///< The value to be casted (operand 0)
-    const Type *Ty, ///< The type to which operand is casted
+    Type *Ty, ///< The type to which operand is casted
     const Twine &Name, ///< The name for the instruction
     BasicBlock *InsertAtEnd ///< The block to insert the instruction into
   );
@@ -464,7 +464,7 @@ public:
   /// @brief Create a BitCast or a PtrToInt cast instruction
   static CastInst *CreatePointerCast(
     Value *S, ///< The pointer value to be casted (operand 0)
-    const Type *Ty, ///< The type to which operand is casted
+    Type *Ty, ///< The type to which operand is casted
     const Twine &Name, ///< The name for the instruction
     BasicBlock *InsertAtEnd ///< The block to insert the instruction into
   );
@@ -472,7 +472,7 @@ public:
   /// @brief Create a BitCast or a PtrToInt cast instruction
   static CastInst *CreatePointerCast(
     Value *S, ///< The pointer value to be casted (operand 0)
-    const Type *Ty, ///< The type to which cast should be made
+    Type *Ty, ///< The type to which cast should be made
     const Twine &Name = "", ///< Name for the instruction
     Instruction *InsertBefore = 0 ///< Place to insert the instruction
   );
@@ -480,7 +480,7 @@ public:
   /// @brief Create a ZExt, BitCast, or Trunc for int -> int casts.
   static CastInst *CreateIntegerCast(
     Value *S, ///< The pointer value to be casted (operand 0)
-    const Type *Ty, ///< The type to which cast should be made
+    Type *Ty, ///< The type to which cast should be made
     bool isSigned, ///< Whether to regard S as signed or not
     const Twine &Name = "", ///< Name for the instruction
     Instruction *InsertBefore = 0 ///< Place to insert the instruction
@@ -489,7 +489,7 @@ public:
   /// @brief Create a ZExt, BitCast, or Trunc for int -> int casts.
   static CastInst *CreateIntegerCast(
     Value *S, ///< The integer value to be casted (operand 0)
-    const Type *Ty, ///< The integer type to which operand is casted
+    Type *Ty, ///< The integer type to which operand is casted
     bool isSigned, ///< Whether to regard S as signed or not
     const Twine &Name, ///< The name for the instruction
     BasicBlock *InsertAtEnd ///< The block to insert the instruction into
@@ -498,7 +498,7 @@ public:
   /// @brief Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
   static CastInst *CreateFPCast(
     Value *S, ///< The floating point value to be casted
-    const Type *Ty, ///< The floating point type to cast to
+    Type *Ty, ///< The floating point type to cast to
     const Twine &Name = "", ///< Name for the instruction
     Instruction *InsertBefore = 0 ///< Place to insert the instruction
   );
@@ -506,7 +506,7 @@ public:
   /// @brief Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
   static CastInst *CreateFPCast(
     Value *S, ///< The floating point value to be casted
-    const Type *Ty, ///< The floating point type to cast to
+    Type *Ty, ///< The floating point type to cast to
     const Twine &Name, ///< The name for the instruction
     BasicBlock *InsertAtEnd ///< The block to insert the instruction into
   );
@@ -514,7 +514,7 @@ public:
|
||||
/// @brief Create a Trunc or BitCast cast instruction
|
||||
static CastInst *CreateTruncOrBitCast(
|
||||
Value *S, ///< The value to be casted (operand 0)
|
||||
const Type *Ty, ///< The type to which cast should be made
|
||||
Type *Ty, ///< The type to which cast should be made
|
||||
const Twine &Name = "", ///< Name for the instruction
|
||||
Instruction *InsertBefore = 0 ///< Place to insert the instruction
|
||||
);
|
||||
@ -522,15 +522,15 @@ public:
|
||||
/// @brief Create a Trunc or BitCast cast instruction
|
||||
static CastInst *CreateTruncOrBitCast(
|
||||
Value *S, ///< The value to be casted (operand 0)
|
||||
const Type *Ty, ///< The type to which operand is casted
|
||||
Type *Ty, ///< The type to which operand is casted
|
||||
const Twine &Name, ///< The name for the instruction
|
||||
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
|
||||
);
|
||||
|
||||
/// @brief Check whether it is valid to call getCastOpcode for these types.
|
||||
static bool isCastable(
|
||||
const Type *SrcTy, ///< The Type from which the value should be cast.
|
||||
const Type *DestTy ///< The Type to which the value should be cast.
|
||||
Type *SrcTy, ///< The Type from which the value should be cast.
|
||||
Type *DestTy ///< The Type to which the value should be cast.
|
||||
);
|
||||
|
||||
/// Returns the opcode necessary to cast Val into Ty using usual casting
|
||||
@ -539,7 +539,7 @@ public:
|
||||
static Instruction::CastOps getCastOpcode(
|
||||
const Value *Val, ///< The value to cast
|
||||
bool SrcIsSigned, ///< Whether to treat the source as signed
|
||||
const Type *Ty, ///< The Type to which the value should be casted
|
||||
Type *Ty, ///< The Type to which the value should be casted
|
||||
bool DstIsSigned ///< Whether to treate the dest. as signed
|
||||
);
|
||||
|
||||
@ -568,14 +568,14 @@ public:
|
||||
/// @brief Determine if the described cast is a no-op cast.
|
||||
static bool isNoopCast(
|
||||
Instruction::CastOps Opcode, ///< Opcode of cast
|
||||
const Type *SrcTy, ///< SrcTy of cast
|
||||
const Type *DstTy, ///< DstTy of cast
|
||||
const Type *IntPtrTy ///< Integer type corresponding to Ptr types, or null
|
||||
Type *SrcTy, ///< SrcTy of cast
|
||||
Type *DstTy, ///< DstTy of cast
|
||||
Type *IntPtrTy ///< Integer type corresponding to Ptr types, or null
|
||||
);
|
||||
|
||||
/// @brief Determine if this cast is a no-op cast.
|
||||
bool isNoopCast(
|
||||
const Type *IntPtrTy ///< Integer type corresponding to pointer
|
||||
Type *IntPtrTy ///< Integer type corresponding to pointer
|
||||
) const;
|
||||
|
||||
/// Determine how a pair of casts can be eliminated, if they can be at all.
|
||||
@ -587,10 +587,10 @@ public:
|
||||
static unsigned isEliminableCastPair(
|
||||
Instruction::CastOps firstOpcode, ///< Opcode of first cast
|
||||
Instruction::CastOps secondOpcode, ///< Opcode of second cast
|
||||
const Type *SrcTy, ///< SrcTy of 1st cast
|
||||
const Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast
|
||||
const Type *DstTy, ///< DstTy of 2nd cast
|
||||
const Type *IntPtrTy ///< Integer type corresponding to Ptr types, or null
|
||||
Type *SrcTy, ///< SrcTy of 1st cast
|
||||
Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast
|
||||
Type *DstTy, ///< DstTy of 2nd cast
|
||||
Type *IntPtrTy ///< Integer type corresponding to Ptr types, or null
|
||||
);
|
||||
|
||||
/// @brief Return the opcode of this CastInst
|
||||
@ -599,15 +599,15 @@ public:
|
||||
}
|
||||
|
||||
/// @brief Return the source type, as a convenience
|
||||
const Type* getSrcTy() const { return getOperand(0)->getType(); }
|
||||
Type* getSrcTy() const { return getOperand(0)->getType(); }
|
||||
/// @brief Return the destination type, as a convenience
|
||||
const Type* getDestTy() const { return getType(); }
|
||||
Type* getDestTy() const { return getType(); }
|
||||
|
||||
/// This method can be used to determine if a cast from S to DstTy using
|
||||
/// Opcode op is valid or not.
|
||||
/// @returns true iff the proposed cast is valid.
|
||||
/// @brief Determine if a cast is valid without creating one.
|
||||
static bool castIsValid(Instruction::CastOps op, Value *S, const Type *DstTy);
|
||||
static bool castIsValid(Instruction::CastOps op, Value *S, Type *DstTy);
|
||||
|
||||
/// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
|
||||
static inline bool classof(const CastInst *) { return true; }
|
||||
@ -629,11 +629,11 @@ class CmpInst : public Instruction {
|
||||
void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
|
||||
CmpInst(); // do not implement
|
||||
protected:
|
||||
CmpInst(const Type *ty, Instruction::OtherOps op, unsigned short pred,
|
||||
CmpInst(Type *ty, Instruction::OtherOps op, unsigned short pred,
|
||||
Value *LHS, Value *RHS, const Twine &Name = "",
|
||||
Instruction *InsertBefore = 0);
|
||||
|
||||
CmpInst(const Type *ty, Instruction::OtherOps op, unsigned short pred,
|
||||
CmpInst(Type *ty, Instruction::OtherOps op, unsigned short pred,
|
||||
Value *LHS, Value *RHS, const Twine &Name,
|
||||
BasicBlock *InsertAtEnd);
|
||||
|
||||
@ -825,8 +825,8 @@ public:
|
||||
}
|
||||
|
||||
/// @brief Create a result type for fcmp/icmp
|
||||
static const Type* makeCmpResultType(const Type* opnd_type) {
|
||||
if (const VectorType* vt = dyn_cast<const VectorType>(opnd_type)) {
|
||||
static Type* makeCmpResultType(Type* opnd_type) {
|
||||
if (VectorType* vt = dyn_cast<VectorType>(opnd_type)) {
|
||||
return VectorType::get(Type::getInt1Ty(opnd_type->getContext()),
|
||||
vt->getNumElements());
|
||||
}
|
||||
|
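To illustrate the hunks above, here is a minimal sketch of a caller once the CastInst factories take a plain Type*; the helper and its insertion point are hypothetical, and only signatures shown in this diff are relied on.

// Hypothetical caller, sketched against the de-constified CastInst API.
#include "llvm/InstrTypes.h"
#include "llvm/Type.h"

using namespace llvm;

// Widen V to i64 ahead of InsertPt. DestTy is now a mutable Type*, so the
// call needs no const_cast on either the source or destination type.
static Value *widenToI64(Value *V, Instruction *InsertPt) {
  Type *DestTy = Type::getInt64Ty(V->getType()->getContext());
  if (!CastInst::isCastable(V->getType(), DestTy))
    return 0;
  return CastInst::CreateZExtOrBitCast(V, DestTy, "widened", InsertPt);
}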
@@ -365,9 +365,9 @@ protected:
     return getSubclassDataFromValue() & ~HasMetadataBit;
   }

-  Instruction(const Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
+  Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
               Instruction *InsertBefore = 0);
-  Instruction(const Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
+  Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
               BasicBlock *InsertAtEnd);
   virtual Instruction *clone_impl() const = 0;

@@ -41,17 +41,17 @@ class AllocaInst : public UnaryInstruction {
 protected:
   virtual AllocaInst *clone_impl() const;
 public:
-  explicit AllocaInst(const Type *Ty, Value *ArraySize = 0,
+  explicit AllocaInst(Type *Ty, Value *ArraySize = 0,
                       const Twine &Name = "", Instruction *InsertBefore = 0);
-  AllocaInst(const Type *Ty, Value *ArraySize,
+  AllocaInst(Type *Ty, Value *ArraySize,
              const Twine &Name, BasicBlock *InsertAtEnd);

-  AllocaInst(const Type *Ty, const Twine &Name, Instruction *InsertBefore = 0);
-  AllocaInst(const Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd);
+  AllocaInst(Type *Ty, const Twine &Name, Instruction *InsertBefore = 0);
+  AllocaInst(Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd);

-  AllocaInst(const Type *Ty, Value *ArraySize, unsigned Align,
+  AllocaInst(Type *Ty, Value *ArraySize, unsigned Align,
              const Twine &Name = "", Instruction *InsertBefore = 0);
-  AllocaInst(const Type *Ty, Value *ArraySize, unsigned Align,
+  AllocaInst(Type *Ty, Value *ArraySize, unsigned Align,
              const Twine &Name, BasicBlock *InsertAtEnd);

   // Out of line virtual method, so the vtable, etc. has a home.
@@ -70,8 +70,8 @@ public:

   /// getType - Overload to return most specific pointer type
   ///
-  const PointerType *getType() const {
-    return reinterpret_cast<const PointerType*>(Instruction::getType());
+  PointerType *getType() const {
+    return reinterpret_cast<PointerType*>(Instruction::getType());
   }

   /// getAllocatedType - Return the type that is being allocated by the
@@ -275,7 +275,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
 // checkGEPType - Simple wrapper function to give a better assertion failure
 // message on bad indexes for a gep instruction.
 //
-static inline const Type *checkGEPType(const Type *Ty) {
+static inline Type *checkGEPType(Type *Ty) {
   assert(Ty && "Invalid GetElementPtrInst indices for type!");
   return Ty;
 }
@@ -316,7 +316,7 @@ class GetElementPtrInst : public Instruction {
   /// pointer type.
   ///
   template<typename RandomAccessIterator>
-  static Type *getIndexedType(const Type *Ptr,
+  static Type *getIndexedType(Type *Ptr,
                               RandomAccessIterator IdxBegin,
                               RandomAccessIterator IdxEnd,
                               // This argument ensures that we
@@ -436,8 +436,8 @@ public:
   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

   // getType - Overload to return most specific pointer type...
-  const PointerType *getType() const {
-    return reinterpret_cast<const PointerType*>(Instruction::getType());
+  PointerType *getType() const {
+    return reinterpret_cast<PointerType*>(Instruction::getType());
   }

   /// getIndexedType - Returns the type of the element that would be loaded with
@@ -447,7 +447,7 @@ public:
   /// pointer type.
   ///
   template<typename RandomAccessIterator>
-  static Type *getIndexedType(const Type *Ptr, RandomAccessIterator IdxBegin,
+  static Type *getIndexedType(Type *Ptr, RandomAccessIterator IdxBegin,
                               RandomAccessIterator IdxEnd) {
     return getIndexedType(Ptr, IdxBegin, IdxEnd,
                           typename std::iterator_traits<RandomAccessIterator>::
@@ -455,14 +455,14 @@ public:
   }

   // FIXME: Use ArrayRef
-  static Type *getIndexedType(const Type *Ptr,
+  static Type *getIndexedType(Type *Ptr,
                               Value* const *Idx, unsigned NumIdx);
-  static Type *getIndexedType(const Type *Ptr,
+  static Type *getIndexedType(Type *Ptr,
                               Constant* const *Idx, unsigned NumIdx);

-  static Type *getIndexedType(const Type *Ptr,
+  static Type *getIndexedType(Type *Ptr,
                               uint64_t const *Idx, unsigned NumIdx);
-  static Type *getIndexedType(const Type *Ptr, Value *Idx);
+  static Type *getIndexedType(Type *Ptr, Value *Idx);

   inline op_iterator idx_begin() { return op_begin()+1; }
   inline const_op_iterator idx_begin() const { return op_begin()+1; }
@@ -485,8 +485,8 @@ public:

   /// getPointerOperandType - Method to return the pointer operand as a
   /// PointerType.
-  const PointerType *getPointerOperandType() const {
-    return reinterpret_cast<const PointerType*>(getPointerOperand()->getType());
+  PointerType *getPointerOperandType() const {
+    return reinterpret_cast<PointerType*>(getPointerOperand()->getType());
   }


@@ -893,12 +893,12 @@ public:
   /// 2. Call malloc with that argument.
   /// 3. Bitcast the result of the malloc call to the specified type.
   static Instruction *CreateMalloc(Instruction *InsertBefore,
-                                   const Type *IntPtrTy, const Type *AllocTy,
+                                   Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize = 0,
                                    Function* MallocF = 0,
                                    const Twine &Name = "");
   static Instruction *CreateMalloc(BasicBlock *InsertAtEnd,
-                                   const Type *IntPtrTy, const Type *AllocTy,
+                                   Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize = 0,
                                    Function* MallocF = 0,
                                    const Twine &Name = "");
@@ -1165,12 +1165,12 @@ protected:
   virtual VAArgInst *clone_impl() const;

 public:
-  VAArgInst(Value *List, const Type *Ty, const Twine &NameStr = "",
+  VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
             Instruction *InsertBefore = 0)
     : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
     setName(NameStr);
   }
-  VAArgInst(Value *List, const Type *Ty, const Twine &NameStr,
+  VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
             BasicBlock *InsertAtEnd)
     : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
     setName(NameStr);
@@ -1226,8 +1226,8 @@ public:
   const Value *getVectorOperand() const { return Op<0>(); }
   const Value *getIndexOperand() const { return Op<1>(); }

-  const VectorType *getVectorOperandType() const {
-    return reinterpret_cast<const VectorType*>(getVectorOperand()->getType());
+  VectorType *getVectorOperandType() const {
+    return reinterpret_cast<VectorType*>(getVectorOperand()->getType());
   }


@@ -1286,8 +1286,8 @@ public:

   /// getType - Overload to return most specific vector type.
   ///
-  const VectorType *getType() const {
-    return reinterpret_cast<const VectorType*>(Instruction::getType());
+  VectorType *getType() const {
+    return reinterpret_cast<VectorType*>(Instruction::getType());
   }

   /// Transparently provide more efficient getOperand methods.
@@ -1339,8 +1339,8 @@ public:

   /// getType - Overload to return most specific vector type.
   ///
-  const VectorType *getType() const {
-    return reinterpret_cast<const VectorType*>(Instruction::getType());
+  VectorType *getType() const {
+    return reinterpret_cast<VectorType*>(Instruction::getType());
   }

   /// Transparently provide more efficient getOperand methods.
@@ -1419,7 +1419,7 @@ public:
   /// with an extractvalue instruction with the specified parameters.
   ///
   /// Null is returned if the indices are invalid for the specified type.
-  static Type *getIndexedType(const Type *Agg, ArrayRef<unsigned> Idxs);
+  static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);

   typedef const unsigned* idx_iterator;
   inline idx_iterator idx_begin() const { return Indices.begin(); }
@@ -1625,7 +1625,7 @@ class PHINode : public Instruction {
   void *operator new(size_t s) {
     return User::operator new(s, 0);
   }
-  explicit PHINode(const Type *Ty, unsigned NumReservedValues,
+  explicit PHINode(Type *Ty, unsigned NumReservedValues,
                    const Twine &NameStr = "", Instruction *InsertBefore = 0)
     : Instruction(Ty, Instruction::PHI, 0, 0, InsertBefore),
       ReservedSpace(NumReservedValues) {
@@ -1633,7 +1633,7 @@ class PHINode : public Instruction {
     OperandList = allocHungoffUses(ReservedSpace);
   }

-  PHINode(const Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
+  PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
           BasicBlock *InsertAtEnd)
     : Instruction(Ty, Instruction::PHI, 0, 0, InsertAtEnd),
       ReservedSpace(NumReservedValues) {
@@ -1650,12 +1650,12 @@ protected:
 public:
   /// Constructors - NumReservedValues is a hint for the number of incoming
   /// edges that this phi node will have (use 0 if you really have no idea).
-  static PHINode *Create(const Type *Ty, unsigned NumReservedValues,
+  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
                          const Twine &NameStr = "",
                          Instruction *InsertBefore = 0) {
     return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
   }
-  static PHINode *Create(const Type *Ty, unsigned NumReservedValues,
+  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
     return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
   }
@@ -2543,7 +2543,7 @@ public:
   /// @brief Constructor with insert-before-instruction semantics
   TruncInst(
     Value *S, ///< The value to be truncated
-    const Type *Ty, ///< The (smaller) type to truncate to
+    Type *Ty, ///< The (smaller) type to truncate to
     const Twine &NameStr = "", ///< A name for the new instruction
     Instruction *InsertBefore = 0 ///< Where to insert the new instruction
   );
@@ -2551,7 +2551,7 @@ public:
   /// @brief Constructor with insert-at-end-of-block semantics
   TruncInst(
     Value *S, ///< The value to be truncated
-    const Type *Ty, ///< The (smaller) type to truncate to
+    Type *Ty, ///< The (smaller) type to truncate to
     const Twine &NameStr, ///< A name for the new instruction
     BasicBlock *InsertAtEnd ///< The block to insert the instruction into
   );
@@ -2580,7 +2580,7 @@ public:
   /// @brief Constructor with insert-before-instruction semantics
   ZExtInst(
     Value *S, ///< The value to be zero extended
-    const Type *Ty, ///< The type to zero extend to
+    Type *Ty, ///< The type to zero extend to
     const Twine &NameStr = "", ///< A name for the new instruction
     Instruction *InsertBefore = 0 ///< Where to insert the new instruction
   );
@@ -2588,7 +2588,7 @@ public:
   /// @brief Constructor with insert-at-end semantics.
   ZExtInst(
     Value *S, ///< The value to be zero extended
-    const Type *Ty, ///< The type to zero extend to
+    Type *Ty, ///< The type to zero extend to
     const Twine &NameStr, ///< A name for the new instruction
     BasicBlock *InsertAtEnd ///< The block to insert the instruction into
   );
@@ -2617,7 +2617,7 @@ public:
   /// @brief Constructor with insert-before-instruction semantics
   SExtInst(
     Value *S, ///< The value to be sign extended
-    const Type *Ty, ///< The type to sign extend to
+    Type *Ty, ///< The type to sign extend to
     const Twine &NameStr = "", ///< A name for the new instruction
     Instruction *InsertBefore = 0 ///< Where to insert the new instruction
   );
@@ -2625,7 +2625,7 @@ public:
   /// @brief Constructor with insert-at-end-of-block semantics
   SExtInst(
     Value *S, ///< The value to be sign extended
-    const Type *Ty, ///< The type to sign extend to
+    Type *Ty, ///< The type to sign extend to
     const Twine &NameStr, ///< A name for the new instruction
     BasicBlock *InsertAtEnd ///< The block to insert the instruction into
   );
@@ -2654,7 +2654,7 @@ public:
   /// @brief Constructor with insert-before-instruction semantics
   FPTruncInst(
     Value *S, ///< The value to be truncated
-    const Type *Ty, ///< The type to truncate to
+    Type *Ty, ///< The type to truncate to
     const Twine &NameStr = "", ///< A name for the new instruction
     Instruction *InsertBefore = 0 ///< Where to insert the new instruction
   );
@@ -2662,7 +2662,7 @@ public:
   /// @brief Constructor with insert-before-instruction semantics
   FPTruncInst(
     Value *S, ///< The value to be truncated
-    const Type *Ty, ///< The type to truncate to
+    Type *Ty, ///< The type to truncate to
     const Twine &NameStr, ///< A name for the new instruction
     BasicBlock *InsertAtEnd ///< The block to insert the instruction into
   );
@@ -2691,7 +2691,7 @@ public:
   /// @brief Constructor with insert-before-instruction semantics
   FPExtInst(
     Value *S, ///< The value to be extended
-    const Type *Ty, ///< The type to extend to
+    Type *Ty, ///< The type to extend to
     const Twine &NameStr = "", ///< A name for the new instruction
     Instruction *InsertBefore = 0 ///< Where to insert the new instruction
   );
@@ -2699,7 +2699,7 @@ public:
   /// @brief Constructor with insert-at-end-of-block semantics
   FPExtInst(
     Value *S, ///< The value to be extended
-    const Type *Ty, ///< The type to extend to
+    Type *Ty, ///< The type to extend to
     const Twine &NameStr, ///< A name for the new instruction
     BasicBlock *InsertAtEnd ///< The block to insert the instruction into
   );
@@ -2728,7 +2728,7 @@ public:
   /// @brief Constructor with insert-before-instruction semantics
   UIToFPInst(
     Value *S, ///< The value to be converted
-    const Type *Ty, ///< The type to convert to
+    Type *Ty, ///< The type to convert to
     const Twine &NameStr = "", ///< A name for the new instruction
     Instruction *InsertBefore = 0 ///< Where to insert the new instruction
   );
@@ -2736,7 +2736,7 @@ public:
   /// @brief Constructor with insert-at-end-of-block semantics
   UIToFPInst(
     Value *S, ///< The value to be converted
-    const Type *Ty, ///< The type to convert to
+    Type *Ty, ///< The type to convert to
     const Twine &NameStr, ///< A name for the new instruction
     BasicBlock *InsertAtEnd ///< The block to insert the instruction into
   );
@@ -2765,7 +2765,7 @@ public:
   /// @brief Constructor with insert-before-instruction semantics
   SIToFPInst(
     Value *S, ///< The value to be converted
-    const Type *Ty, ///< The type to convert to
+    Type *Ty, ///< The type to convert to
     const Twine &NameStr = "", ///< A name for the new instruction
     Instruction *InsertBefore = 0 ///< Where to insert the new instruction
   );
@@ -2773,7 +2773,7 @@ public:
   /// @brief Constructor with insert-at-end-of-block semantics
   SIToFPInst(
     Value *S, ///< The value to be converted
-    const Type *Ty, ///< The type to convert to
+    Type *Ty, ///< The type to convert to
     const Twine &NameStr, ///< A name for the new instruction
     BasicBlock *InsertAtEnd ///< The block to insert the instruction into
   );
@@ -2802,7 +2802,7 @@ public:
   /// @brief Constructor with insert-before-instruction semantics
   FPToUIInst(
     Value *S, ///< The value to be converted
-    const Type *Ty, ///< The type to convert to
+    Type *Ty, ///< The type to convert to
     const Twine &NameStr = "", ///< A name for the new instruction
     Instruction *InsertBefore = 0 ///< Where to insert the new instruction
   );
@@ -2810,7 +2810,7 @@ public:
   /// @brief Constructor with insert-at-end-of-block semantics
   FPToUIInst(
     Value *S, ///< The value to be converted
-    const Type *Ty, ///< The type to convert to
+    Type *Ty, ///< The type to convert to
     const Twine &NameStr, ///< A name for the new instruction
     BasicBlock *InsertAtEnd ///< Where to insert the new instruction
   );
@@ -2839,7 +2839,7 @@ public:
   /// @brief Constructor with insert-before-instruction semantics
   FPToSIInst(
     Value *S, ///< The value to be converted
-    const Type *Ty, ///< The type to convert to
+    Type *Ty, ///< The type to convert to
     const Twine &NameStr = "", ///< A name for the new instruction
     Instruction *InsertBefore = 0 ///< Where to insert the new instruction
   );
@@ -2847,7 +2847,7 @@ public:
   /// @brief Constructor with insert-at-end-of-block semantics
   FPToSIInst(
     Value *S, ///< The value to be converted
-    const Type *Ty, ///< The type to convert to
+    Type *Ty, ///< The type to convert to
     const Twine &NameStr, ///< A name for the new instruction
     BasicBlock *InsertAtEnd ///< The block to insert the instruction into
   );
@@ -2872,7 +2872,7 @@ public:
   /// @brief Constructor with insert-before-instruction semantics
   IntToPtrInst(
     Value *S, ///< The value to be converted
-    const Type *Ty, ///< The type to convert to
+    Type *Ty, ///< The type to convert to
     const Twine &NameStr = "", ///< A name for the new instruction
     Instruction *InsertBefore = 0 ///< Where to insert the new instruction
   );
@@ -2880,7 +2880,7 @@ public:
   /// @brief Constructor with insert-at-end-of-block semantics
   IntToPtrInst(
     Value *S, ///< The value to be converted
-    const Type *Ty, ///< The type to convert to
+    Type *Ty, ///< The type to convert to
     const Twine &NameStr, ///< A name for the new instruction
     BasicBlock *InsertAtEnd ///< The block to insert the instruction into
   );
@@ -2912,7 +2912,7 @@ public:
   /// @brief Constructor with insert-before-instruction semantics
   PtrToIntInst(
     Value *S, ///< The value to be converted
-    const Type *Ty, ///< The type to convert to
+    Type *Ty, ///< The type to convert to
     const Twine &NameStr = "", ///< A name for the new instruction
     Instruction *InsertBefore = 0 ///< Where to insert the new instruction
   );
@@ -2920,7 +2920,7 @@ public:
   /// @brief Constructor with insert-at-end-of-block semantics
   PtrToIntInst(
     Value *S, ///< The value to be converted
-    const Type *Ty, ///< The type to convert to
+    Type *Ty, ///< The type to convert to
     const Twine &NameStr, ///< A name for the new instruction
     BasicBlock *InsertAtEnd ///< The block to insert the instruction into
   );
@@ -2949,7 +2949,7 @@ public:
   /// @brief Constructor with insert-before-instruction semantics
   BitCastInst(
     Value *S, ///< The value to be casted
-    const Type *Ty, ///< The type to casted to
+    Type *Ty, ///< The type to casted to
     const Twine &NameStr = "", ///< A name for the new instruction
     Instruction *InsertBefore = 0 ///< Where to insert the new instruction
   );
@@ -2957,7 +2957,7 @@ public:
   /// @brief Constructor with insert-at-end-of-block semantics
   BitCastInst(
     Value *S, ///< The value to be casted
-    const Type *Ty, ///< The type to casted to
+    Type *Ty, ///< The type to casted to
     const Twine &NameStr, ///< A name for the new instruction
     BasicBlock *InsertAtEnd ///< The block to insert the instruction into
   );

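A short usage sketch for the updated Instructions.h constructors; the helper below is invented for illustration, but AllocaInst and PHINode::Create are called exactly as declared above.

// Sketch: the de-constified AllocaInst and PHINode signatures in use.
#include "llvm/Instructions.h"
#include "llvm/BasicBlock.h"

using namespace llvm;

static PHINode *makeSlotAndPhi(Type *Ty, BasicBlock *Entry,
                               BasicBlock *Merge) {
  // AllocaInst now takes Type* directly; previously callers building types
  // on the fly had to cast the const away.
  new AllocaInst(Ty, "slot", Entry->getTerminator());
  // PHINode::Create likewise accepts the mutable Type*.
  return PHINode::Create(Ty, /*NumReservedValues=*/2, "merged",
                         Merge->getFirstNonPHI());
}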
@@ -170,7 +170,7 @@ namespace llvm {
       setArgOperand(4, V);
     }

-    const Type *getAlignmentType() const {
+    Type *getAlignmentType() const {
       return getArgOperand(3)->getType();
     }

@@ -49,7 +49,7 @@ namespace Intrinsic {

   /// Intrinsic::getType(ID) - Return the function type for an intrinsic.
   ///
-  const FunctionType *getType(LLVMContext &Context, ID id,
+  FunctionType *getType(LLVMContext &Context, ID id,
                               ArrayRef<Type*> Tys = ArrayRef<Type*>());

   /// Intrinsic::isOverloaded(ID) - Returns true if the intrinsic can be

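A minimal sketch of the effect of this hunk: the signature returned by Intrinsic::getType is now a mutable FunctionType* that can flow into mutating APIs without a cast. The choice of llvm.sqrt is illustrative.

// Sketch: querying an intrinsic signature as a mutable FunctionType*.
#include "llvm/Intrinsics.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include "llvm/ADT/ArrayRef.h"

using namespace llvm;

static FunctionType *sqrtSignature(LLVMContext &Ctx) {
  Type *DoubleTy = Type::getDoubleTy(Ctx);
  // llvm.sqrt is overloaded on its operand type, so one Type* is supplied.
  return Intrinsic::getType(Ctx, Intrinsic::sqrt, ArrayRef<Type*>(DoubleTy));
}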
@@ -272,10 +272,10 @@ public:
   ///      the existing function.
   ///   4. Finally, the function exists but has the wrong prototype: return the
   ///      function with a constantexpr cast to the right prototype.
-  Constant *getOrInsertFunction(StringRef Name, const FunctionType *T,
+  Constant *getOrInsertFunction(StringRef Name, FunctionType *T,
                                 AttrListPtr AttributeList);

-  Constant *getOrInsertFunction(StringRef Name, const FunctionType *T);
+  Constant *getOrInsertFunction(StringRef Name, FunctionType *T);

   /// getOrInsertFunction - Look up the specified function in the module symbol
   /// table. If it does not exist, add a prototype for the function and return
@@ -286,14 +286,14 @@ public:
   /// clients to use.
   Constant *getOrInsertFunction(StringRef Name,
                                 AttrListPtr AttributeList,
-                                const Type *RetTy, ...) END_WITH_NULL;
+                                Type *RetTy, ...) END_WITH_NULL;

   /// getOrInsertFunction - Same as above, but without the attributes.
-  Constant *getOrInsertFunction(StringRef Name, const Type *RetTy, ...)
+  Constant *getOrInsertFunction(StringRef Name, Type *RetTy, ...)
     END_WITH_NULL;

   Constant *getOrInsertTargetIntrinsic(StringRef Name,
-                                       const FunctionType *Ty,
+                                       FunctionType *Ty,
                                        AttrListPtr AttributeList);

   /// getFunction - Look up the specified function in the module symbol table.
@@ -325,7 +325,7 @@ public:
   ///      with a constantexpr cast to the right type.
   ///   3. Finally, if the existing global is the correct declaration, return
   ///      the existing global.
-  Constant *getOrInsertGlobal(StringRef Name, const Type *Ty);
+  Constant *getOrInsertGlobal(StringRef Name, Type *Ty);

   /// @}
   /// @name Global Alias Accessors

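A hedged sketch of the END_WITH_NULL overload above in use; the hook name and signature are made up for illustration.

// Sketch: declaring a function through the updated getOrInsertFunction.
#include "llvm/Module.h"
#include "llvm/Type.h"

using namespace llvm;

static Constant *declareTraceHook(Module &M) {
  LLVMContext &Ctx = M.getContext();
  // Return type first, then parameter types, terminated by a null pointer
  // (the END_WITH_NULL convention); all of them plain Type* now.
  return M.getOrInsertFunction("__trace_hook",
                               Type::getVoidTy(Ctx),
                               Type::getInt32Ty(Ctx),
                               (Type *)0);
}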
@@ -261,8 +261,8 @@ public:

   /// getPointerOperandType - Method to return the pointer operand as a
   /// PointerType.
-  const PointerType *getPointerOperandType() const {
-    return reinterpret_cast<const PointerType*>(getPointerOperand()->getType());
+  PointerType *getPointerOperandType() const {
+    return reinterpret_cast<PointerType*>(getPointerOperand()->getType());
   }

   unsigned getNumIndices() const { // Note: always non-negative

@@ -147,7 +147,7 @@ public:

   /// getType - Return the type of the instruction that generated this call site
   ///
-  const Type *getType() const { return (*this)->getType(); }
+  Type *getType() const { return (*this)->getType(); }

   /// getCaller - Return the caller function for this call site
   ///

@@ -141,37 +141,37 @@ public:
   //===--------------------------------------------------------------------===//

   Constant *CreateCast(Instruction::CastOps Op, Constant *C,
-                       const Type *DestTy) const {
+                       Type *DestTy) const {
     return ConstantExpr::getCast(Op, C, DestTy);
   }
-  Constant *CreatePointerCast(Constant *C, const Type *DestTy) const {
+  Constant *CreatePointerCast(Constant *C, Type *DestTy) const {
     return ConstantExpr::getPointerCast(C, DestTy);
   }
-  Constant *CreateIntCast(Constant *C, const Type *DestTy,
+  Constant *CreateIntCast(Constant *C, Type *DestTy,
                           bool isSigned) const {
     return ConstantExpr::getIntegerCast(C, DestTy, isSigned);
   }
-  Constant *CreateFPCast(Constant *C, const Type *DestTy) const {
+  Constant *CreateFPCast(Constant *C, Type *DestTy) const {
     return ConstantExpr::getFPCast(C, DestTy);
   }

-  Constant *CreateBitCast(Constant *C, const Type *DestTy) const {
+  Constant *CreateBitCast(Constant *C, Type *DestTy) const {
     return CreateCast(Instruction::BitCast, C, DestTy);
   }
-  Constant *CreateIntToPtr(Constant *C, const Type *DestTy) const {
+  Constant *CreateIntToPtr(Constant *C, Type *DestTy) const {
     return CreateCast(Instruction::IntToPtr, C, DestTy);
   }
-  Constant *CreatePtrToInt(Constant *C, const Type *DestTy) const {
+  Constant *CreatePtrToInt(Constant *C, Type *DestTy) const {
     return CreateCast(Instruction::PtrToInt, C, DestTy);
   }
-  Constant *CreateZExtOrBitCast(Constant *C, const Type *DestTy) const {
+  Constant *CreateZExtOrBitCast(Constant *C, Type *DestTy) const {
     return ConstantExpr::getZExtOrBitCast(C, DestTy);
   }
-  Constant *CreateSExtOrBitCast(Constant *C, const Type *DestTy) const {
+  Constant *CreateSExtOrBitCast(Constant *C, Type *DestTy) const {
     return ConstantExpr::getSExtOrBitCast(C, DestTy);
   }

-  Constant *CreateTruncOrBitCast(Constant *C, const Type *DestTy) const {
+  Constant *CreateTruncOrBitCast(Constant *C, Type *DestTy) const {
     return ConstantExpr::getTruncOrBitCast(C, DestTy);
   }

@@ -21,16 +21,16 @@
 namespace llvm {
   template<typename ItTy = User::const_op_iterator>
   class generic_gep_type_iterator
-    : public std::iterator<std::forward_iterator_tag, const Type *, ptrdiff_t> {
+    : public std::iterator<std::forward_iterator_tag, Type *, ptrdiff_t> {
     typedef std::iterator<std::forward_iterator_tag,
-                          const Type *, ptrdiff_t> super;
+                          Type *, ptrdiff_t> super;

     ItTy OpIt;
-    const Type *CurTy;
+    Type *CurTy;
     generic_gep_type_iterator() {}
   public:

-    static generic_gep_type_iterator begin(const Type *Ty, ItTy It) {
+    static generic_gep_type_iterator begin(Type *Ty, ItTy It) {
       generic_gep_type_iterator I;
       I.CurTy = Ty;
       I.OpIt = It;
@@ -50,23 +50,23 @@ namespace llvm {
       return !operator==(x);
     }

-    const Type *operator*() const {
+    Type *operator*() const {
       return CurTy;
     }

-    const Type *getIndexedType() const {
-      const CompositeType *CT = cast<CompositeType>(CurTy);
+    Type *getIndexedType() const {
+      CompositeType *CT = cast<CompositeType>(CurTy);
       return CT->getTypeAtIndex(getOperand());
     }

     // This is a non-standard operator->. It allows you to call methods on the
     // current type directly.
-    const Type *operator->() const { return operator*(); }
+    Type *operator->() const { return operator*(); }

     Value *getOperand() const { return *OpIt; }

     generic_gep_type_iterator& operator++() {   // Preincrement
-      if (const CompositeType *CT = dyn_cast<CompositeType>(CurTy)) {
+      if (CompositeType *CT = dyn_cast<CompositeType>(CurTy)) {
         CurTy = CT->getTypeAtIndex(getOperand());
       } else {
         CurTy = 0;
@@ -99,13 +99,13 @@ namespace llvm {

   template<typename ItTy>
   inline generic_gep_type_iterator<ItTy>
-  gep_type_begin(const Type *Op0, ItTy I, ItTy E) {
+  gep_type_begin(Type *Op0, ItTy I, ItTy E) {
     return generic_gep_type_iterator<ItTy>::begin(Op0, I);
   }

   template<typename ItTy>
   inline generic_gep_type_iterator<ItTy>
-  gep_type_end(const Type *Op0, ItTy I, ItTy E) {
+  gep_type_end(Type *Op0, ItTy I, ItTy E) {
     return generic_gep_type_iterator<ItTy>::end(E);
   }
 } // end namespace llvm

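A small sketch of the de-constified iterator in use, assuming the usual gep_type_begin/gep_type_end convenience overloads over a GEP instruction from this same header.

// Sketch: walking the types a GEP steps through; operator* now yields Type*.
#include "llvm/Instructions.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"

using namespace llvm;

static bool indexesIntoStruct(GetElementPtrInst *GEP) {
  for (gep_type_iterator GTI = gep_type_begin(GEP), E = gep_type_end(GEP);
       GTI != E; ++GTI) {
    Type *Ty = *GTI;        // was const Type* before this patch
    if (Ty->isStructTy())
      return true;
  }
  return false;
}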
@@ -744,7 +744,7 @@ public:
   // Instruction creation methods: Memory Instructions
   //===--------------------------------------------------------------------===//

-  AllocaInst *CreateAlloca(const Type *Ty, Value *ArraySize = 0,
+  AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = 0,
                            const Twine &Name = "") {
     return Insert(new AllocaInst(Ty, ArraySize), Name);
   }
@@ -910,47 +910,47 @@ public:
   // Instruction creation methods: Cast/Conversion Operators
   //===--------------------------------------------------------------------===//

-  Value *CreateTrunc(Value *V, const Type *DestTy, const Twine &Name = "") {
+  Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") {
     return CreateCast(Instruction::Trunc, V, DestTy, Name);
   }
-  Value *CreateZExt(Value *V, const Type *DestTy, const Twine &Name = "") {
+  Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") {
     return CreateCast(Instruction::ZExt, V, DestTy, Name);
   }
-  Value *CreateSExt(Value *V, const Type *DestTy, const Twine &Name = "") {
+  Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
     return CreateCast(Instruction::SExt, V, DestTy, Name);
   }
-  Value *CreateFPToUI(Value *V, const Type *DestTy, const Twine &Name = ""){
+  Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = ""){
     return CreateCast(Instruction::FPToUI, V, DestTy, Name);
   }
-  Value *CreateFPToSI(Value *V, const Type *DestTy, const Twine &Name = ""){
+  Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = ""){
     return CreateCast(Instruction::FPToSI, V, DestTy, Name);
   }
-  Value *CreateUIToFP(Value *V, const Type *DestTy, const Twine &Name = ""){
+  Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
     return CreateCast(Instruction::UIToFP, V, DestTy, Name);
   }
-  Value *CreateSIToFP(Value *V, const Type *DestTy, const Twine &Name = ""){
+  Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
     return CreateCast(Instruction::SIToFP, V, DestTy, Name);
   }
-  Value *CreateFPTrunc(Value *V, const Type *DestTy,
+  Value *CreateFPTrunc(Value *V, Type *DestTy,
                        const Twine &Name = "") {
     return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
   }
-  Value *CreateFPExt(Value *V, const Type *DestTy, const Twine &Name = "") {
+  Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
     return CreateCast(Instruction::FPExt, V, DestTy, Name);
   }
-  Value *CreatePtrToInt(Value *V, const Type *DestTy,
+  Value *CreatePtrToInt(Value *V, Type *DestTy,
                         const Twine &Name = "") {
     return CreateCast(Instruction::PtrToInt, V, DestTy, Name);
   }
-  Value *CreateIntToPtr(Value *V, const Type *DestTy,
+  Value *CreateIntToPtr(Value *V, Type *DestTy,
                         const Twine &Name = "") {
     return CreateCast(Instruction::IntToPtr, V, DestTy, Name);
   }
-  Value *CreateBitCast(Value *V, const Type *DestTy,
+  Value *CreateBitCast(Value *V, Type *DestTy,
                        const Twine &Name = "") {
     return CreateCast(Instruction::BitCast, V, DestTy, Name);
   }
-  Value *CreateZExtOrBitCast(Value *V, const Type *DestTy,
+  Value *CreateZExtOrBitCast(Value *V, Type *DestTy,
                              const Twine &Name = "") {
     if (V->getType() == DestTy)
       return V;
@@ -958,7 +958,7 @@ public:
       return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name);
     return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name);
   }
-  Value *CreateSExtOrBitCast(Value *V, const Type *DestTy,
+  Value *CreateSExtOrBitCast(Value *V, Type *DestTy,
                              const Twine &Name = "") {
     if (V->getType() == DestTy)
       return V;
@@ -966,7 +966,7 @@ public:
       return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name);
     return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name);
   }
-  Value *CreateTruncOrBitCast(Value *V, const Type *DestTy,
+  Value *CreateTruncOrBitCast(Value *V, Type *DestTy,
                               const Twine &Name = "") {
     if (V->getType() == DestTy)
       return V;
@@ -974,7 +974,7 @@ public:
       return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name);
     return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name);
   }
-  Value *CreateCast(Instruction::CastOps Op, Value *V, const Type *DestTy,
+  Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy,
                     const Twine &Name = "") {
     if (V->getType() == DestTy)
       return V;
@@ -982,7 +982,7 @@ public:
       return Insert(Folder.CreateCast(Op, VC, DestTy), Name);
     return Insert(CastInst::Create(Op, V, DestTy), Name);
   }
-  Value *CreatePointerCast(Value *V, const Type *DestTy,
+  Value *CreatePointerCast(Value *V, Type *DestTy,
                            const Twine &Name = "") {
     if (V->getType() == DestTy)
       return V;
@@ -990,7 +990,7 @@ public:
       return Insert(Folder.CreatePointerCast(VC, DestTy), Name);
     return Insert(CastInst::CreatePointerCast(V, DestTy), Name);
   }
-  Value *CreateIntCast(Value *V, const Type *DestTy, bool isSigned,
+  Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned,
                        const Twine &Name = "") {
     if (V->getType() == DestTy)
       return V;
@@ -1001,9 +1001,9 @@ public:
 private:
   // Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a compile time
   // error, instead of converting the string to bool for the isSigned parameter.
-  Value *CreateIntCast(Value *, const Type *, const char *); // DO NOT IMPLEMENT
+  Value *CreateIntCast(Value *, Type *, const char *); // DO NOT IMPLEMENT
 public:
-  Value *CreateFPCast(Value *V, const Type *DestTy, const Twine &Name = "") {
+  Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
     if (V->getType() == DestTy)
       return V;
     if (Constant *VC = dyn_cast<Constant>(V))
@@ -1108,7 +1108,7 @@ public:
   // Instruction creation methods: Other Instructions
   //===--------------------------------------------------------------------===//

-  PHINode *CreatePHI(const Type *Ty, unsigned NumReservedValues,
+  PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues,
                      const Twine &Name = "") {
     return Insert(PHINode::Create(Ty, NumReservedValues), Name);
   }
@@ -1154,7 +1154,7 @@ public:
     return Insert(SelectInst::Create(C, True, False), Name);
   }

-  VAArgInst *CreateVAArg(Value *List, const Type *Ty, const Twine &Name = "") {
+  VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") {
     return Insert(new VAArgInst(List, Ty), Name);
   }

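A brief sketch against the updated builder helpers; it assumes the builder's getInt8Ty/getInt32Ty accessors, which hand back the now-mutable integer types.

// Sketch: IRBuilder cast helpers with plain Type* destination types.
#include "llvm/Support/IRBuilder.h"

using namespace llvm;

static Value *extractLowByte(IRBuilder<> &B, Value *V) {
  Value *Lo = B.CreateTrunc(V, B.getInt8Ty(), "lo8");   // no const_cast
  return B.CreateZExt(Lo, B.getInt32Ty(), "lo8.zext");  // back to i32
}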
@@ -200,37 +200,37 @@ public:
   //===--------------------------------------------------------------------===//

   Instruction *CreateCast(Instruction::CastOps Op, Constant *C,
-                          const Type *DestTy) const {
+                          Type *DestTy) const {
     return CastInst::Create(Op, C, DestTy);
   }
-  Instruction *CreatePointerCast(Constant *C, const Type *DestTy) const {
+  Instruction *CreatePointerCast(Constant *C, Type *DestTy) const {
     return CastInst::CreatePointerCast(C, DestTy);
   }
-  Instruction *CreateIntCast(Constant *C, const Type *DestTy,
+  Instruction *CreateIntCast(Constant *C, Type *DestTy,
                              bool isSigned) const {
     return CastInst::CreateIntegerCast(C, DestTy, isSigned);
   }
-  Instruction *CreateFPCast(Constant *C, const Type *DestTy) const {
+  Instruction *CreateFPCast(Constant *C, Type *DestTy) const {
     return CastInst::CreateFPCast(C, DestTy);
   }

-  Instruction *CreateBitCast(Constant *C, const Type *DestTy) const {
+  Instruction *CreateBitCast(Constant *C, Type *DestTy) const {
     return CreateCast(Instruction::BitCast, C, DestTy);
   }
-  Instruction *CreateIntToPtr(Constant *C, const Type *DestTy) const {
+  Instruction *CreateIntToPtr(Constant *C, Type *DestTy) const {
     return CreateCast(Instruction::IntToPtr, C, DestTy);
   }
-  Instruction *CreatePtrToInt(Constant *C, const Type *DestTy) const {
+  Instruction *CreatePtrToInt(Constant *C, Type *DestTy) const {
     return CreateCast(Instruction::PtrToInt, C, DestTy);
   }
-  Instruction *CreateZExtOrBitCast(Constant *C, const Type *DestTy) const {
+  Instruction *CreateZExtOrBitCast(Constant *C, Type *DestTy) const {
     return CastInst::CreateZExtOrBitCast(C, DestTy);
   }
-  Instruction *CreateSExtOrBitCast(Constant *C, const Type *DestTy) const {
+  Instruction *CreateSExtOrBitCast(Constant *C, Type *DestTy) const {
     return CastInst::CreateSExtOrBitCast(C, DestTy);
   }

-  Instruction *CreateTruncOrBitCast(Constant *C, const Type *DestTy) const {
+  Instruction *CreateTruncOrBitCast(Constant *C, Type *DestTy) const {
     return CastInst::CreateTruncOrBitCast(C, DestTy);
   }

@@ -153,40 +153,40 @@ public:
   //===--------------------------------------------------------------------===//

   Constant *CreateCast(Instruction::CastOps Op, Constant *C,
-                       const Type *DestTy) const {
+                       Type *DestTy) const {
     if (C->getType() == DestTy)
       return C; // avoid calling Fold
     return Fold(ConstantExpr::getCast(Op, C, DestTy));
   }
-  Constant *CreateIntCast(Constant *C, const Type *DestTy,
+  Constant *CreateIntCast(Constant *C, Type *DestTy,
                           bool isSigned) const {
     if (C->getType() == DestTy)
       return C; // avoid calling Fold
     return Fold(ConstantExpr::getIntegerCast(C, DestTy, isSigned));
   }
-  Constant *CreatePointerCast(Constant *C, const Type *DestTy) const {
+  Constant *CreatePointerCast(Constant *C, Type *DestTy) const {
     return ConstantExpr::getPointerCast(C, DestTy);
   }
-  Constant *CreateBitCast(Constant *C, const Type *DestTy) const {
+  Constant *CreateBitCast(Constant *C, Type *DestTy) const {
     return CreateCast(Instruction::BitCast, C, DestTy);
   }
-  Constant *CreateIntToPtr(Constant *C, const Type *DestTy) const {
+  Constant *CreateIntToPtr(Constant *C, Type *DestTy) const {
     return CreateCast(Instruction::IntToPtr, C, DestTy);
   }
-  Constant *CreatePtrToInt(Constant *C, const Type *DestTy) const {
+  Constant *CreatePtrToInt(Constant *C, Type *DestTy) const {
     return CreateCast(Instruction::PtrToInt, C, DestTy);
   }
-  Constant *CreateZExtOrBitCast(Constant *C, const Type *DestTy) const {
+  Constant *CreateZExtOrBitCast(Constant *C, Type *DestTy) const {
     if (C->getType() == DestTy)
       return C; // avoid calling Fold
     return Fold(ConstantExpr::getZExtOrBitCast(C, DestTy));
   }
-  Constant *CreateSExtOrBitCast(Constant *C, const Type *DestTy) const {
+  Constant *CreateSExtOrBitCast(Constant *C, Type *DestTy) const {
     if (C->getType() == DestTy)
       return C; // avoid calling Fold
     return Fold(ConstantExpr::getSExtOrBitCast(C, DestTy));
   }
-  Constant *CreateTruncOrBitCast(Constant *C, const Type *DestTy) const {
+  Constant *CreateTruncOrBitCast(Constant *C, Type *DestTy) const {
     if (C->getType() == DestTy)
       return C; // avoid calling Fold
     return Fold(ConstantExpr::getTruncOrBitCast(C, DestTy));

@@ -90,9 +90,9 @@ private:
   void setAlignment(AlignTypeEnum align_type, unsigned abi_align,
                     unsigned pref_align, uint32_t bit_width);
   unsigned getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
-                            bool ABIAlign, const Type *Ty) const;
+                            bool ABIAlign, Type *Ty) const;
   //! Internal helper method that returns requested alignment for type.
-  unsigned getAlignment(const Type *Ty, bool abi_or_pref) const;
+  unsigned getAlignment(Type *Ty, bool abi_or_pref) const;

   /// Valid alignment predicate.
   ///
@@ -200,19 +200,19 @@ public:

   /// getTypeSizeInBits - Return the number of bits necessary to hold the
   /// specified type. For example, returns 36 for i36 and 80 for x86_fp80.
-  uint64_t getTypeSizeInBits(const Type* Ty) const;
+  uint64_t getTypeSizeInBits(Type* Ty) const;

   /// getTypeStoreSize - Return the maximum number of bytes that may be
   /// overwritten by storing the specified type. For example, returns 5
   /// for i36 and 10 for x86_fp80.
-  uint64_t getTypeStoreSize(const Type *Ty) const {
+  uint64_t getTypeStoreSize(Type *Ty) const {
     return (getTypeSizeInBits(Ty)+7)/8;
   }

   /// getTypeStoreSizeInBits - Return the maximum number of bits that may be
   /// overwritten by storing the specified type; always a multiple of 8. For
   /// example, returns 40 for i36 and 80 for x86_fp80.
-  uint64_t getTypeStoreSizeInBits(const Type *Ty) const {
+  uint64_t getTypeStoreSizeInBits(Type *Ty) const {
     return 8*getTypeStoreSize(Ty);
   }

@@ -220,7 +220,7 @@ public:
   /// of the specified type, including alignment padding. This is the amount
   /// that alloca reserves for this type. For example, returns 12 or 16 for
   /// x86_fp80, depending on alignment.
-  uint64_t getTypeAllocSize(const Type* Ty) const {
+  uint64_t getTypeAllocSize(Type* Ty) const {
     // Round up to the next alignment boundary.
     return RoundUpAlignment(getTypeStoreSize(Ty), getABITypeAlignment(Ty));
   }
@@ -229,13 +229,13 @@ public:
   /// objects of the specified type, including alignment padding; always a
   /// multiple of 8. This is the amount that alloca reserves for this type.
   /// For example, returns 96 or 128 for x86_fp80, depending on alignment.
-  uint64_t getTypeAllocSizeInBits(const Type* Ty) const {
+  uint64_t getTypeAllocSizeInBits(Type* Ty) const {
     return 8*getTypeAllocSize(Ty);
   }

   /// getABITypeAlignment - Return the minimum ABI-required alignment for the
   /// specified type.
-  unsigned getABITypeAlignment(const Type *Ty) const;
+  unsigned getABITypeAlignment(Type *Ty) const;

   /// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for
   /// an integer type of the specified bitwidth.
@@ -244,17 +244,17 @@ public:

   /// getCallFrameTypeAlignment - Return the minimum ABI-required alignment
   /// for the specified type when it is part of a call frame.
-  unsigned getCallFrameTypeAlignment(const Type *Ty) const;
+  unsigned getCallFrameTypeAlignment(Type *Ty) const;


   /// getPrefTypeAlignment - Return the preferred stack/global alignment for
   /// the specified type. This is always at least as good as the ABI alignment.
-  unsigned getPrefTypeAlignment(const Type *Ty) const;
+  unsigned getPrefTypeAlignment(Type *Ty) const;

   /// getPreferredTypeAlignmentShift - Return the preferred alignment for the
   /// specified type, returned as log2 of the value (a shift amount).
   ///
-  unsigned getPreferredTypeAlignmentShift(const Type *Ty) const;
+  unsigned getPreferredTypeAlignmentShift(Type *Ty) const;

   /// getIntPtrType - Return an unsigned integer type that is the same size or
   /// greater to the host pointer size.
@@ -264,13 +264,13 @@ public:
   /// getIndexedOffset - return the offset from the beginning of the type for
   /// the specified indices. This is used to implement getelementptr.
   ///
-  uint64_t getIndexedOffset(const Type *Ty,
+  uint64_t getIndexedOffset(Type *Ty,
                             Value* const* Indices, unsigned NumIndices) const;

   /// getStructLayout - Return a StructLayout object, indicating the alignment
   /// of the struct, its size, and the offsets of its fields. Note that this
   /// information is lazily cached.
-  const StructLayout *getStructLayout(const StructType *Ty) const;
+  const StructLayout *getStructLayout(StructType *Ty) const;

   /// getPreferredAlignment - Return the preferred alignment of the specified
   /// global. This includes an explicitly requested alignment (if the global
@@ -333,7 +333,7 @@ public:

 private:
   friend class TargetData; // Only TargetData can create this class
-  StructLayout(const StructType *ST, const TargetData &TD);
+  StructLayout(StructType *ST, const TargetData &TD);
 };

 } // End llvm namespace

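A sketch of the query side of TargetData after this change; note that the TargetData object itself stays const, and only the inspected types lose their const.

// Sketch: size and layout queries after de-constification.
#include "llvm/Target/TargetData.h"
#include "llvm/DerivedTypes.h"

using namespace llvm;

static uint64_t paddedStructBytes(const TargetData &TD, StructType *ST) {
  // getStructLayout now takes StructType*; the layout it returns is still
  // owned and cached by the TargetData itself.
  const StructLayout *SL = TD.getStructLayout(ST);
  return SL->getSizeInBytes(); // matches TD.getTypeAllocSize(ST)
}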
@@ -39,7 +39,7 @@ public:
   /// intrinsic, Tys should point to an array of numTys pointers to Type,
   /// and must provide exactly one type for each overloaded type in the
   /// intrinsic.
-  virtual std::string getName(unsigned IID, const Type **Tys = 0,
+  virtual std::string getName(unsigned IID, Type **Tys = 0,
                               unsigned numTys = 0) const = 0;

   /// Look up target intrinsic by name. Return intrinsic ID or 0 for unknown
@@ -55,7 +55,7 @@ public:
   /// Create or insert an LLVM Function declaration for an intrinsic,
   /// and return it. The Tys and numTys are for intrinsics with overloaded
   /// types. See above for more information.
-  virtual Function *getDeclaration(Module *M, unsigned ID, const Type **Tys = 0,
+  virtual Function *getDeclaration(Module *M, unsigned ID, Type **Tys = 0,
                                    unsigned numTys = 0) const = 0;
 };

@ -501,7 +501,7 @@ public:
/// This is fixed by the LLVM operations except for the pointer size. If
/// AllowUnknown is true, this will return MVT::Other for types with no EVT
/// counterpart (e.g. structs), otherwise it will assert.
EVT getValueType(const Type *Ty, bool AllowUnknown = false) const {
EVT getValueType(Type *Ty, bool AllowUnknown = false) const {
EVT VT = EVT::getEVT(Ty, AllowUnknown);
return VT == MVT::iPTR ? PointerTy : VT;
}
@ -509,7 +509,7 @@ public:
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
virtual unsigned getByValTypeAlignment(const Type *Ty) const;
virtual unsigned getByValTypeAlignment(Type *Ty) const;

/// getRegisterType - Return the type of registers that this ValueType will
/// eventually require.
@ -1166,7 +1166,7 @@ public:
/// lowering.
struct ArgListEntry {
SDValue Node;
const Type* Ty;
Type* Ty;
bool isSExt : 1;
bool isZExt : 1;
bool isInReg : 1;
@ -1180,7 +1180,7 @@ public:
};
typedef std::vector<ArgListEntry> ArgListTy;
std::pair<SDValue, SDValue>
LowerCallTo(SDValue Chain, const Type *RetTy, bool RetSExt, bool RetZExt,
LowerCallTo(SDValue Chain, Type *RetTy, bool RetSExt, bool RetZExt,
bool isVarArg, bool isInreg, unsigned NumFixedArgs,
CallingConv::ID CallConv, bool isTailCall,
bool isReturnValueUsed, SDValue Callee, ArgListTy &Args,
@ -1485,12 +1485,12 @@ public:
/// The type may be VoidTy, in which case only return true if the addressing
/// mode is legal for a load/store of any legal type.
/// TODO: Handle pre/postinc as well.
virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const;
virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;

/// isTruncateFree - Return true if it's free to truncate a value of
/// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in
/// register EAX to i16 by referencing its sub-register AX.
virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const {
virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const {
return false;
}

@ -1506,7 +1506,7 @@ public:
/// does not necessarily apply to truncate instructions. e.g. on x86-64,
/// all instructions that define 32-bit values implicitly zero-extend the
/// result out to 64 bits.
virtual bool isZExtFree(const Type *Ty1, const Type *Ty2) const {
virtual bool isZExtFree(Type *Ty1, Type *Ty2) const {
return false;
}

@ -1963,7 +1963,7 @@ private:
/// GetReturnInfo - Given an LLVM IR type and return type attributes,
/// compute the return value EVTs and flags, and optionally also
/// the offsets, if the return value is being lowered to memory.
void GetReturnInfo(const Type* ReturnType, Attributes attr,
void GetReturnInfo(Type* ReturnType, Attributes attr,
SmallVectorImpl<ISD::OutputArg> &Outs,
const TargetLowering &TLI,
SmallVectorImpl<uint64_t> *Offsets = 0);
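A small sketch of the de-constified TargetLowering queries above (assumptions: a TargetLowering reference and a populated AddrMode are in scope; the helper name is illustrative):

#include "llvm/Target/TargetLowering.h"
using namespace llvm;

// Both queries now take a mutable Type*; getValueType with AllowUnknown set
// returns MVT::Other for types with no EVT counterpart instead of asserting.
static bool legalModeForAccess(const TargetLowering &TLI,
                               const TargetLowering::AddrMode &AM, Type *Ty) {
  EVT VT = TLI.getValueType(Ty, /*AllowUnknown=*/true);
  (void)VT;
  return TLI.isLegalAddressingMode(AM, Ty);
}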
@ -58,7 +58,7 @@ class AddressingModeMatcher {

/// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
/// the memory instruction that we're computing this address for.
const Type *AccessTy;
Type *AccessTy;
Instruction *MemoryInst;

/// AddrMode - This is the addressing mode that we're building up. This is
@ -71,7 +71,7 @@ class AddressingModeMatcher {
bool IgnoreProfitability;

AddressingModeMatcher(SmallVectorImpl<Instruction*> &AMI,
const TargetLowering &T, const Type *AT,
const TargetLowering &T, Type *AT,
Instruction *MI, ExtAddrMode &AM)
: AddrModeInsts(AMI), TLI(T), AccessTy(AT), MemoryInst(MI), AddrMode(AM) {
IgnoreProfitability = false;
@ -81,7 +81,7 @@ public:
/// Match - Find the maximal addressing mode that a load/store of V can fold,
/// given an access type of AccessTy. This returns a list of involved
/// instructions in AddrModeInsts.
static ExtAddrMode Match(Value *V, const Type *AccessTy,
static ExtAddrMode Match(Value *V, Type *AccessTy,
Instruction *MemoryInst,
SmallVectorImpl<Instruction*> &AddrModeInsts,
const TargetLowering &TLI) {
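For orientation, a hedged call sketch for Match (note: AddressingModeMatcher is file-local to CodeGenPrepare.cpp, so this only compiles inside that file; the wrapper name is illustrative):

// AccessTy is now a plain Type*, matching the de-constified Match above.
static ExtAddrMode matchAddr(Value *V, Type *AccessTy, Instruction *MemoryInst,
                             SmallVectorImpl<Instruction*> &AddrModeInsts,
                             const TargetLowering &TLI) {
  return AddressingModeMatcher::Match(V, AccessTy, MemoryInst,
                                      AddrModeInsts, TLI);
}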
@ -39,7 +39,7 @@ private:
void *AV;

/// ProtoType holds the type of the values being rewritten.
const Type *ProtoType;
Type *ProtoType;

// PHI nodes are given a name based on ProtoName.
std::string ProtoName;
@ -56,7 +56,7 @@ public:

/// Initialize - Reset this object to get ready for a new set of SSA
/// updates with type 'Ty'. PHI nodes get a name based on 'Name'.
void Initialize(const Type *Ty, StringRef Name);
void Initialize(Type *Ty, StringRef Name);

/// AddAvailableValue - Indicate that a rewritten value is available at the
/// end of the specified block with the specified value.
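A hedged usage sketch for the updater interface above (assumption: judging by ProtoType/ProtoName and Initialize, this hunk is LLVM's SSAUpdater; the header path and helper name are inferred, not stated in the diff):

#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;

// Ty no longer needs to be const-qualified when seeding the updater.
static void seedUpdater(SSAUpdater &SSA, Type *Ty, BasicBlock *BB, Value *V) {
  SSA.Initialize(Ty, "promoted");  // later PHIs are named after "promoted"
  SSA.AddAvailableValue(BB, V);
}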
@ -193,7 +193,7 @@ public:
/// are valid for types of the same size only where no re-interpretation of
/// the bits is done.
/// @brief Determine if this type could be losslessly bitcast to Ty
bool canLosslesslyBitCastTo(const Type *Ty) const;
bool canLosslesslyBitCastTo(Type *Ty) const;

/// isEmptyTy - Return true if this type is empty, that is, it has no
/// elements or all its elements are empty.
@ -262,7 +262,7 @@ public:
/// getScalarSizeInBits - If this is a vector type, return the
/// getPrimitiveSizeInBits value for the element type. Otherwise return the
/// getPrimitiveSizeInBits value for this type.
unsigned getScalarSizeInBits() const;
unsigned getScalarSizeInBits();

/// getFPMantissaWidth - Return the width of the mantissa of this type. This
/// is only valid on floating point types. If the FP type does not
@ -271,7 +271,7 @@ public:

/// getScalarType - If this is a vector type, return the element type,
/// otherwise return 'this'.
const Type *getScalarType() const;
Type *getScalarType();

//===--------------------------------------------------------------------===//
// Type Iteration support.
@ -342,7 +342,7 @@ public:

/// getPointerTo - Return a pointer to the current type. This is equivalent
/// to PointerType::get(Foo, AddrSpace).
PointerType *getPointerTo(unsigned AddrSpace = 0) const;
PointerType *getPointerTo(unsigned AddrSpace = 0);

private:
/// isSizedDerivedType - Derived types like structures and arrays are sized
@ -352,7 +352,7 @@ private:
};

// Printing of types.
static inline raw_ostream &operator<<(raw_ostream &OS, const Type &T) {
static inline raw_ostream &operator<<(raw_ostream &OS, Type &T) {
T.print(OS);
return OS;
}
@ -387,7 +387,7 @@ template <> struct GraphTraits<const Type*> {
typedef const Type NodeType;
typedef Type::subtype_iterator ChildIteratorType;

static inline NodeType *getEntryNode(const Type *T) { return T; }
static inline NodeType *getEntryNode(NodeType *T) { return T; }
static inline ChildIteratorType child_begin(NodeType *N) {
return N->subtype_begin();
}
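A short sketch tying the Type accessors above together (assumptions: 3.0-era header paths; the helper name is illustrative):

#include "llvm/Type.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// The accessors now hand back mutable types, so results feed directly into
// type-construction APIs without a const_cast.
static PointerType *scalarPtr(Type *Ty) {
  Type *Scalar = Ty->getScalarType();  // element type for vectors, else Ty
  errs() << *Scalar << '\n';           // operator<< now binds to Type&
  return Scalar->getPointerTo(0);      // same as PointerType::get(Scalar, 0)
}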
@ -47,7 +47,7 @@ protected:
unsigned NumOperands;

void *operator new(size_t s, unsigned Us);
User(const Type *ty, unsigned vty, Use *OpList, unsigned NumOps)
User(Type *ty, unsigned vty, Use *OpList, unsigned NumOps)
: Value(ty, vty), OperandList(OpList), NumOperands(NumOps) {}
Use *allocHungoffUses(unsigned) const;
void dropHungoffUses() {

@ -91,7 +91,7 @@ protected:
/// printing behavior.
virtual void printCustom(raw_ostream &O) const;

Value(const Type *Ty, unsigned scid);
Value(Type *Ty, unsigned scid);
public:
virtual ~Value();

@ -341,7 +341,7 @@ void AliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
/// getTypeStoreSize - Return the TargetData store size for the given type,
/// if known, or a conservative value otherwise.
///
uint64_t AliasAnalysis::getTypeStoreSize(const Type *Ty) {
uint64_t AliasAnalysis::getTypeStoreSize(Type *Ty) {
return TD ? TD->getTypeStoreSize(Ty) : UnknownSize;
}
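A hedged caller sketch for the updated getTypeStoreSize, mirroring the call sites in the hunks that follow (the helper name is illustrative):

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/DerivedTypes.h"
using namespace llvm;

// Store size of the pointee, or UnknownSize when the type is unsized.
static uint64_t pointeeStoreSize(AliasAnalysis &AA, Value *Ptr) {
  Type *ElTy = cast<PointerType>(Ptr->getType())->getElementType();
  return ElTy->isSized() ? AA.getTypeStoreSize(ElTy)
                         : AliasAnalysis::UnknownSize;
}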
@ -171,12 +171,12 @@ bool AAEval::runOnFunction(Function &F) {
for (SetVector<Value *>::iterator I1 = Pointers.begin(), E = Pointers.end();
I1 != E; ++I1) {
uint64_t I1Size = AliasAnalysis::UnknownSize;
const Type *I1ElTy = cast<PointerType>((*I1)->getType())->getElementType();
Type *I1ElTy = cast<PointerType>((*I1)->getType())->getElementType();
if (I1ElTy->isSized()) I1Size = AA.getTypeStoreSize(I1ElTy);

for (SetVector<Value *>::iterator I2 = Pointers.begin(); I2 != I1; ++I2) {
uint64_t I2Size = AliasAnalysis::UnknownSize;
const Type *I2ElTy = cast<PointerType>((*I2)->getType())->getElementType();
Type *I2ElTy = cast<PointerType>((*I2)->getType())->getElementType();
if (I2ElTy->isSized()) I2Size = AA.getTypeStoreSize(I2ElTy);

switch (AA.alias(*I1, I1Size, *I2, I2Size)) {
@ -207,7 +207,7 @@ bool AAEval::runOnFunction(Function &F) {
for (SetVector<Value *>::iterator V = Pointers.begin(), Ve = Pointers.end();
V != Ve; ++V) {
uint64_t Size = AliasAnalysis::UnknownSize;
const Type *ElTy = cast<PointerType>((*V)->getType())->getElementType();
Type *ElTy = cast<PointerType>((*V)->getType())->getElementType();
if (ElTy->isSized()) Size = AA.getTypeStoreSize(ElTy);

switch (AA.getModRefInfo(*C, *V, Size)) {

@ -100,7 +100,7 @@ static bool isEscapeSource(const Value *V) {
/// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const TargetData &TD) {
const Type *AccessTy;
Type *AccessTy;
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
if (!GV->hasDefinitiveInitializer())
return AliasAnalysis::UnknownSize;
@ -317,7 +317,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
E = GEPOp->op_end(); I != E; ++I) {
Value *Index = *I;
// Compute the (potentially symbolic) offset in bytes for this index.
if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
// For a struct, add the member offset.
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
if (FieldNo == 0) continue;
@ -43,11 +43,11 @@ using namespace llvm;
/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
/// TargetData. This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
static Constant *FoldBitCast(Constant *C, const Type *DestTy,
static Constant *FoldBitCast(Constant *C, Type *DestTy,
const TargetData &TD) {

// This only handles casts to vectors currently.
const VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
if (DestVTy == 0)
return ConstantExpr::getBitCast(C, DestTy);

@ -69,8 +69,8 @@ static Constant *FoldBitCast(Constant *C, const Type *DestTy,
if (NumDstElt == NumSrcElt)
return ConstantExpr::getBitCast(C, DestTy);

const Type *SrcEltTy = CV->getType()->getElementType();
const Type *DstEltTy = DestVTy->getElementType();
Type *SrcEltTy = CV->getType()->getElementType();
Type *DstEltTy = DestVTy->getElementType();

// Otherwise, we're changing the number of elements in a vector, which
// requires endianness information to do the right thing. For example,
@ -85,7 +85,7 @@ static Constant *FoldBitCast(Constant *C, const Type *DestTy,
if (DstEltTy->isFloatingPointTy()) {
// Fold to a vector of integers with the same size as our FP type.
unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
const Type *DestIVTy =
Type *DestIVTy =
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
// Recursively handle this integer conversion, if possible.
C = FoldBitCast(C, DestIVTy, TD);
@ -99,7 +99,7 @@ static Constant *FoldBitCast(Constant *C, const Type *DestTy,
// it to integer first.
if (SrcEltTy->isFloatingPointTy()) {
unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
const Type *SrcIVTy =
Type *SrcIVTy =
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
// Ask VMCore to do the conversion now that #elts line up.
C = ConstantExpr::getBitCast(C, SrcIVTy);
@ -212,11 +212,11 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
if (!CI) return false; // Index isn't a simple constant?
if (CI->isZero()) continue; // Not adding anything.

if (const StructType *ST = dyn_cast<StructType>(*GTI)) {
if (StructType *ST = dyn_cast<StructType>(*GTI)) {
// N = N + Offset
Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue());
} else {
const SequentialType *SQT = cast<SequentialType>(*GTI);
SequentialType *SQT = cast<SequentialType>(*GTI);
Offset += TD.getTypeAllocSize(SQT->getElementType())*CI->getSExtValue();
}
}
@ -354,8 +354,8 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,

static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
const TargetData &TD) {
const Type *LoadTy = cast<PointerType>(C->getType())->getElementType();
const IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);
Type *LoadTy = cast<PointerType>(C->getType())->getElementType();
IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);

// If this isn't an integer load we can't fold it directly.
if (!IntType) {
@ -363,7 +363,7 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
// and then bitcast the result. This can be useful for union cases. Note
// that address spaces don't matter here since we're not going to result in
// an actual new load.
const Type *MapTy;
Type *MapTy;
if (LoadTy->isFloatTy())
MapTy = Type::getInt32PtrTy(C->getContext());
else if (LoadTy->isDoubleTy())
@ -443,7 +443,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
std::string Str;
if (TD && GetConstantStringInfo(CE, Str) && !Str.empty()) {
unsigned StrLen = Str.length();
const Type *Ty = cast<PointerType>(CE->getType())->getElementType();
Type *Ty = cast<PointerType>(CE->getType())->getElementType();
unsigned NumBits = Ty->getPrimitiveSizeInBits();
// Replace load with immediate integer if the result is an integer or fp
// value.
@ -478,7 +478,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
if (GlobalVariable *GV =
dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, TD))) {
if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
const Type *ResTy = cast<PointerType>(C->getType())->getElementType();
Type *ResTy = cast<PointerType>(C->getType())->getElementType();
if (GV->getInitializer()->isNullValue())
return Constant::getNullValue(ResTy);
if (isa<UndefValue>(GV->getInitializer()))
@ -537,10 +537,10 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
/// explicitly cast them so that they aren't implicitly casted by the
/// getelementptr.
static Constant *CastGEPIndices(Constant *const *Ops, unsigned NumOps,
const Type *ResultTy,
Type *ResultTy,
const TargetData *TD) {
if (!TD) return 0;
const Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());
Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());

bool Any = false;
SmallVector<Constant*, 32> NewIdxs;
@ -572,13 +572,13 @@ static Constant *CastGEPIndices(Constant *const *Ops, unsigned NumOps,
/// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
/// constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
const Type *ResultTy,
Type *ResultTy,
const TargetData *TD) {
Constant *Ptr = Ops[0];
if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized())
return 0;

const Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext());
Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext());

// If this is a constant expr gep that is effectively computing an
// "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
@ -649,10 +649,10 @@ static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
// we eliminate over-indexing of the notional static type array bounds.
// This makes it easy to determine if the getelementptr is "inbounds".
// Also, this helps GlobalOpt do SROA on GlobalVariables.
const Type *Ty = Ptr->getType();
Type *Ty = Ptr->getType();
SmallVector<Constant*, 32> NewIdxs;
do {
if (const SequentialType *ATy = dyn_cast<SequentialType>(Ty)) {
if (SequentialType *ATy = dyn_cast<SequentialType>(Ty)) {
if (ATy->isPointerTy()) {
// The only pointer indexing we'll do is on the first index of the GEP.
if (!NewIdxs.empty())
@ -665,7 +665,7 @@ static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,

// Determine which element of the array the offset points into.
APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType()));
const IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext());
IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext());
if (ElemSize == 0)
// The element size is 0. This may be [0 x Ty]*, so just use a zero
// index for this level and proceed to the next level to see if it can
@ -679,7 +679,7 @@ static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
}
Ty = ATy->getElementType();
} else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
} else if (StructType *STy = dyn_cast<StructType>(Ty)) {
// Determine which field of the struct the offset points into. The
// getZExtValue is at least as safe as the StructLayout API because we
// know the offset is within the struct at this point.
@ -814,7 +814,7 @@ Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
/// information, due to only being passed an opcode and operands. Constant
/// folding using this function strips this information.
///
Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
Constant* const* Ops, unsigned NumOps,
const TargetData *TD) {
// Handle easy binops first.
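A hedged call sketch for the de-constified entry point above, whose public declaration (with TD defaulted to 0) lives in ConstantFolding.h (the helper name is illustrative):

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Constants.h"
#include "llvm/Instruction.h"
using namespace llvm;

// Fold "add C1, C2" without materializing an Instruction.
static Constant *foldAdd(Constant *C1, Constant *C2, Type *Ty,
                         const TargetData *TD) {
  Constant *Ops[2] = { C1, C2 };
  return ConstantFoldInstOperands(Instruction::Add, Ty, Ops, 2, TD);
}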
@ -912,7 +912,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
// around to know if bit truncation is happening.
if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
if (TD && Ops1->isNullValue()) {
const Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
if (CE0->getOpcode() == Instruction::IntToPtr) {
// Convert the integer value to the right size to ensure we get the
// proper extension or truncation.
@ -934,7 +934,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,

if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
if (TD && CE0->getOpcode() == CE1->getOpcode()) {
const Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());

if (CE0->getOpcode() == Instruction::IntToPtr) {
// Convert the integer value to the right size to ensure we get the
@ -987,7 +987,7 @@ Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
// addressing...
gep_type_iterator I = gep_type_begin(CE), E = gep_type_end(CE);
for (++I; I != E; ++I)
if (const StructType *STy = dyn_cast<StructType>(*I)) {
if (StructType *STy = dyn_cast<StructType>(*I)) {
ConstantInt *CU = cast<ConstantInt>(I.getOperand());
assert(CU->getZExtValue() < STy->getNumElements() &&
"Struct index out of range!");
@ -1002,7 +1002,7 @@ Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
return 0;
}
} else if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand())) {
if (const ArrayType *ATy = dyn_cast<ArrayType>(*I)) {
if (ArrayType *ATy = dyn_cast<ArrayType>(*I)) {
if (CI->getZExtValue() >= ATy->getNumElements())
return 0;
if (ConstantArray *CA = dyn_cast<ConstantArray>(C))
@ -1013,7 +1013,7 @@ Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
C = UndefValue::get(ATy->getElementType());
else
return 0;
} else if (const VectorType *VTy = dyn_cast<VectorType>(*I)) {
} else if (VectorType *VTy = dyn_cast<VectorType>(*I)) {
if (CI->getZExtValue() >= VTy->getNumElements())
return 0;
if (ConstantVector *CP = dyn_cast<ConstantVector>(C))
@ -1101,7 +1101,7 @@ llvm::canConstantFoldCallTo(const Function *F) {
}

static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
const Type *Ty) {
Type *Ty) {
sys::llvm_fenv_clearexcept();
V = NativeFP(V);
if (sys::llvm_fenv_testexcept()) {
@ -1118,7 +1118,7 @@ static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
}

static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
double V, double W, const Type *Ty) {
double V, double W, Type *Ty) {
sys::llvm_fenv_clearexcept();
V = NativeFP(V, W);
if (sys::llvm_fenv_testexcept()) {
@ -1143,7 +1143,7 @@ static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
/// performed, otherwise returns the Constant value resulting from the
/// conversion.
static Constant *ConstantFoldConvertToInt(ConstantFP *Op, bool roundTowardZero,
const Type *Ty) {
Type *Ty) {
assert(Op && "Called with NULL operand");
APFloat Val(Op->getValueAPF());

@ -1172,7 +1172,7 @@ llvm::ConstantFoldCall(Function *F,
if (!F->hasName()) return 0;
StringRef Name = F->getName();

const Type *Ty = F->getReturnType();
Type *Ty = F->getReturnType();
if (NumOperands == 1) {
if (ConstantFP *Op = dyn_cast<ConstantFP>(Operands[0])) {
if (F->getIntrinsicID() == Intrinsic::convert_to_fp16) {

@ -29,7 +29,7 @@ INITIALIZE_PASS(FindUsedTypes, "print-used-types",
// IncorporateType - Incorporate one type and all of its subtypes into the
// collection of used types.
//
void FindUsedTypes::IncorporateType(const Type *Ty) {
void FindUsedTypes::IncorporateType(Type *Ty) {
// If ty doesn't already exist in the used types map, add it now, otherwise
// return.
if (!UsedTypes.insert(Ty)) return; // Already contains Ty.
@ -94,7 +94,7 @@ bool FindUsedTypes::runOnModule(Module &m) {
//
void FindUsedTypes::print(raw_ostream &OS, const Module *M) const {
OS << "Types in use by this module:\n";
for (SetVector<const Type *>::const_iterator I = UsedTypes.begin(),
for (SetVector<Type *>::const_iterator I = UsedTypes.begin(),
E = UsedTypes.end(); I != E; ++I) {
OS << " " << **I << '\n';
}
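A hedged sketch for consuming the pass above (header path per the LLVM tree of this era; the helper name is illustrative):

#include "llvm/Analysis/FindUsedTypes.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// The set now holds mutable Type*, so entries can be handed straight to
// type-construction APIs.
static void listUsedTypes(const FindUsedTypes &FUT, raw_ostream &OS) {
  const SetVector<Type *> &Types = FUT.getTypes();
  for (SetVector<Type *>::const_iterator I = Types.begin(), E = Types.end();
       I != E; ++I)
    OS << **I << '\n';
}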
@ -1372,7 +1372,7 @@ Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const TargetData *TD,
return ::SimplifyXorInst(Op0, Op1, TD, DT, RecursionLimit);
}

static const Type *GetCompareTy(Value *Op) {
static Type *GetCompareTy(Value *Op) {
return CmpInst::makeCmpResultType(Op->getType());
}

@ -1413,8 +1413,8 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
Pred = CmpInst::getSwappedPredicate(Pred);
}

const Type *ITy = GetCompareTy(LHS); // The return type.
const Type *OpTy = LHS->getType(); // The operand type.
Type *ITy = GetCompareTy(LHS); // The return type.
Type *OpTy = LHS->getType(); // The operand type.

// icmp X, X -> true/false
// X icmp undef -> true/false. For example, icmp ugt %X, undef -> false
@ -1593,8 +1593,8 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
Instruction *LI = cast<CastInst>(LHS);
Value *SrcOp = LI->getOperand(0);
const Type *SrcTy = SrcOp->getType();
const Type *DstTy = LI->getType();
Type *SrcTy = SrcOp->getType();
Type *DstTy = LI->getType();

// Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
// if the integer type is the same size as the pointer type.
@ -2222,7 +2222,7 @@ Value *llvm::SimplifySelectInst(Value *CondVal, Value *TrueVal, Value *FalseVal,
Value *llvm::SimplifyGEPInst(Value *const *Ops, unsigned NumOps,
const TargetData *TD, const DominatorTree *) {
// The type of the GEP pointer operand.
const PointerType *PtrTy = cast<PointerType>(Ops[0]->getType());
PointerType *PtrTy = cast<PointerType>(Ops[0]->getType());

// getelementptr P -> P.
if (NumOps == 1)
@ -2230,9 +2230,9 @@ Value *llvm::SimplifyGEPInst(Value *const *Ops, unsigned NumOps,

if (isa<UndefValue>(Ops[0])) {
// Compute the (pointer) type returned by the GEP instruction.
const Type *LastType = GetElementPtrInst::getIndexedType(PtrTy, &Ops[1],
Type *LastType = GetElementPtrInst::getIndexedType(PtrTy, &Ops[1],
NumOps-1);
const Type *GEPTy = PointerType::get(LastType, PtrTy->getAddressSpace());
Type *GEPTy = PointerType::get(LastType, PtrTy->getAddressSpace());
return UndefValue::get(GEPTy);
}

@ -2243,7 +2243,7 @@ Value *llvm::SimplifyGEPInst(Value *const *Ops, unsigned NumOps,
return Ops[0];
// getelementptr P, N -> P if P points to a type of zero size.
if (TD) {
const Type *Ty = PtrTy->getElementType();
Type *Ty = PtrTy->getElementType();
if (Ty->isSized() && TD->getTypeAllocSize(Ty) == 0)
return Ops[0];
}
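A hedged call sketch for SimplifyGEPInst as declared above (the helper name is illustrative):

#include "llvm/Analysis/InstructionSimplify.h"
using namespace llvm;

// Returns a simpler Value (e.g. the pointer itself) or null if nothing folds.
static Value *trySimplifyGEP(Value *Ptr, Value *Idx, const TargetData *TD) {
  Value *Ops[2] = { Ptr, Idx };
  return SimplifyGEPInst(Ops, 2, TD, /*DT=*/0);
}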
@ -630,7 +630,7 @@ bool LazyValueInfoCache::solveBlockValueNonLocal(LVILatticeVal &BBLV,
if (BB == &BB->getParent()->getEntryBlock()) {
assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
if (NotNull) {
const PointerType *PTy = cast<PointerType>(Val->getType());
PointerType *PTy = cast<PointerType>(Val->getType());
Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
} else {
Result.markOverdefined();
@ -658,7 +658,7 @@ bool LazyValueInfoCache::solveBlockValueNonLocal(LVILatticeVal &BBLV,
// If we previously determined that this is a pointer that can't be null
// then return that rather than giving up entirely.
if (NotNull) {
const PointerType *PTy = cast<PointerType>(Val->getType());
PointerType *PTy = cast<PointerType>(Val->getType());
Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
}

@ -728,7 +728,7 @@ bool LazyValueInfoCache::solveBlockValueConstantRange(LVILatticeVal &BBLV,

ConstantRange LHSRange = LHSVal.getConstantRange();
ConstantRange RHSRange(1);
const IntegerType *ResultTy = cast<IntegerType>(BBI->getType());
IntegerType *ResultTy = cast<IntegerType>(BBI->getType());
if (isa<BinaryOperator>(BBI)) {
if (ConstantInt *RHS = dyn_cast<ConstantInt>(BBI->getOperand(1))) {
RHSRange = ConstantRange(RHS->getValue());
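The hunks above show why these locals lose their const: the pointer type is fed to ConstantPointerNull::get. A minimal sketch (assumption: get() is itself de-constified elsewhere in this same patch; the helper name is illustrative):

#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
using namespace llvm;

// Build the "null of V's pointer type" constant used by the lattice code.
static ConstantPointerNull *nullOf(Value *V) {
  PointerType *PTy = cast<PointerType>(V->getType());
  return ConstantPointerNull::get(PTy);
}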
@ -71,7 +71,7 @@ namespace {
void visitCallSite(CallSite CS);
void visitMemoryReference(Instruction &I, Value *Ptr,
uint64_t Size, unsigned Align,
const Type *Ty, unsigned Flags);
Type *Ty, unsigned Flags);

void visitCallInst(CallInst &I);
void visitInvokeInst(InvokeInst &I);
@ -201,7 +201,7 @@ void Lint::visitCallSite(CallSite CS) {
"Undefined behavior: Caller and callee calling convention differ",
&I);

const FunctionType *FT = F->getFunctionType();
FunctionType *FT = F->getFunctionType();
unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());

Assert1(FT->isVarArg() ?
@ -240,7 +240,7 @@ void Lint::visitCallSite(CallSite CS) {

// Check that an sret argument points to valid memory.
if (Formal->hasStructRetAttr() && Actual->getType()->isPointerTy()) {
const Type *Ty =
Type *Ty =
cast<PointerType>(Formal->getType())->getElementType();
visitMemoryReference(I, Actual, AA->getTypeStoreSize(Ty),
TD ? TD->getABITypeAlignment(Ty) : 0,
@ -364,7 +364,7 @@ void Lint::visitReturnInst(ReturnInst &I) {
// TODO: Check readnone/readonly function attributes.
void Lint::visitMemoryReference(Instruction &I,
Value *Ptr, uint64_t Size, unsigned Align,
const Type *Ty, unsigned Flags) {
Type *Ty, unsigned Flags) {
// If no memory is being referenced, it doesn't matter if the pointer
// is valid.
if (Size == 0)
@ -90,7 +90,7 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
if (TD)
Base = getUnderlyingObjectWithOffset(V, TD, ByteOffset);

const Type *BaseType = 0;
Type *BaseType = 0;
unsigned BaseAlign = 0;
if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
// An alloca is safe to load from as long as it is suitably aligned.
@ -114,7 +114,7 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
return true; // Loading directly from an alloca or global is OK.

// Check if the load is within the bounds of the underlying object.
const PointerType *AddrTy = cast<PointerType>(V->getType());
PointerType *AddrTy = cast<PointerType>(V->getType());
uint64_t LoadSize = TD->getTypeStoreSize(AddrTy->getElementType());
if (ByteOffset + LoadSize <= TD->getTypeAllocSize(BaseType) &&
(Align == 0 || (ByteOffset % Align) == 0))
@ -169,7 +169,7 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
// If we're using alias analysis to disambiguate get the size of *Ptr.
uint64_t AccessSize = 0;
if (AA) {
const Type *AccessTy = cast<PointerType>(Ptr->getType())->getElementType();
Type *AccessTy = cast<PointerType>(Ptr->getType())->getElementType();
AccessSize = AA->getTypeStoreSize(AccessTy);
}
@ -47,7 +47,7 @@ static bool isMallocCall(const CallInst *CI) {
// Check malloc prototype.
// FIXME: workaround for PR5130; this will be obsolete once a nobuiltin
// attribute exists.
const FunctionType *FTy = Callee->getFunctionType();
FunctionType *FTy = Callee->getFunctionType();
if (FTy->getNumParams() != 1)
return false;
return FTy->getParamType(0)->isIntegerTy(32) ||
@ -94,12 +94,12 @@ static Value *computeArraySize(const CallInst *CI, const TargetData *TD,
return NULL;

// The size of the malloc's result type must be known to determine array size.
const Type *T = getMallocAllocatedType(CI);
Type *T = getMallocAllocatedType(CI);
if (!T || !T->isSized() || !TD)
return NULL;

unsigned ElementSize = TD->getTypeAllocSize(T);
if (const StructType *ST = dyn_cast<StructType>(T))
if (StructType *ST = dyn_cast<StructType>(T))
ElementSize = TD->getStructLayout(ST)->getSizeInBytes();

// If malloc call's arg can be determined to be a multiple of ElementSize,
@ -133,10 +133,10 @@ const CallInst *llvm::isArrayMalloc(const Value *I, const TargetData *TD) {
/// 0: PointerType is the calls' return type.
/// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL.
const PointerType *llvm::getMallocType(const CallInst *CI) {
PointerType *llvm::getMallocType(const CallInst *CI) {
assert(isMalloc(CI) && "getMallocType and not malloc call");

const PointerType *MallocType = NULL;
PointerType *MallocType = NULL;
unsigned NumOfBitCastUses = 0;

// Determine if CallInst has a bitcast use.
@ -164,8 +164,8 @@ const PointerType *llvm::getMallocType(const CallInst *CI) {
/// 0: PointerType is the malloc calls' return type.
/// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL.
const Type *llvm::getMallocAllocatedType(const CallInst *CI) {
const PointerType *PT = getMallocType(CI);
Type *llvm::getMallocAllocatedType(const CallInst *CI) {
PointerType *PT = getMallocType(CI);
return PT ? PT->getElementType() : NULL;
}

@ -201,7 +201,7 @@ const CallInst *llvm::isFreeCall(const Value *I) {
// Check free prototype.
// FIXME: workaround for PR5130; this will be obsolete once a nobuiltin
// attribute exists.
const FunctionType *FTy = Callee->getFunctionType();
FunctionType *FTy = Callee->getFunctionType();
if (!FTy->getReturnType()->isVoidTy())
return 0;
if (FTy->getNumParams() != 1)
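A hedged sketch of the 0/1/>1 bitcast rule documented above (both queries return null when no unique pointee type exists; the helper name is illustrative):

#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/DerivedTypes.h"
using namespace llvm;

// Recover the allocated type of a malloc call, if it is unambiguous.
static Type *mallocPointee(const CallInst *CI) {
  if (PointerType *PT = getMallocType(CI))  // null when >1 bitcast use
    return PT->getElementType();            // same as getMallocAllocatedType
  return 0;
}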
@ -382,7 +382,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
// location is 1 byte at P+1). If so, return it as a load/load
// clobber result, allowing the client to decide to widen the load if
// it wants to.
if (const IntegerType *ITy = dyn_cast<IntegerType>(LI->getType()))
if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType()))
if (LI->getAlignment()*8 > ITy->getPrimitiveSizeInBits() &&
isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
MemLocOffset, LI, TD))
@ -197,7 +197,7 @@ void SCEV::print(raw_ostream &OS) const {
}
case scUnknown: {
const SCEVUnknown *U = cast<SCEVUnknown>(this);
const Type *AllocTy;
Type *AllocTy;
if (U->isSizeOf(AllocTy)) {
OS << "sizeof(" << *AllocTy << ")";
return;
@ -207,7 +207,7 @@ void SCEV::print(raw_ostream &OS) const {
return;
}

const Type *CTy;
Type *CTy;
Constant *FieldNo;
if (U->isOffsetOf(CTy, FieldNo)) {
OS << "offsetof(" << *CTy << ", ";
@ -228,7 +228,7 @@ void SCEV::print(raw_ostream &OS) const {
llvm_unreachable("Unknown SCEV kind!");
}

const Type *SCEV::getType() const {
Type *SCEV::getType() const {
switch (getSCEVType()) {
case scConstant:
return cast<SCEVConstant>(this)->getType();
@ -297,17 +297,17 @@ const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
}

const SCEV *
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
unsigned SCEVTy, const SCEV *op, const Type *ty)
unsigned SCEVTy, const SCEV *op, Type *ty)
: SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty)
const SCEV *op, Type *ty)
: SCEVCastExpr(ID, scTruncate, op, ty) {
assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
@ -315,7 +315,7 @@ SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty)
const SCEV *op, Type *ty)
: SCEVCastExpr(ID, scZeroExtend, op, ty) {
assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
@ -323,7 +323,7 @@ SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty)
const SCEV *op, Type *ty)
: SCEVCastExpr(ID, scSignExtend, op, ty) {
assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
@ -354,7 +354,7 @@ void SCEVUnknown::allUsesReplacedWith(Value *New) {
setValPtr(New);
}

bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
if (VCE->getOpcode() == Instruction::PtrToInt)
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
@ -371,15 +371,15 @@ bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
return false;
}

bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
if (VCE->getOpcode() == Instruction::PtrToInt)
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
if (CE->getOpcode() == Instruction::GetElementPtr &&
CE->getOperand(0)->isNullValue()) {
const Type *Ty =
Type *Ty =
cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
if (const StructType *STy = dyn_cast<StructType>(Ty))
if (StructType *STy = dyn_cast<StructType>(Ty))
if (!STy->isPacked() &&
CE->getNumOperands() == 3 &&
CE->getOperand(1)->isNullValue()) {
@ -396,7 +396,7 @@ bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
return false;
}

bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
if (VCE->getOpcode() == Instruction::PtrToInt)
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
@ -404,7 +404,7 @@ bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
CE->getNumOperands() == 3 &&
CE->getOperand(0)->isNullValue() &&
CE->getOperand(1)->isNullValue()) {
const Type *Ty =
Type *Ty =
cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
// Ignore vector types here so that ScalarEvolutionExpander doesn't
// emit getelementptrs that index into vectors.
@ -652,7 +652,7 @@ static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
/// Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
ScalarEvolution &SE,
const Type* ResultTy) {
Type* ResultTy) {
// Handle the simplest case efficiently.
if (K == 1)
return SE.getTruncateOrZeroExtend(It, ResultTy);
@ -742,7 +742,7 @@ static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
MultiplyFactor = MultiplyFactor.trunc(W);

// Calculate the product, at width T+W
const IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
CalculationBits);
const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
for (unsigned i = 1; i != K; ++i) {
@ -790,7 +790,7 @@ const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
const Type *Ty) {
Type *Ty) {
assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
"This is not a truncating conversion!");
assert(isSCEVable(Ty) &&
@ -877,7 +877,7 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
const Type *Ty) {
Type *Ty) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
"This is not an extending conversion!");
assert(isSCEVable(Ty) &&
@ -954,7 +954,7 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
const SCEV *RecastedMaxBECount =
getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
if (MaxBECount == RecastedMaxBECount) {
const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
// Check whether Start+Step*MaxBECount has no unsigned overflow.
const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
const SCEV *Add = getAddExpr(Start, ZMul);
@ -1062,7 +1062,7 @@ static const SCEV *getOverflowLimitForStep(const SCEV *Step,
// result, the expression "Step + sext(PreIncAR)" is congruent with
// "sext(PostIncAR)"
static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,
const Type *Ty,
Type *Ty,
ScalarEvolution *SE) {
const Loop *L = AR->getLoop();
const SCEV *Start = AR->getStart();
@ -1086,7 +1086,7 @@ static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,

// 2. Direct overflow check on the step operation's expression.
unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
const Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
const SCEV *OperandExtendedStart =
SE->getAddExpr(SE->getSignExtendExpr(PreStart, WideTy),
SE->getSignExtendExpr(Step, WideTy));
@ -1112,7 +1112,7 @@ static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,

// Get the normalized sign-extended expression for this AddRec's Start.
static const SCEV *getSignExtendAddRecStart(const SCEVAddRecExpr *AR,
const Type *Ty,
Type *Ty,
ScalarEvolution *SE) {
const SCEV *PreStart = getPreStartForSignExtend(AR, Ty, SE);
if (!PreStart)
@ -1123,7 +1123,7 @@ static const SCEV *getSignExtendAddRecStart(const SCEVAddRecExpr *AR,
}

const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
const Type *Ty) {
Type *Ty) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
"This is not an extending conversion!");
assert(isSCEVable(Ty) &&
@ -1208,7 +1208,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
const SCEV *RecastedMaxBECount =
getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
if (MaxBECount == RecastedMaxBECount) {
const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
// Check whether Start+Step*MaxBECount has no signed overflow.
const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
const SCEV *Add = getAddExpr(Start, SMul);
@ -1275,7 +1275,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
/// unspecified bits out to the given type.
///
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
const Type *Ty) {
Type *Ty) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
"This is not an extending conversion!");
assert(isSCEVable(Ty) &&
@ -1438,7 +1438,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
assert(!Ops.empty() && "Cannot get empty add!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"SCEVAddExpr operand types don't match!");
@ -1488,7 +1488,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// Okay, check to see if the same value occurs in the operand list more than
// once. If so, merge them together into a multiply expression. Since we
// sorted the list, these values are required to be adjacent.
const Type *Ty = Ops[0]->getType();
Type *Ty = Ops[0]->getType();
bool FoundMatch = false;
for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
@ -1515,8 +1515,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// if the contents of the resulting outer trunc fold to something simple.
for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
const Type *DstType = Trunc->getType();
const Type *SrcType = Trunc->getOperand()->getType();
Type *DstType = Trunc->getType();
Type *SrcType = Trunc->getOperand()->getType();
SmallVector<const SCEV *, 8> LargeOps;
bool Ok = true;
// Check all the operands to see if they can be represented in the
@ -1809,7 +1809,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
assert(!Ops.empty() && "Cannot get empty mul!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"SCEVMulExpr operand types don't match!");
@ -2042,14 +2042,14 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
// Determine if the division can be folded into the operands of
// its operands.
// TODO: Generalize this to non-constants by using known-bits information.
const Type *Ty = LHS->getType();
Type *Ty = LHS->getType();
unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
// For non-power-of-two values, effectively round the value up to the
// nearest power of two.
if (!RHSC->getValue()->getValue().isPowerOf2())
++MaxShiftAmt;
const IntegerType *ExtTy =
IntegerType *ExtTy =
IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
// {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
@ -2151,7 +2151,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
const Loop *L, SCEV::NoWrapFlags Flags) {
if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
const Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
for (unsigned i = 1, e = Operands.size(); i != e; ++i)
assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
"SCEVAddRecExpr operand types don't match!");
@ -2269,7 +2269,7 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
assert(!Ops.empty() && "Cannot get empty smax!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"SCEVSMaxExpr operand types don't match!");
@ -2373,7 +2373,7 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
assert(!Ops.empty() && "Cannot get empty umax!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"SCEVUMaxExpr operand types don't match!");
@ -2476,7 +2476,7 @@ const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}

const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) {
// If we have TargetData, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
@ -2488,20 +2488,20 @@ const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
C = Folded;
const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
}

const SCEV *ScalarEvolution::getAlignOfExpr(const Type *AllocTy) {
const SCEV *ScalarEvolution::getAlignOfExpr(Type *AllocTy) {
Constant *C = ConstantExpr::getAlignOf(AllocTy);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
C = Folded;
const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
}

const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy,
const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy,
unsigned FieldNo) {
// If we have TargetData, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
@ -2514,17 +2514,17 @@ const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy,
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
C = Folded;
const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
}

const SCEV *ScalarEvolution::getOffsetOfExpr(const Type *CTy,
const SCEV *ScalarEvolution::getOffsetOfExpr(Type *CTy,
Constant *FieldNo) {
Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
C = Folded;
const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
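A hedged sketch combining the sizeof/offsetof builders above (the helper name is illustrative):

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/DerivedTypes.h"
using namespace llvm;

// Byte offset of a struct field as a SCEV, using the Type*-taking overloads.
static const SCEV *fieldOffsetSCEV(ScalarEvolution &SE, StructType *STy,
                                   unsigned FieldNo) {
  const SCEV *Size = SE.getSizeOfExpr(STy);  // sizeof(STy), for comparison
  (void)Size;
  return SE.getOffsetOfExpr(STy, FieldNo);
}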
@ -2558,14 +2558,14 @@ const SCEV *ScalarEvolution::getUnknown(Value *V) {
/// the SCEV framework. This primarily includes integer types, and it
/// can optionally include pointer types if the ScalarEvolution class
/// has access to target-specific information.
bool ScalarEvolution::isSCEVable(const Type *Ty) const {
bool ScalarEvolution::isSCEVable(Type *Ty) const {
// Integers and pointers are always SCEVable.
return Ty->isIntegerTy() || Ty->isPointerTy();
}

/// getTypeSizeInBits - Return the size in bits of the specified type,
/// for which isSCEVable must return true.
uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
assert(isSCEVable(Ty) && "Type is not SCEVable!");

// If we have a TargetData, use it!
@ -2586,7 +2586,7 @@ uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
/// the given type and which represents how SCEV will treat the given
/// type, for which isSCEVable must return true. For pointer types,
/// this is the pointer-sized integer type.
const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
assert(isSCEVable(Ty) && "Type is not SCEVable!");

if (Ty->isIntegerTy())
@ -2628,7 +2628,7 @@ const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
return getConstant(
cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));

const Type *Ty = V->getType();
Type *Ty = V->getType();
Ty = getEffectiveSCEVType(Ty);
return getMulExpr(V,
getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
@ -2640,7 +2640,7 @@ const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
return getConstant(
cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));

const Type *Ty = V->getType();
Type *Ty = V->getType();
Ty = getEffectiveSCEVType(Ty);
const SCEV *AllOnes =
getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
@ -2664,8 +2664,8 @@ const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
/// input value to the specified type. If the type must be extended, it is zero
/// extended.
const SCEV *
ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
Type *SrcTy = V->getType();
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot truncate or zero extend with non-integer arguments!");
@ -2681,8 +2681,8 @@ ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, const Type *Ty) {
/// extended.
const SCEV *
ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
const Type *Ty) {
const Type *SrcTy = V->getType();
Type *Ty) {
Type *SrcTy = V->getType();
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot truncate or zero extend with non-integer arguments!");
@ -2697,8 +2697,8 @@ ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
/// input value to the specified type. If the type must be extended, it is zero
/// extended. The conversion must not be narrowing.
const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
Type *SrcTy = V->getType();
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot noop or zero extend with non-integer arguments!");
@ -2713,8 +2713,8 @@ ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
/// input value to the specified type. If the type must be extended, it is sign
/// extended. The conversion must not be narrowing.
const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
Type *SrcTy = V->getType();
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot noop or sign extend with non-integer arguments!");
@ -2730,8 +2730,8 @@ ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
/// it is extended with unspecified bits. The conversion must not be
/// narrowing.
const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
Type *SrcTy = V->getType();
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot noop or any extend with non-integer arguments!");
@ -2745,8 +2745,8 @@ ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
/// input value to the specified type. The conversion must not be widening.
const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
Type *SrcTy = V->getType();
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot truncate or noop with non-integer arguments!");
@ -3032,7 +3032,7 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
|
||||
// context.
|
||||
bool isInBounds = GEP->isInBounds();
|
||||
|
||||
const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
|
||||
Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
|
||||
Value *Base = GEP->getOperand(0);
|
||||
// Don't attempt to analyze GEPs over unsized objects.
|
||||
if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
|
||||
@ -3044,7 +3044,7 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
|
||||
I != E; ++I) {
|
||||
Value *Index = *I;
|
||||
// Compute the (potentially symbolic) offset in bytes for this index.
|
||||
if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
|
||||
if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
|
||||
// For a struct, add the member offset.
|
||||
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
|
||||
const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo);
|
||||
@ -3244,7 +3244,7 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) {
|
||||
|
||||
// TODO: non-affine addrec
|
||||
if (AddRec->isAffine()) {
|
||||
const Type *Ty = AddRec->getType();
|
||||
Type *Ty = AddRec->getType();
|
||||
const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
|
||||
if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
|
||||
getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
|
||||
@ -3396,7 +3396,7 @@ ScalarEvolution::getSignedRange(const SCEV *S) {
|
||||
|
||||
// TODO: non-affine addrec
|
||||
if (AddRec->isAffine()) {
|
||||
const Type *Ty = AddRec->getType();
|
||||
Type *Ty = AddRec->getType();
|
||||
const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
|
||||
if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
|
||||
getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
|
||||
@ -3601,9 +3601,9 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
|
||||
LCI->getValue() == CI->getValue())
|
||||
if (const SCEVZeroExtendExpr *Z =
|
||||
dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
|
||||
const Type *UTy = U->getType();
|
||||
Type *UTy = U->getType();
|
||||
const SCEV *Z0 = Z->getOperand();
|
||||
const Type *Z0Ty = Z0->getType();
|
||||
Type *Z0Ty = Z0->getType();
|
||||
unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
|
||||
|
||||
// If C is a low-bits mask, the zero extend is serving to
|
||||
@ -4321,10 +4321,10 @@ GetAddressedElementFromGlobal(GlobalVariable *GV,
|
||||
if (Idx >= CA->getNumOperands()) return 0; // Bogus program
|
||||
Init = cast<Constant>(CA->getOperand(Idx));
|
||||
} else if (isa<ConstantAggregateZero>(Init)) {
|
||||
if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
|
||||
if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
|
||||
assert(Idx < STy->getNumElements() && "Bad struct index!");
|
||||
Init = Constant::getNullValue(STy->getElementType(Idx));
|
||||
} else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
|
||||
} else if (ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
|
||||
if (Idx >= ATy->getNumElements()) return 0; // Bogus program
|
||||
Init = Constant::getNullValue(ATy->getElementType());
|
||||
} else {
|
||||
@ -5741,7 +5741,7 @@ const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
|
||||
assert(!isKnownNegative(Step) &&
|
||||
"This code doesn't handle negative strides yet!");
|
||||
|
||||
const Type *Ty = Start->getType();
|
||||
Type *Ty = Start->getType();
|
||||
|
||||
// When Start == End, we have an exact BECount == 0. Short-circuit this case
|
||||
// here because SCEV may not be able to determine that the unsigned division
|
||||
@ -5760,7 +5760,7 @@ const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
|
||||
if (!NoWrap) {
|
||||
// Check Add for unsigned overflow.
|
||||
// TODO: More sophisticated things could be done here.
|
||||
const Type *WideTy = IntegerType::get(getContext(),
|
||||
Type *WideTy = IntegerType::get(getContext(),
|
||||
getTypeSizeInBits(Ty) + 1);
|
||||
const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
|
||||
const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
|
||||
|
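For illustration, a minimal sketch of calling the de-constified conversion helpers above; the wrapper name is hypothetical, but getNoopOrZeroExtend is the entry point changed in this hunk, and both sides of the call now trade in plain Type*:

const SCEV *widenToI64(ScalarEvolution &SE, const SCEV *V) {
  // Type factories return a mutable Type*, which now matches the parameter
  // type of the SCEV conversion helpers, so no const_cast is needed.
  Type *I64 = Type::getInt64Ty(SE.getContext());
  return SE.getNoopOrZeroExtend(V, I64); // must not be a narrowing conversion
}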

@ -26,7 +26,7 @@ using namespace llvm;
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, const Type *Ty,
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
Instruction::CastOps Op,
BasicBlock::iterator IP) {
// Check to see if there is already a cast!
@ -62,7 +62,7 @@ Value *SCEVExpander::ReuseOrCreateCast(Value *V, const Type *Ty,
/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, const Type *Ty) {
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
assert((Op == Instruction::BitCast ||
Op == Instruction::PtrToInt ||
@ -277,7 +277,7 @@ static bool FactorOutConstant(const SCEV *&S,
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
const Type *Ty,
Type *Ty,
ScalarEvolution &SE) {
unsigned NumAddRecs = 0;
for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
@ -306,7 +306,7 @@ static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
const Type *Ty,
Type *Ty,
ScalarEvolution &SE) {
// Find the addrecs.
SmallVector<const SCEV *, 8> AddRecs;
@ -365,10 +365,10 @@ static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
///
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
const SCEV *const *op_end,
const PointerType *PTy,
const Type *Ty,
PointerType *PTy,
Type *Ty,
Value *V) {
const Type *ElTy = PTy->getElementType();
Type *ElTy = PTy->getElementType();
SmallVector<Value *, 4> GepIndices;
SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
bool AnyNonZeroIndices = false;
@ -423,7 +423,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
GepIndices.push_back(Scaled);

// Collect struct field index operands.
while (const StructType *STy = dyn_cast<StructType>(ElTy)) {
while (StructType *STy = dyn_cast<StructType>(ElTy)) {
bool FoundFieldNo = false;
// An empty struct has no fields.
if (STy->getNumElements() == 0) break;
@ -451,7 +451,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
// appropriate struct type.
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
const Type *CTy;
Type *CTy;
Constant *FieldNo;
if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
GepIndices.push_back(FieldNo);
@ -474,7 +474,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
}
}

if (const ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
ElTy = ATy->getElementType();
else
break;
@ -691,7 +691,7 @@ public:
}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
const Type *Ty = SE.getEffectiveSCEVType(S->getType());
Type *Ty = SE.getEffectiveSCEVType(S->getType());

// Collect all the add operands in a loop, along with their associated loops.
// Iterate in reverse so that constants are emitted last, all else equal, and
@ -717,7 +717,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
// This is the first operand. Just expand it.
Sum = expand(Op);
++I;
} else if (const PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
} else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
// The running sum expression is a pointer. Try to form a getelementptr
// at this level with that as the base.
SmallVector<const SCEV *, 4> NewOps;
@ -731,7 +731,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
NewOps.push_back(X);
}
Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
} else if (const PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
} else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
// The running sum is an integer, and there's a pointer at this level.
// Try to form a getelementptr. If the running sum is instructions,
// use a SCEVUnknown to avoid re-analyzing them.
@ -762,7 +762,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
const Type *Ty = SE.getEffectiveSCEVType(S->getType());
Type *Ty = SE.getEffectiveSCEVType(S->getType());

// Collect all the mul operands in a loop, along with their associated loops.
// Iterate in reverse so that constants are emitted last, all else equal.
@ -804,7 +804,7 @@ Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
const Type *Ty = SE.getEffectiveSCEVType(S->getType());
Type *Ty = SE.getEffectiveSCEVType(S->getType());

Value *LHS = expandCodeFor(S->getLHS(), Ty);
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
@ -847,8 +847,8 @@ static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
const Loop *L,
const Type *ExpandTy,
const Type *IntTy) {
Type *ExpandTy,
Type *IntTy) {
assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

// Reuse a previously-inserted PHI, if present.
@ -969,7 +969,7 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
Value *IncV;
// If the PHI is a pointer, use a GEP, otherwise use an add or sub.
if (isPointer) {
const PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
// If the step isn't constant, don't use an implicitly scaled GEP, because
// that would require a multiply inside the loop.
if (!isa<ConstantInt>(StepV))
@ -1001,8 +1001,8 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
const Type *STy = S->getType();
const Type *IntTy = SE.getEffectiveSCEVType(STy);
Type *STy = S->getType();
Type *IntTy = SE.getEffectiveSCEVType(STy);
const Loop *L = S->getLoop();

// Determine a normalized form of this expression, which is the expression
@ -1045,7 +1045,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {

// Expand the core addrec. If we need post-loop scaling, force it to
// expand to an integer type to avoid the need for additional casting.
const Type *ExpandTy = PostLoopScale ? IntTy : STy;
Type *ExpandTy = PostLoopScale ? IntTy : STy;
PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy);

// Accommodate post-inc mode, if necessary.
@ -1069,7 +1069,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {

// Re-apply any non-loop-dominating offset.
if (PostLoopOffset) {
if (const PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
const SCEV *const OffsetArray[1] = { PostLoopOffset };
Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
} else {
@ -1086,7 +1086,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
if (!CanonicalMode) return expandAddRecExprLiterally(S);

const Type *Ty = SE.getEffectiveSCEVType(S->getType());
Type *Ty = SE.getEffectiveSCEVType(S->getType());
const Loop *L = S->getLoop();

// First check for an existing canonical IV in a suitable type.
@ -1132,7 +1132,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
// Dig into the expression to find the pointer base for a GEP.
ExposePointerBase(Base, RestArray[0], SE);
// If we found a pointer, expand the AddRec with a GEP.
if (const PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
// Make sure the Base isn't something exotic, such as a multiplied
// or divided pointer value. In those cases, the result type isn't
// actually a pointer type.
@ -1216,7 +1216,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
}

Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
const Type *Ty = SE.getEffectiveSCEVType(S->getType());
Type *Ty = SE.getEffectiveSCEVType(S->getType());
Value *V = expandCodeFor(S->getOperand(),
SE.getEffectiveSCEVType(S->getOperand()->getType()));
Value *I = Builder.CreateTrunc(V, Ty, "tmp");
@ -1225,7 +1225,7 @@ Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
const Type *Ty = SE.getEffectiveSCEVType(S->getType());
Type *Ty = SE.getEffectiveSCEVType(S->getType());
Value *V = expandCodeFor(S->getOperand(),
SE.getEffectiveSCEVType(S->getOperand()->getType()));
Value *I = Builder.CreateZExt(V, Ty, "tmp");
@ -1234,7 +1234,7 @@ Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
const Type *Ty = SE.getEffectiveSCEVType(S->getType());
Type *Ty = SE.getEffectiveSCEVType(S->getType());
Value *V = expandCodeFor(S->getOperand(),
SE.getEffectiveSCEVType(S->getOperand()->getType()));
Value *I = Builder.CreateSExt(V, Ty, "tmp");
@ -1244,7 +1244,7 @@ Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {

Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
const Type *Ty = LHS->getType();
Type *Ty = LHS->getType();
for (int i = S->getNumOperands()-2; i >= 0; --i) {
// In the case of mixed integer and pointer types, do the
// rest of the comparisons as integer.
@ -1268,7 +1268,7 @@ Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
const Type *Ty = LHS->getType();
Type *Ty = LHS->getType();
for (int i = S->getNumOperands()-2; i >= 0; --i) {
// In the case of mixed integer and pointer types, do the
// rest of the comparisons as integer.
@ -1290,7 +1290,7 @@ Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
return LHS;
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, const Type *Ty,
Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
Instruction *I) {
BasicBlock::iterator IP = I;
while (isInsertedInstruction(IP) || isa<DbgInfoIntrinsic>(IP))
@ -1299,7 +1299,7 @@ Value *SCEVExpander::expandCodeFor(const SCEV *SH, const Type *Ty,
return expandCodeFor(SH, Ty);
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, const Type *Ty) {
Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
// Expand the code for this SCEV.
Value *V = expand(SH);
if (Ty) {
@ -1384,7 +1384,7 @@ void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
/// starts at zero and steps by one on each iteration.
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
const Type *Ty) {
Type *Ty) {
assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

// Build a SCEV for {0,+,1}<L>.
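A hedged sketch of driving the expander entry points above; the helper name is illustrative, but expandCodeFor with an explicit result Type* and insertion point is exactly the signature changed in this file:

Value *emitWidenedCount(SCEVExpander &Exp, ScalarEvolution &SE,
                        const SCEV *Count, Instruction *IP) {
  // Expand Count as an i64 value; expandCodeFor inserts the computation
  // before IP and returns the resulting Value.
  Type *I64 = Type::getInt64Ty(SE.getContext());
  return Exp.expandCodeFor(SE.getNoopOrZeroExtend(Count, I64), I64, IP);
}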

@ -34,7 +34,7 @@ const unsigned MaxDepth = 6;

/// getBitWidth - Returns the bitwidth of the given scalar or pointer type (if
/// unknown returns 0). For vector types, returns the element type's bitwidth.
static unsigned getBitWidth(const Type *Ty, const TargetData *TD) {
static unsigned getBitWidth(Type *Ty, const TargetData *TD) {
if (unsigned BitWidth = Ty->getScalarSizeInBits())
return BitWidth;
assert(isa<PointerType>(Ty) && "Expected a pointer type!");
@ -103,7 +103,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
unsigned Align = GV->getAlignment();
if (Align == 0 && TD && GV->getType()->getElementType()->isSized()) {
const Type *ObjectType = GV->getType()->getElementType();
Type *ObjectType = GV->getType()->getElementType();
// If the object is defined in the current Module, we'll be giving
// it the preferred alignment. Otherwise, we have to assume that it
// may only have the minimum ABI alignment.
@ -268,7 +268,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
// FALL THROUGH and handle them the same as zext/trunc.
case Instruction::ZExt:
case Instruction::Trunc: {
const Type *SrcTy = I->getOperand(0)->getType();
Type *SrcTy = I->getOperand(0)->getType();

unsigned SrcBitWidth;
// Note that we handle pointer operands here because of inttoptr/ptrtoint
@ -291,7 +291,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
return;
}
case Instruction::BitCast: {
const Type *SrcTy = I->getOperand(0)->getType();
Type *SrcTy = I->getOperand(0)->getType();
if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
// TODO: For now, not handling conversions like:
// (bitcast i64 %x to <2 x i32>)
@ -559,7 +559,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
gep_type_iterator GTI = gep_type_begin(I);
for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
Value *Index = I->getOperand(i);
if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
// Handle struct member offset arithmetic.
if (!TD) return;
const StructLayout *SL = TD->getStructLayout(STy);
@ -569,7 +569,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
CountTrailingZeros_64(Offset));
} else {
// Handle array index arithmetic.
const Type *IndexedTy = GTI.getIndexedType();
Type *IndexedTy = GTI.getIndexedType();
if (!IndexedTy->isSized()) return;
unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
uint64_t TypeSize = TD ? TD->getTypeAllocSize(IndexedTy) : 1;
@ -898,7 +898,7 @@ unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
assert((TD || V->getType()->isIntOrIntVectorTy()) &&
"ComputeNumSignBits requires a TargetData object to operate "
"on non-integer values!");
const Type *Ty = V->getType();
Type *Ty = V->getType();
unsigned TyBits = TD ? TD->getTypeSizeInBits(V->getType()->getScalarType()) :
Ty->getScalarSizeInBits();
unsigned Tmp, Tmp2;
@ -1078,7 +1078,7 @@ bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
assert(Depth <= MaxDepth && "Limit Search Depth");
assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");

const Type *T = V->getType();
Type *T = V->getType();

ConstantInt *CI = dyn_cast<ConstantInt>(V);

@ -1315,11 +1315,11 @@ Value *llvm::isBytewiseValue(Value *V) {
// indices from Idxs that should be left out when inserting into the resulting
// struct. To is the result struct built so far, new insertvalue instructions
// build on that.
static Value *BuildSubAggregate(Value *From, Value* To, const Type *IndexedType,
static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
SmallVector<unsigned, 10> &Idxs,
unsigned IdxSkip,
Instruction *InsertBefore) {
const llvm::StructType *STy = llvm::dyn_cast<llvm::StructType>(IndexedType);
llvm::StructType *STy = llvm::dyn_cast<llvm::StructType>(IndexedType);
if (STy) {
// Save the original To argument so we can modify it
Value *OrigTo = To;
@ -1378,7 +1378,7 @@ static Value *BuildSubAggregate(Value *From, Value* To, const Type *IndexedType,
static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
Instruction *InsertBefore) {
assert(InsertBefore && "Must have someplace to insert!");
const Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
idx_range);
Value *To = UndefValue::get(IndexedType);
SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
@ -1404,7 +1404,7 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
&& "Not looking at a struct or array?");
assert(ExtractValueInst::getIndexedType(V->getType(), idx_range)
&& "Invalid indices for type?");
const CompositeType *PTy = cast<CompositeType>(V->getType());
CompositeType *PTy = cast<CompositeType>(V->getType());

if (isa<UndefValue>(V))
return UndefValue::get(ExtractValueInst::getIndexedType(PTy,
@ -1506,7 +1506,7 @@ Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
if (OpC->isZero()) continue;

// Handle a struct and array indices which add their offset to the pointer.
if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
} else {
uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
@ -1557,8 +1557,8 @@ bool llvm::GetConstantStringInfo(const Value *V, std::string &Str,
return false;

// Make sure the index-ee is a pointer to array of i8.
const PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
const ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
if (AT == 0 || !AT->getElementType()->isIntegerTy(8))
return false;
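A small usage sketch for ComputeNumSignBits above, assuming NarrowBits does not exceed the value's width; the helper name is made up for illustration:

bool fitsInSignedBits(Value *V, unsigned NarrowBits, const TargetData *TD) {
  // A value fits in NarrowBits signed bits when at least
  // Width - NarrowBits + 1 of its top bits are copies of the sign bit.
  unsigned Width = V->getType()->getScalarSizeInBits();
  return llvm::ComputeNumSignBits(V, TD) >= Width - NarrowBits + 1;
}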

@ -26,7 +26,7 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static std::string getTypeString(const Type *T) {
static std::string getTypeString(Type *T) {
std::string Result;
raw_string_ostream Tmp(Result);
Tmp << *T;
@ -744,9 +744,9 @@ bool LLParser::ParseGlobal(const std::string &Name, LocTy NameLoc,
/// GetGlobalVal - Get a value with the specified name or ID, creating a
/// forward reference record if needed. This can return null if the value
/// exists but does not have the right type.
GlobalValue *LLParser::GetGlobalVal(const std::string &Name, const Type *Ty,
GlobalValue *LLParser::GetGlobalVal(const std::string &Name, Type *Ty,
LocTy Loc) {
const PointerType *PTy = dyn_cast<PointerType>(Ty);
PointerType *PTy = dyn_cast<PointerType>(Ty);
if (PTy == 0) {
Error(Loc, "global variable reference must have pointer type");
return 0;
@ -775,7 +775,7 @@ GlobalValue *LLParser::GetGlobalVal(const std::string &Name, const Type *Ty,

// Otherwise, create a new forward reference for this value and remember it.
GlobalValue *FwdVal;
if (const FunctionType *FT = dyn_cast<FunctionType>(PTy->getElementType()))
if (FunctionType *FT = dyn_cast<FunctionType>(PTy->getElementType()))
FwdVal = Function::Create(FT, GlobalValue::ExternalWeakLinkage, Name, M);
else
FwdVal = new GlobalVariable(*M, PTy->getElementType(), false,
@ -785,8 +785,8 @@ GlobalValue *LLParser::GetGlobalVal(const std::string &Name, const Type *Ty,
return FwdVal;
}

GlobalValue *LLParser::GetGlobalVal(unsigned ID, const Type *Ty, LocTy Loc) {
const PointerType *PTy = dyn_cast<PointerType>(Ty);
GlobalValue *LLParser::GetGlobalVal(unsigned ID, Type *Ty, LocTy Loc) {
PointerType *PTy = dyn_cast<PointerType>(Ty);
if (PTy == 0) {
Error(Loc, "global variable reference must have pointer type");
return 0;
@ -813,7 +813,7 @@ GlobalValue *LLParser::GetGlobalVal(unsigned ID, const Type *Ty, LocTy Loc) {

// Otherwise, create a new forward reference for this value and remember it.
GlobalValue *FwdVal;
if (const FunctionType *FT = dyn_cast<FunctionType>(PTy->getElementType()))
if (FunctionType *FT = dyn_cast<FunctionType>(PTy->getElementType()))
FwdVal = Function::Create(FT, GlobalValue::ExternalWeakLinkage, "", M);
else
FwdVal = new GlobalVariable(*M, PTy->getElementType(), false,
@ -1668,7 +1668,7 @@ bool LLParser::PerFunctionState::FinishFunction() {
/// forward reference record if needed. This can return null if the value
/// exists but does not have the right type.
Value *LLParser::PerFunctionState::GetVal(const std::string &Name,
const Type *Ty, LocTy Loc) {
Type *Ty, LocTy Loc) {
// Look this name up in the normal function symbol table.
Value *Val = F.getValueSymbolTable().lookup(Name);

@ -1709,7 +1709,7 @@ Value *LLParser::PerFunctionState::GetVal(const std::string &Name,
return FwdVal;
}

Value *LLParser::PerFunctionState::GetVal(unsigned ID, const Type *Ty,
Value *LLParser::PerFunctionState::GetVal(unsigned ID, Type *Ty,
LocTy Loc) {
// Look this name up in the normal function symbol table.
Value *Val = ID < NumberedVals.size() ? NumberedVals[ID] : 0;
@ -2323,7 +2323,7 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
}

/// ParseGlobalValue - Parse a global value with the specified type.
bool LLParser::ParseGlobalValue(const Type *Ty, Constant *&C) {
bool LLParser::ParseGlobalValue(Type *Ty, Constant *&C) {
C = 0;
ValID ID;
Value *V = NULL;
@ -2410,7 +2410,7 @@ bool LLParser::ParseMetadataValue(ValID &ID, PerFunctionState *PFS) {
// Function Parsing.
//===----------------------------------------------------------------------===//

bool LLParser::ConvertValIDToValue(const Type *Ty, ValID &ID, Value *&V,
bool LLParser::ConvertValIDToValue(Type *Ty, ValID &ID, Value *&V,
PerFunctionState *PFS) {
if (Ty->isFunctionTy())
return Error(ID.Loc, "functions are not values, refer to them as pointers");
@ -2426,8 +2426,8 @@ bool LLParser::ConvertValIDToValue(const Type *Ty, ValID &ID, Value *&V,
V = PFS->GetVal(ID.StrVal, Ty, ID.Loc);
return (V == 0);
case ValID::t_InlineAsm: {
const PointerType *PTy = dyn_cast<PointerType>(Ty);
const FunctionType *FTy =
PointerType *PTy = dyn_cast<PointerType>(Ty);
FunctionType *FTy =
PTy ? dyn_cast<FunctionType>(PTy->getElementType()) : 0;
if (!FTy || !InlineAsm::Verify(FTy, ID.StrVal2))
return Error(ID.Loc, "invalid type for inline asm constraint string");
@ -2506,7 +2506,7 @@ bool LLParser::ConvertValIDToValue(const Type *Ty, ValID &ID, Value *&V,
return false;
case ValID::t_ConstantStruct:
case ValID::t_PackedConstantStruct:
if (const StructType *ST = dyn_cast<StructType>(Ty)) {
if (StructType *ST = dyn_cast<StructType>(Ty)) {
if (ST->getNumElements() != ID.UIntVal)
return Error(ID.Loc,
"initializer with struct type has wrong # elements");
@ -2527,7 +2527,7 @@ bool LLParser::ConvertValIDToValue(const Type *Ty, ValID &ID, Value *&V,
}
}

bool LLParser::ParseValue(const Type *Ty, Value *&V, PerFunctionState *PFS) {
bool LLParser::ParseValue(Type *Ty, Value *&V, PerFunctionState *PFS) {
V = 0;
ValID ID;
return ParseValID(ID, PFS) ||
@ -2671,9 +2671,9 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
if (PAL.paramHasAttr(1, Attribute::StructRet) && !RetType->isVoidTy())
return Error(RetTypeLoc, "functions with 'sret' argument must return void");

const FunctionType *FT =
FunctionType *FT =
FunctionType::get(RetType, ParamTypeList, isVarArg);
const PointerType *PFT = PointerType::getUnqual(FT);
PointerType *PFT = PointerType::getUnqual(FT);

Fn = 0;
if (!FunctionName.empty()) {
@ -3162,8 +3162,8 @@ bool LLParser::ParseInvoke(Instruction *&Inst, PerFunctionState &PFS) {
// If RetType is a non-function pointer type, then this is the short syntax
// for the call, which means that RetType is just the return type. Infer the
// rest of the function argument types from the arguments that are present.
const PointerType *PFTy = 0;
const FunctionType *Ty = 0;
PointerType *PFTy = 0;
FunctionType *Ty = 0;
if (!(PFTy = dyn_cast<PointerType>(RetType)) ||
!(Ty = dyn_cast<FunctionType>(PFTy->getElementType()))) {
// Pull out the types of all of the arguments...
@ -3194,7 +3194,7 @@ bool LLParser::ParseInvoke(Instruction *&Inst, PerFunctionState &PFS) {
FunctionType::param_iterator I = Ty->param_begin();
FunctionType::param_iterator E = Ty->param_end();
for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
const Type *ExpectedTy = 0;
Type *ExpectedTy = 0;
if (I != E) {
ExpectedTy = *I++;
} else if (!Ty->isVarArg()) {
@ -3498,8 +3498,8 @@ bool LLParser::ParseCall(Instruction *&Inst, PerFunctionState &PFS,
// If RetType is a non-function pointer type, then this is the short syntax
// for the call, which means that RetType is just the return type. Infer the
// rest of the function argument types from the arguments that are present.
const PointerType *PFTy = 0;
const FunctionType *Ty = 0;
PointerType *PFTy = 0;
FunctionType *Ty = 0;
if (!(PFTy = dyn_cast<PointerType>(RetType)) ||
!(Ty = dyn_cast<FunctionType>(PFTy->getElementType()))) {
// Pull out the types of all of the arguments...
@ -3530,7 +3530,7 @@ bool LLParser::ParseCall(Instruction *&Inst, PerFunctionState &PFS,
FunctionType::param_iterator I = Ty->param_begin();
FunctionType::param_iterator E = Ty->param_end();
for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
const Type *ExpectedTy = 0;
Type *ExpectedTy = 0;
if (I != E) {
ExpectedTy = *I++;
} else if (!Ty->isVarArg()) {
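The pointer-type guard that both GetGlobalVal overloads perform above, reduced to a hedged standalone sketch (this helper itself is not part of the parser):

static PointerType *requireGlobalRefType(Type *Ty) {
  // Global references must be spelled with a pointer type; with Type
  // de-constified, dyn_cast on a plain Type* yields a mutable PointerType*.
  return dyn_cast<PointerType>(Ty); // null when Ty is not a pointer type
}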

@ -142,8 +142,8 @@ namespace llvm {
/// GetGlobalVal - Get a value with the specified name or ID, creating a
/// forward reference record if needed. This can return null if the value
/// exists but does not have the right type.
GlobalValue *GetGlobalVal(const std::string &N, const Type *Ty, LocTy Loc);
GlobalValue *GetGlobalVal(unsigned ID, const Type *Ty, LocTy Loc);
GlobalValue *GetGlobalVal(const std::string &N, Type *Ty, LocTy Loc);
GlobalValue *GetGlobalVal(unsigned ID, Type *Ty, LocTy Loc);

// Helper Routines.
bool ParseToken(lltok::Kind T, const char *ErrMsg);
@ -249,8 +249,8 @@ namespace llvm {
/// GetVal - Get a value with the specified name or ID, creating a
/// forward reference record if needed. This can return null if the value
/// exists but does not have the right type.
Value *GetVal(const std::string &Name, const Type *Ty, LocTy Loc);
Value *GetVal(unsigned ID, const Type *Ty, LocTy Loc);
Value *GetVal(const std::string &Name, Type *Ty, LocTy Loc);
Value *GetVal(unsigned ID, Type *Ty, LocTy Loc);

/// SetInstName - After an instruction is parsed and inserted into its
/// basic block, this installs its name.
@ -269,14 +269,14 @@ namespace llvm {
BasicBlock *DefineBB(const std::string &Name, LocTy Loc);
};

bool ConvertValIDToValue(const Type *Ty, ValID &ID, Value *&V,
bool ConvertValIDToValue(Type *Ty, ValID &ID, Value *&V,
PerFunctionState *PFS);

bool ParseValue(const Type *Ty, Value *&V, PerFunctionState *PFS);
bool ParseValue(const Type *Ty, Value *&V, PerFunctionState &PFS) {
bool ParseValue(Type *Ty, Value *&V, PerFunctionState *PFS);
bool ParseValue(Type *Ty, Value *&V, PerFunctionState &PFS) {
return ParseValue(Ty, V, &PFS);
}
bool ParseValue(const Type *Ty, Value *&V, LocTy &Loc,
bool ParseValue(Type *Ty, Value *&V, LocTy &Loc,
PerFunctionState &PFS) {
Loc = Lex.getLoc();
return ParseValue(Ty, V, &PFS);
@ -310,7 +310,7 @@ namespace llvm {

// Constant Parsing.
bool ParseValID(ValID &ID, PerFunctionState *PFS = NULL);
bool ParseGlobalValue(const Type *Ty, Constant *&V);
bool ParseGlobalValue(Type *Ty, Constant *&V);
bool ParseGlobalTypeAndValue(Constant *&V);
bool ParseGlobalValueVector(SmallVectorImpl<Constant*> &Elts);
bool ParseMetadataListValue(ValID &ID, PerFunctionState *PFS);

@ -107,7 +107,7 @@ static int GetDecodedCastOpcode(unsigned Val) {
case bitc::CAST_BITCAST : return Instruction::BitCast;
}
}
static int GetDecodedBinaryOpcode(unsigned Val, const Type *Ty) {
static int GetDecodedBinaryOpcode(unsigned Val, Type *Ty) {
switch (Val) {
default: return -1;
case bitc::BINOP_ADD:
@ -142,7 +142,7 @@ namespace {
void *operator new(size_t s) {
return User::operator new(s, 1);
}
explicit ConstantPlaceHolder(const Type *Ty, LLVMContext& Context)
explicit ConstantPlaceHolder(Type *Ty, LLVMContext& Context)
: ConstantExpr(Ty, Instruction::UserOp1, &Op<0>(), 1) {
Op<0>() = UndefValue::get(Type::getInt32Ty(Context));
}
@ -198,7 +198,7 @@ void BitcodeReaderValueList::AssignValue(Value *V, unsigned Idx) {


Constant *BitcodeReaderValueList::getConstantFwdRef(unsigned Idx,
const Type *Ty) {
Type *Ty) {
if (Idx >= size())
resize(Idx + 1);

@ -213,7 +213,7 @@ Constant *BitcodeReaderValueList::getConstantFwdRef(unsigned Idx,
return C;
}

Value *BitcodeReaderValueList::getValueFwdRef(unsigned Idx, const Type *Ty) {
Value *BitcodeReaderValueList::getValueFwdRef(unsigned Idx, Type *Ty) {
if (Idx >= size())
resize(Idx + 1);

@ -1063,7 +1063,7 @@ bool BitcodeReader::ParseMetadata() {
unsigned Size = Record.size();
SmallVector<Value*, 8> Elts;
for (unsigned i = 0; i != Size; i += 2) {
const Type *Ty = getTypeByID(Record[i]);
Type *Ty = getTypeByID(Record[i]);
if (!Ty) return Error("Invalid METADATA_NODE record");
if (Ty->isMetadataTy())
Elts.push_back(MDValueList.getValueFwdRef(Record[i+1]));
@ -1163,7 +1163,7 @@ bool BitcodeReader::ParseConstants() {
SmallVector<uint64_t, 64> Record;

// Read all the records for this value table.
const Type *CurTy = Type::getInt32Ty(Context);
Type *CurTy = Type::getInt32Ty(Context);
unsigned NextCstNo = ValueList.size();
while (1) {
unsigned Code = Stream.ReadCode();
@ -1250,18 +1250,18 @@ bool BitcodeReader::ParseConstants() {
unsigned Size = Record.size();
std::vector<Constant*> Elts;

if (const StructType *STy = dyn_cast<StructType>(CurTy)) {
if (StructType *STy = dyn_cast<StructType>(CurTy)) {
for (unsigned i = 0; i != Size; ++i)
Elts.push_back(ValueList.getConstantFwdRef(Record[i],
STy->getElementType(i)));
V = ConstantStruct::get(STy, Elts);
} else if (const ArrayType *ATy = dyn_cast<ArrayType>(CurTy)) {
const Type *EltTy = ATy->getElementType();
} else if (ArrayType *ATy = dyn_cast<ArrayType>(CurTy)) {
Type *EltTy = ATy->getElementType();
for (unsigned i = 0; i != Size; ++i)
Elts.push_back(ValueList.getConstantFwdRef(Record[i], EltTy));
V = ConstantArray::get(ATy, Elts);
} else if (const VectorType *VTy = dyn_cast<VectorType>(CurTy)) {
const Type *EltTy = VTy->getElementType();
} else if (VectorType *VTy = dyn_cast<VectorType>(CurTy)) {
Type *EltTy = VTy->getElementType();
for (unsigned i = 0; i != Size; ++i)
Elts.push_back(ValueList.getConstantFwdRef(Record[i], EltTy));
V = ConstantVector::get(Elts);
@ -1274,8 +1274,8 @@ bool BitcodeReader::ParseConstants() {
if (Record.empty())
return Error("Invalid CST_AGGREGATE record");

const ArrayType *ATy = cast<ArrayType>(CurTy);
const Type *EltTy = ATy->getElementType();
ArrayType *ATy = cast<ArrayType>(CurTy);
Type *EltTy = ATy->getElementType();

unsigned Size = Record.size();
std::vector<Constant*> Elts;
@ -1288,8 +1288,8 @@ bool BitcodeReader::ParseConstants() {
if (Record.empty())
return Error("Invalid CST_AGGREGATE record");

const ArrayType *ATy = cast<ArrayType>(CurTy);
const Type *EltTy = ATy->getElementType();
ArrayType *ATy = cast<ArrayType>(CurTy);
Type *EltTy = ATy->getElementType();

unsigned Size = Record.size();
std::vector<Constant*> Elts;
@ -1335,7 +1335,7 @@ bool BitcodeReader::ParseConstants() {
if (Opc < 0) {
V = UndefValue::get(CurTy); // Unknown cast.
} else {
const Type *OpTy = getTypeByID(Record[1]);
Type *OpTy = getTypeByID(Record[1]);
if (!OpTy) return Error("Invalid CE_CAST record");
Constant *Op = ValueList.getConstantFwdRef(Record[2], OpTy);
V = ConstantExpr::getCast(Opc, Op, CurTy);
@ -1347,7 +1347,7 @@ bool BitcodeReader::ParseConstants() {
if (Record.size() & 1) return Error("Invalid CE_GEP record");
SmallVector<Constant*, 16> Elts;
for (unsigned i = 0, e = Record.size(); i != e; i += 2) {
const Type *ElTy = getTypeByID(Record[i]);
Type *ElTy = getTypeByID(Record[i]);
if (!ElTy) return Error("Invalid CE_GEP record");
Elts.push_back(ValueList.getConstantFwdRef(Record[i+1], ElTy));
}
@ -1368,7 +1368,7 @@ bool BitcodeReader::ParseConstants() {
break;
case bitc::CST_CODE_CE_EXTRACTELT: { // CE_EXTRACTELT: [opty, opval, opval]
if (Record.size() < 3) return Error("Invalid CE_EXTRACTELT record");
const VectorType *OpTy =
VectorType *OpTy =
dyn_cast_or_null<VectorType>(getTypeByID(Record[0]));
if (OpTy == 0) return Error("Invalid CE_EXTRACTELT record");
Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy);
@ -1377,7 +1377,7 @@ bool BitcodeReader::ParseConstants() {
break;
}
case bitc::CST_CODE_CE_INSERTELT: { // CE_INSERTELT: [opval, opval, opval]
const VectorType *OpTy = dyn_cast<VectorType>(CurTy);
VectorType *OpTy = dyn_cast<VectorType>(CurTy);
if (Record.size() < 3 || OpTy == 0)
return Error("Invalid CE_INSERTELT record");
Constant *Op0 = ValueList.getConstantFwdRef(Record[0], OpTy);
@ -1388,26 +1388,26 @@ bool BitcodeReader::ParseConstants() {
break;
}
case bitc::CST_CODE_CE_SHUFFLEVEC: { // CE_SHUFFLEVEC: [opval, opval, opval]
const VectorType *OpTy = dyn_cast<VectorType>(CurTy);
VectorType *OpTy = dyn_cast<VectorType>(CurTy);
if (Record.size() < 3 || OpTy == 0)
return Error("Invalid CE_SHUFFLEVEC record");
Constant *Op0 = ValueList.getConstantFwdRef(Record[0], OpTy);
Constant *Op1 = ValueList.getConstantFwdRef(Record[1], OpTy);
const Type *ShufTy = VectorType::get(Type::getInt32Ty(Context),
Type *ShufTy = VectorType::get(Type::getInt32Ty(Context),
OpTy->getNumElements());
Constant *Op2 = ValueList.getConstantFwdRef(Record[2], ShufTy);
V = ConstantExpr::getShuffleVector(Op0, Op1, Op2);
break;
}
case bitc::CST_CODE_CE_SHUFVEC_EX: { // [opty, opval, opval, opval]
const VectorType *RTy = dyn_cast<VectorType>(CurTy);
const VectorType *OpTy =
VectorType *RTy = dyn_cast<VectorType>(CurTy);
VectorType *OpTy =
dyn_cast_or_null<VectorType>(getTypeByID(Record[0]));
if (Record.size() < 4 || RTy == 0 || OpTy == 0)
return Error("Invalid CE_SHUFVEC_EX record");
Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy);
Constant *Op1 = ValueList.getConstantFwdRef(Record[2], OpTy);
const Type *ShufTy = VectorType::get(Type::getInt32Ty(Context),
Type *ShufTy = VectorType::get(Type::getInt32Ty(Context),
RTy->getNumElements());
Constant *Op2 = ValueList.getConstantFwdRef(Record[3], ShufTy);
V = ConstantExpr::getShuffleVector(Op0, Op1, Op2);
@ -1415,7 +1415,7 @@ bool BitcodeReader::ParseConstants() {
}
case bitc::CST_CODE_CE_CMP: { // CE_CMP: [opty, opval, opval, pred]
if (Record.size() < 4) return Error("Invalid CE_CMP record");
const Type *OpTy = getTypeByID(Record[0]);
Type *OpTy = getTypeByID(Record[0]);
if (OpTy == 0) return Error("Invalid CE_CMP record");
Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy);
Constant *Op1 = ValueList.getConstantFwdRef(Record[2], OpTy);
@ -1442,14 +1442,14 @@ bool BitcodeReader::ParseConstants() {
AsmStr += (char)Record[2+i];
for (unsigned i = 0; i != ConstStrSize; ++i)
ConstrStr += (char)Record[3+AsmStrSize+i];
const PointerType *PTy = cast<PointerType>(CurTy);
PointerType *PTy = cast<PointerType>(CurTy);
V = InlineAsm::get(cast<FunctionType>(PTy->getElementType()),
AsmStr, ConstrStr, HasSideEffects, IsAlignStack);
break;
}
case bitc::CST_CODE_BLOCKADDRESS:{
if (Record.size() < 3) return Error("Invalid CE_BLOCKADDRESS record");
const Type *FnTy = getTypeByID(Record[0]);
Type *FnTy = getTypeByID(Record[0]);
if (FnTy == 0) return Error("Invalid CE_BLOCKADDRESS record");
Function *Fn =
dyn_cast_or_null<Function>(ValueList.getConstantFwdRef(Record[1],FnTy));
@ -1662,7 +1662,7 @@ bool BitcodeReader::ParseModule() {
case bitc::MODULE_CODE_GLOBALVAR: {
if (Record.size() < 6)
return Error("Invalid MODULE_CODE_GLOBALVAR record");
const Type *Ty = getTypeByID(Record[0]);
Type *Ty = getTypeByID(Record[0]);
if (!Ty) return Error("Invalid MODULE_CODE_GLOBALVAR record");
if (!Ty->isPointerTy())
return Error("Global not a pointer type!");
@ -1711,11 +1711,11 @@ bool BitcodeReader::ParseModule() {
case bitc::MODULE_CODE_FUNCTION: {
if (Record.size() < 8)
return Error("Invalid MODULE_CODE_FUNCTION record");
const Type *Ty = getTypeByID(Record[0]);
Type *Ty = getTypeByID(Record[0]);
if (!Ty) return Error("Invalid MODULE_CODE_FUNCTION record");
if (!Ty->isPointerTy())
return Error("Function not a pointer type!");
const FunctionType *FTy =
FunctionType *FTy =
dyn_cast<FunctionType>(cast<PointerType>(Ty)->getElementType());
if (!FTy)
return Error("Function not a pointer to function type!");
@ -1757,7 +1757,7 @@ bool BitcodeReader::ParseModule() {
case bitc::MODULE_CODE_ALIAS: {
if (Record.size() < 3)
return Error("Invalid MODULE_ALIAS record");
const Type *Ty = getTypeByID(Record[0]);
Type *Ty = getTypeByID(Record[0]);
if (!Ty) return Error("Invalid MODULE_ALIAS record");
if (!Ty->isPointerTy())
return Error("Function not a pointer type!");
@ -2160,7 +2160,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
OpNum+2 != Record.size())
return Error("Invalid CAST record");

const Type *ResTy = getTypeByID(Record[OpNum]);
Type *ResTy = getTypeByID(Record[OpNum]);
int Opc = GetDecodedCastOpcode(Record[OpNum+1]);
if (Opc == -1 || ResTy == 0)
return Error("Invalid CAST record");
@ -2261,8 +2261,8 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
return Error("Invalid SELECT record");

// select condition can be either i1 or [N x i1]
if (const VectorType* vector_type =
dyn_cast<const VectorType>(Cond->getType())) {
if (VectorType* vector_type =
dyn_cast<VectorType>(Cond->getType())) {
// expect <n x i1>
if (vector_type->getElementType() != Type::getInt1Ty(Context))
return Error("Invalid SELECT condition type");
@ -2381,7 +2381,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
case bitc::FUNC_CODE_INST_SWITCH: { // SWITCH: [opty, op0, op1, ...]
if (Record.size() < 3 || (Record.size() & 1) == 0)
return Error("Invalid SWITCH record");
const Type *OpTy = getTypeByID(Record[0]);
Type *OpTy = getTypeByID(Record[0]);
Value *Cond = getFnValueByID(Record[1], OpTy);
BasicBlock *Default = getBasicBlock(Record[2]);
if (OpTy == 0 || Cond == 0 || Default == 0)
@ -2405,7 +2405,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
case bitc::FUNC_CODE_INST_INDIRECTBR: { // INDIRECTBR: [opty, op0, op1, ...]
if (Record.size() < 2)
return Error("Invalid INDIRECTBR record");
const Type *OpTy = getTypeByID(Record[0]);
Type *OpTy = getTypeByID(Record[0]);
Value *Address = getFnValueByID(Record[1], OpTy);
if (OpTy == 0 || Address == 0)
return Error("Invalid INDIRECTBR record");
@ -2437,8 +2437,8 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
if (getValueTypePair(Record, OpNum, NextValueNo, Callee))
return Error("Invalid INVOKE record");

const PointerType *CalleeTy = dyn_cast<PointerType>(Callee->getType());
const FunctionType *FTy = !CalleeTy ? 0 :
PointerType *CalleeTy = dyn_cast<PointerType>(Callee->getType());
FunctionType *FTy = !CalleeTy ? 0 :
dyn_cast<FunctionType>(CalleeTy->getElementType());

// Check that the right number of fixed parameters are here.
@ -2483,7 +2483,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
case bitc::FUNC_CODE_INST_PHI: { // PHI: [ty, val0,bb0, ...]
if (Record.size() < 1 || ((Record.size()-1)&1))
return Error("Invalid PHI record");
const Type *Ty = getTypeByID(Record[0]);
Type *Ty = getTypeByID(Record[0]);
if (!Ty) return Error("Invalid PHI record");

PHINode *PN = PHINode::Create(Ty, (Record.size()-1)/2);
@ -2502,9 +2502,9 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
case bitc::FUNC_CODE_INST_ALLOCA: { // ALLOCA: [instty, opty, op, align]
if (Record.size() != 4)
return Error("Invalid ALLOCA record");
const PointerType *Ty =
PointerType *Ty =
dyn_cast_or_null<PointerType>(getTypeByID(Record[0]));
const Type *OpTy = getTypeByID(Record[1]);
Type *OpTy = getTypeByID(Record[1]);
Value *Size = getFnValueByID(Record[2], OpTy);
unsigned Align = Record[3];
if (!Ty || !Size) return Error("Invalid ALLOCA record");
@ -2549,8 +2549,8 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
if (getValueTypePair(Record, OpNum, NextValueNo, Callee))
return Error("Invalid CALL record");

const PointerType *OpTy = dyn_cast<PointerType>(Callee->getType());
const FunctionType *FTy = 0;
PointerType *OpTy = dyn_cast<PointerType>(Callee->getType());
FunctionType *FTy = 0;
if (OpTy) FTy = dyn_cast<FunctionType>(OpTy->getElementType());
if (!FTy || Record.size() < FTy->getNumParams()+OpNum)
return Error("Invalid CALL record");
@ -2589,9 +2589,9 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
case bitc::FUNC_CODE_INST_VAARG: { // VAARG: [valistty, valist, instty]
if (Record.size() < 3)
return Error("Invalid VAARG record");
const Type *OpTy = getTypeByID(Record[0]);
Type *OpTy = getTypeByID(Record[0]);
Value *Op = getFnValueByID(Record[1], OpTy);
const Type *ResTy = getTypeByID(Record[2]);
Type *ResTy = getTypeByID(Record[2]);
if (!OpTy || !Op || !ResTy)
return Error("Invalid VAARG record");
I = new VAArgInst(Op, ResTy);
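A hedged restatement of the SELECT condition rule in the hunk above, pulled into a standalone predicate for illustration:

static bool isValidSelectCondition(Type *CondTy, LLVMContext &Context) {
  // Vector select: the condition must be <n x i1>.
  if (VectorType *VT = dyn_cast<VectorType>(CondTy))
    return VT->getElementType() == Type::getInt1Ty(Context);
  // Scalar select: the condition must be i1.
  return CondTy == Type::getInt1Ty(Context);
}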

@ -76,8 +76,8 @@ public:
ValuePtrs.resize(N);
}

Constant *getConstantFwdRef(unsigned Idx, const Type *Ty);
Value *getValueFwdRef(unsigned Idx, const Type *Ty);
Constant *getConstantFwdRef(unsigned Idx, Type *Ty);
Value *getValueFwdRef(unsigned Idx, Type *Ty);

void AssignValue(Value *V, unsigned Idx);

@ -212,7 +212,7 @@ public:
private:
Type *getTypeByID(unsigned ID);
Type *getTypeByIDOrNull(unsigned ID);
Value *getFnValueByID(unsigned ID, const Type *Ty) {
Value *getFnValueByID(unsigned ID, Type *Ty) {
if (Ty && Ty->isMetadataTy())
return MDValueList.getValueFwdRef(ID);
return ValueList.getValueFwdRef(ID, Ty);
@ -248,7 +248,7 @@ private:
return ResVal == 0;
}
bool getValue(SmallVector<uint64_t, 64> &Record, unsigned &Slot,
const Type *Ty, Value *&ResVal) {
Type *Ty, Value *&ResVal) {
if (Slot == Record.size()) return true;
unsigned ValNo = (unsigned)Record[Slot++];
ResVal = getFnValueByID(ValNo, Ty);

@ -216,7 +216,7 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {

// Loop over all of the types, emitting each in turn.
for (unsigned i = 0, e = TypeList.size(); i != e; ++i) {
const Type *T = TypeList[i];
Type *T = TypeList[i];
int AbbrevToUse = 0;
unsigned Code = 0;

@ -237,7 +237,7 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
TypeVals.push_back(cast<IntegerType>(T)->getBitWidth());
break;
case Type::PointerTyID: {
const PointerType *PTy = cast<PointerType>(T);
PointerType *PTy = cast<PointerType>(T);
// POINTER: [pointee type, address space]
Code = bitc::TYPE_CODE_POINTER;
TypeVals.push_back(VE.getTypeID(PTy->getElementType()));
@ -247,7 +247,7 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
break;
}
case Type::FunctionTyID: {
const FunctionType *FT = cast<FunctionType>(T);
FunctionType *FT = cast<FunctionType>(T);
// FUNCTION: [isvararg, attrid, retty, paramty x N]
Code = bitc::TYPE_CODE_FUNCTION;
TypeVals.push_back(FT->isVarArg());
@ -259,7 +259,7 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
break;
}
case Type::StructTyID: {
const StructType *ST = cast<StructType>(T);
StructType *ST = cast<StructType>(T);
// STRUCT: [ispacked, eltty x N]
TypeVals.push_back(ST->isPacked());
// Output all of the element types.
@ -286,7 +286,7 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
break;
}
case Type::ArrayTyID: {
const ArrayType *AT = cast<ArrayType>(T);
ArrayType *AT = cast<ArrayType>(T);
// ARRAY: [numelts, eltty]
Code = bitc::TYPE_CODE_ARRAY;
TypeVals.push_back(AT->getNumElements());
@ -295,7 +295,7 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
break;
}
case Type::VectorTyID: {
const VectorType *VT = cast<VectorType>(T);
VectorType *VT = cast<VectorType>(T);
// VECTOR [numelts, eltty]
Code = bitc::TYPE_CODE_VECTOR;
TypeVals.push_back(VT->getNumElements());
@ -716,7 +716,7 @@ static void WriteConstants(unsigned FirstVal, unsigned LastVal,
SmallVector<uint64_t, 64> Record;

const ValueEnumerator::ValueList &Vals = VE.getValues();
const Type *LastTy = 0;
Type *LastTy = 0;
for (unsigned i = FirstVal; i != LastVal; ++i) {
const Value *V = Vals[i].first;
// If we need to switch types, do so now.
@ -781,7 +781,7 @@ static void WriteConstants(unsigned FirstVal, unsigned LastVal,
}
} else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
Code = bitc::CST_CODE_FLOAT;
const Type *Ty = CFP->getType();
Type *Ty = CFP->getType();
if (Ty->isFloatTy() || Ty->isDoubleTy()) {
Record.push_back(CFP->getValueAPF().bitcastToAPInt().getZExtValue());
} else if (Ty->isX86_FP80Ty()) {
@ -1083,8 +1083,8 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
case Instruction::Invoke: {
const InvokeInst *II = cast<InvokeInst>(&I);
const Value *Callee(II->getCalledValue());
const PointerType *PTy = cast<PointerType>(Callee->getType());
const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
PointerType *PTy = cast<PointerType>(Callee->getType());
FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
Code = bitc::FUNC_CODE_INST_INVOKE;

Vals.push_back(VE.getAttributeID(II->getAttributes()));
@ -1149,8 +1149,8 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
break;
case Instruction::Call: {
const CallInst &CI = cast<CallInst>(I);
const PointerType *PTy = cast<PointerType>(CI.getCalledValue()->getType());
const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
PointerType *PTy = cast<PointerType>(CI.getCalledValue()->getType());
FunctionType *FTy = cast<FunctionType>(PTy->getElementType());

Code = bitc::FUNC_CODE_INST_CALL;
@ -315,7 +315,7 @@ void ValueEnumerator::EnumerateValue(const Value *V) {
|
||||
}
|
||||
|
||||
|
||||
void ValueEnumerator::EnumerateType(const Type *Ty) {
|
||||
void ValueEnumerator::EnumerateType(Type *Ty) {
|
||||
unsigned *TypeID = &TypeMap[Ty];
|
||||
|
||||
// We've already seen this type.
|
||||
@ -325,7 +325,7 @@ void ValueEnumerator::EnumerateType(const Type *Ty) {
|
||||
// If it is a non-anonymous struct, mark the type as being visited so that we
|
||||
// don't recursively visit it. This is safe because we allow forward
|
||||
// references of these in the bitcode reader.
|
||||
if (const StructType *STy = dyn_cast<StructType>(Ty))
|
||||
if (StructType *STy = dyn_cast<StructType>(Ty))
|
||||
if (!STy->isAnonymous())
|
||||
*TypeID = ~0U;
|
||||
|
||||
|
@ -35,12 +35,12 @@ class MDSymbolTable;
|
||||
|
||||
class ValueEnumerator {
|
||||
public:
|
||||
typedef std::vector<const Type*> TypeList;
|
||||
typedef std::vector<Type*> TypeList;
|
||||
|
||||
// For each value, we remember its Value* and occurrence frequency.
|
||||
typedef std::vector<std::pair<const Value*, unsigned> > ValueList;
|
||||
private:
|
||||
typedef DenseMap<const Type*, unsigned> TypeMapType;
|
||||
typedef DenseMap<Type*, unsigned> TypeMapType;
|
||||
TypeMapType TypeMap;
|
||||
TypeList Types;
|
||||
|
||||
@ -85,7 +85,7 @@ public:
|
||||
|
||||
unsigned getValueID(const Value *V) const;
|
||||
|
||||
unsigned getTypeID(const Type *T) const {
|
||||
unsigned getTypeID(Type *T) const {
|
||||
TypeMapType::const_iterator I = TypeMap.find(T);
|
||||
assert(I != TypeMap.end() && "Type not in ValueEnumerator!");
|
||||
return I->second-1;
|
||||
@ -140,7 +140,7 @@ private:
|
||||
void EnumerateFunctionLocalMetadata(const MDNode *N);
|
||||
void EnumerateNamedMDNode(const NamedMDNode *NMD);
|
||||
void EnumerateValue(const Value *V);
|
||||
void EnumerateType(const Type *T);
|
||||
void EnumerateType(Type *T);
|
||||
void EnumerateOperandType(const Value *V);
|
||||
void EnumerateAttributes(const AttrListPtr &PAL);
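The TypeMap and getTypeID changes above are where the de-constification pays off: DenseMap is keyed on plain Type*, so callers holding a const Type* would otherwise need a const_cast at every lookup. A minimal standalone sketch of that friction, using a toy Type and std::map in place of LLVM's Type and DenseMap (not the real API):

#include <cassert>
#include <map>

struct Type {};  // toy stand-in for llvm::Type

std::map<Type*, unsigned> TypeMap;  // keyed on non-const Type*, like TypeMapType

unsigned getTypeID(Type *T) {
  // Mirrors ValueEnumerator::getTypeID: look the type up, assert presence.
  std::map<Type*, unsigned>::const_iterator I = TypeMap.find(T);
  assert(I != TypeMap.end() && "Type not in map!");
  return I->second - 1;
}

int main() {
  Type Int32;
  TypeMap[&Int32] = 1;
  Type *T = &Int32;                  // with 'const Type *T' this call would
  return getTypeID(T) == 0 ? 0 : 1;  // not compile without a const_cast
}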
@ -31,7 +31,7 @@ using namespace llvm;
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
///
unsigned llvm::ComputeLinearIndex(const Type *Ty,
unsigned llvm::ComputeLinearIndex(Type *Ty,
const unsigned *Indices,
const unsigned *IndicesEnd,
unsigned CurIndex) {
@ -40,7 +40,7 @@ unsigned llvm::ComputeLinearIndex(const Type *Ty,
return CurIndex;

// Given a struct type, recursively traverse the elements.
if (const StructType *STy = dyn_cast<StructType>(Ty)) {
if (StructType *STy = dyn_cast<StructType>(Ty)) {
for (StructType::element_iterator EB = STy->element_begin(),
EI = EB,
EE = STy->element_end();
@ -52,8 +52,8 @@ unsigned llvm::ComputeLinearIndex(const Type *Ty,
return CurIndex;
}
// Given an array type, recursively traverse the elements.
else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
const Type *EltTy = ATy->getElementType();
else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Type *EltTy = ATy->getElementType();
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
if (Indices && *Indices == i)
return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
@ -72,12 +72,12 @@ unsigned llvm::ComputeLinearIndex(const Type *Ty,
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
SmallVectorImpl<EVT> &ValueVTs,
SmallVectorImpl<uint64_t> *Offsets,
uint64_t StartingOffset) {
// Given a struct type, recursively traverse the elements.
if (const StructType *STy = dyn_cast<StructType>(Ty)) {
if (StructType *STy = dyn_cast<StructType>(Ty)) {
const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
for (StructType::element_iterator EB = STy->element_begin(),
EI = EB,
@ -88,8 +88,8 @@ void llvm::ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
return;
}
// Given an array type, recursively traverse the elements.
if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
const Type *EltTy = ATy->getElementType();
if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Type *EltTy = ATy->getElementType();
uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
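ComputeLinearIndex above numbers the scalar leaves of an aggregate so that a chain of insertvalue/extractvalue indices maps to one flat index. A standalone sketch of the same recursion over a toy type tree (hypothetical Ty struct, not llvm::Type; an empty element list marks a scalar):

#include <cstdio>
#include <vector>

struct Ty { std::vector<Ty*> Elts; };  // Elts empty => scalar leaf

// Count the scalar leaves under T.
static unsigned countLeaves(const Ty *T) {
  if (T->Elts.empty()) return 1;
  unsigned N = 0;
  for (unsigned i = 0; i != T->Elts.size(); ++i)
    N += countLeaves(T->Elts[i]);
  return N;
}

// Linearized index of the member selected by [I, E) within T.
static unsigned linearIndex(const Ty *T, const unsigned *I,
                            const unsigned *E, unsigned Cur) {
  if (I == E) return Cur;                 // arrived at the member
  for (unsigned i = 0; i != T->Elts.size(); ++i) {
    if (i == *I) return linearIndex(T->Elts[i], I + 1, E, Cur);
    Cur += countLeaves(T->Elts[i]);       // skip the whole element
  }
  return Cur;
}

int main() {
  Ty A, L0, L1, L2, S, B;        // S = { A, {L0, L1}, L2 }
  B.Elts.push_back(&L0); B.Elts.push_back(&L1);
  S.Elts.push_back(&A); S.Elts.push_back(&B); S.Elts.push_back(&L2);
  unsigned Idx[2] = {1, 1};      // second element of S's second member
  std::printf("%u\n", linearIndex(&S, Idx, Idx + 2, 0));  // prints 2
  return 0;
}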
@ -1009,7 +1009,7 @@ void AsmPrinter::EmitConstantPool() {
unsigned NewOffset = (Offset + AlignMask) & ~AlignMask;
OutStreamer.EmitFill(NewOffset - Offset, 0/*fillval*/, 0/*addrspace*/);

const Type *Ty = CPE.getType();
Type *Ty = CPE.getType();
Offset = NewOffset + TM.getTargetData()->getTypeAllocSize(Ty);
OutStreamer.EmitLabel(GetCPISymbol(CPI));

@ -1447,7 +1447,7 @@ static const MCExpr *LowerConstant(const Constant *CV, AsmPrinter &AP) {
// Support only foldable casts to/from pointers that can be eliminated by
// changing the pointer to the appropriately sized integer type.
Constant *Op = CE->getOperand(0);
const Type *Ty = CE->getType();
Type *Ty = CE->getType();

const MCExpr *OpExpr = LowerConstant(Op, AP);

@ -482,7 +482,7 @@ void ELFWriter::EmitGlobalConstant(const Constant *CV, ELFSection &GblS) {
EmitGlobalConstantLargeInt(CI, GblS);
return;
} else if (const ConstantVector *CP = dyn_cast<ConstantVector>(CV)) {
const VectorType *PTy = CP->getType();
VectorType *PTy = CP->getType();
for (unsigned I = 0, E = PTy->getNumElements(); I < E; ++I)
EmitGlobalConstant(CP->getOperand(I), GblS);
return;
@ -552,7 +552,7 @@ CstExprResTy ELFWriter::ResolveConstantExpr(const Constant *CV) {
}
case Instruction::PtrToInt: {
Constant *Op = CE->getOperand(0);
const Type *Ty = CE->getType();
Type *Ty = CE->getType();

// We can emit the pointer value into this slot if the slot is an
// integer slot greater or equal to the size of the pointer.

@ -27,7 +27,7 @@ using namespace llvm;
template <class ArgIt>
static void EnsureFunctionExists(Module &M, const char *Name,
ArgIt ArgBegin, ArgIt ArgEnd,
const Type *RetTy) {
Type *RetTy) {
// Insert a correctly-typed definition now.
std::vector<Type *> ParamTys;
for (ArgIt I = ArgBegin; I != ArgEnd; ++I)
@ -64,7 +64,7 @@ static void EnsureFPIntrinsicsExist(Module &M, Function *Fn,
template <class ArgIt>
static CallInst *ReplaceCallWith(const char *NewFn, CallInst *CI,
ArgIt ArgBegin, ArgIt ArgEnd,
const Type *RetTy) {
Type *RetTy) {
// If we haven't already looked up this function, check to see if the
// program already contains a function with this name.
Module *M = CI->getParent()->getParent()->getParent();
@ -462,7 +462,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
break; // Strip out annotate intrinsic

case Intrinsic::memcpy: {
const IntegerType *IntPtr = TD.getIntPtrType(Context);
IntegerType *IntPtr = TD.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
@ -473,7 +473,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
break;
}
case Intrinsic::memmove: {
const IntegerType *IntPtr = TD.getIntPtrType(Context);
IntegerType *IntPtr = TD.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
@ -484,7 +484,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
break;
}
case Intrinsic::memset: {
const IntegerType *IntPtr = TD.getIntPtrType(Context);
IntegerType *IntPtr = TD.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];

@ -619,7 +619,7 @@ void MachineJumpTableInfo::dump() const { print(dbgs()); }
// MachineConstantPool implementation
//===----------------------------------------------------------------------===//

const Type *MachineConstantPoolEntry::getType() const {
Type *MachineConstantPoolEntry::getType() const {
if (isMachineConstantPoolEntry())
return Val.MachineCPVal->getType();
return Val.ConstVal->getType();

@ -6479,7 +6479,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff;

unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff);
const Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
if (NewAlign < TLI.getTargetData()->getABITypeAlignment(NewVTTy))
return SDValue();

@ -6542,7 +6542,7 @@ SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) {

unsigned LDAlign = LD->getAlignment();
unsigned STAlign = ST->getAlignment();
const Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext());
Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext());
unsigned ABIAlign = TLI.getTargetData()->getABITypeAlignment(IntVTTy);
if (LDAlign < ABIAlign || STAlign < ABIAlign)
return SDValue();
@ -7447,7 +7447,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
const_cast<ConstantFP*>(FV->getConstantFPValue()),
const_cast<ConstantFP*>(TV->getConstantFPValue())
};
const Type *FPTy = Elts[0]->getType();
Type *FPTy = Elts[0]->getType();
const TargetData &TD = *TLI.getTargetData();

// Create a ConstantArray of the two constants.

@ -422,12 +422,12 @@ bool FastISel::SelectGetElementPtr(const User *I) {

bool NIsKill = hasTrivialKill(I->getOperand(0));

const Type *Ty = I->getOperand(0)->getType();
Type *Ty = I->getOperand(0)->getType();
MVT VT = TLI.getPointerTy();
for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
E = I->op_end(); OI != E; ++OI) {
const Value *Idx = *OI;
if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
if (StructType *StTy = dyn_cast<StructType>(Ty)) {
unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
if (Field) {
// N = N + Offset
@ -839,7 +839,7 @@ FastISel::SelectExtractValue(const User *U) {
return false;

const Value *Op0 = EVI->getOperand(0);
const Type *AggTy = Op0->getType();
Type *AggTy = Op0->getType();

// Get the base result register.
unsigned ResultReg;
@ -1074,7 +1074,7 @@ unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
if (MaterialReg == 0) {
// This is a bit ugly/slow, but failing here means falling out of
// fast-isel, which would be very slow.
const IntegerType *ITy = IntegerType::get(FuncInfo.Fn->getContext(),
IntegerType *ITy = IntegerType::get(FuncInfo.Fn->getContext(),
VT.getSizeInBits());
MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
}
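SelectGetElementPtr above folds GEP indices into a running offset: a struct index adds that field's offset, an array index adds index times element size. A standalone sketch of that accumulation with hypothetical layout records (not LLVM's StructLayout/TargetData):

#include <cstdint>
#include <cstdio>
#include <vector>

// Each step of the walk is either a struct (field offsets known) or
// an array (fixed element size). Toy records, not the real API.
struct Step {
  bool IsStruct;
  std::vector<uint64_t> FieldOffsets;  // used when IsStruct
  uint64_t EltSize;                    // used otherwise
};

uint64_t gepOffset(const std::vector<Step> &Path,
                   const std::vector<uint64_t> &Idx) {
  uint64_t Off = 0;
  for (unsigned i = 0; i != Path.size(); ++i)
    Off += Path[i].IsStruct ? Path[i].FieldOffsets[Idx[i]]
                            : Idx[i] * Path[i].EltSize;
  return Off;
}

int main() {
  // struct { int32 a; int32 b[4]; }: &x.b[2] is at 4 + 2*4 = 12.
  Step S; S.IsStruct = true;  S.FieldOffsets.push_back(0);
  S.FieldOffsets.push_back(4); S.EltSize = 0;
  Step A; A.IsStruct = false; A.EltSize = 4;
  std::vector<Step> Path; Path.push_back(S); Path.push_back(A);
  std::vector<uint64_t> Idx; Idx.push_back(1); Idx.push_back(2);
  std::printf("%llu\n", (unsigned long long)gepOffset(Path, Idx));  // 12
  return 0;
}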
@ -78,7 +78,7 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf) {
for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I)
if (const AllocaInst *AI = dyn_cast<AllocaInst>(I))
if (const ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
const Type *Ty = AI->getAllocatedType();
Type *Ty = AI->getAllocatedType();
uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
unsigned Align =
std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
@ -216,7 +216,7 @@ unsigned FunctionLoweringInfo::CreateReg(EVT VT) {
/// In the case that the given value has struct or array type, this function
/// will assign registers for each member or element.
///
unsigned FunctionLoweringInfo::CreateRegs(const Type *Ty) {
unsigned FunctionLoweringInfo::CreateRegs(Type *Ty) {
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, Ty, ValueVTs);

@ -260,7 +260,7 @@ FunctionLoweringInfo::GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth) {
/// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
/// register based on the LiveOutInfo of its operands.
void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
const Type *Ty = PN->getType();
Type *Ty = PN->getType();
if (!Ty->isIntegerTy() || Ty->isVectorTy())
return;

@ -356,7 +356,7 @@ void InstrEmitter::AddOperand(MachineInstr *MI, SDValue Op,
} else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
int Offset = CP->getOffset();
unsigned Align = CP->getAlignment();
const Type *Type = CP->getType();
Type *Type = CP->getType();
// MachineConstantPool wants an explicit alignment.
if (Align == 0) {
Align = TM->getTargetData()->getPrefTypeAlignment(Type);

@ -365,7 +365,7 @@ static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP,
// smaller type.
TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) &&
TLI.ShouldShrinkFPConstant(OrigVT)) {
const Type *SType = SVT.getTypeForEVT(*DAG.getContext());
Type *SType = SVT.getTypeForEVT(*DAG.getContext());
LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType));
VT = SVT;
Extend = true;
@ -1124,7 +1124,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// If this is an unaligned load and the target doesn't support it,
// expand it.
if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
const Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
if (LD->getAlignment() < ABIAlignment){
Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
@ -1311,7 +1311,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// If this is an unaligned load and the target doesn't support it,
// expand it.
if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
const Type *Ty =
Type *Ty =
LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment =
TLI.getTargetData()->getABITypeAlignment(Ty);
@ -1491,7 +1491,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// If this is an unaligned store and the target doesn't support it,
// expand it.
if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty);
if (ST->getAlignment() < ABIAlignment)
Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()),
@ -1596,7 +1596,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// If this is an unaligned store and the target doesn't support it,
// expand it.
if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty);
if (ST->getAlignment() < ABIAlignment)
Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()),
@ -1999,7 +1999,7 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
unsigned SrcSize = SrcOp.getValueType().getSizeInBits();
unsigned SlotSize = SlotVT.getSizeInBits();
unsigned DestSize = DestVT.getSizeInBits();
const Type *DestType = DestVT.getTypeForEVT(*DAG.getContext());
Type *DestType = DestVT.getTypeForEVT(*DAG.getContext());
unsigned DestAlign = TLI.getTargetData()->getPrefTypeAlignment(DestType);

// Emit a store to the stack slot. Use a truncstore if the input value is
@ -2106,7 +2106,7 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
}
} else {
assert(Node->getOperand(i).getOpcode() == ISD::UNDEF);
const Type *OpNTy = EltVT.getTypeForEVT(*DAG.getContext());
Type *OpNTy = EltVT.getTypeForEVT(*DAG.getContext());
CV.push_back(UndefValue::get(OpNTy));
}
}
@ -2159,7 +2159,7 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
TargetLowering::ArgListEntry Entry;
for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
EVT ArgVT = Node->getOperand(i).getValueType();
const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy;
Entry.isSExt = isSigned;
Entry.isZExt = !isSigned;
@ -2169,7 +2169,7 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
TLI.getPointerTy());

// Splice the libcall in wherever FindInputOutputChains tells us to.
const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());

// isTailCall may be true since the callee does not reference caller stack
// frame. Check if it's in the right position.
@ -2210,7 +2210,7 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, EVT RetVT,
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
TLI.getPointerTy());

const Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
std::pair<SDValue,SDValue> CallInfo =
TLI.LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false,
false, 0, TLI.getLibcallCallingConv(LC), false,
@ -2237,7 +2237,7 @@ SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC,
TargetLowering::ArgListEntry Entry;
for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) {
EVT ArgVT = Node->getOperand(i).getValueType();
const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = Node->getOperand(i);
Entry.Ty = ArgTy;
Entry.isSExt = isSigned;
@ -2248,7 +2248,7 @@ SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC,
TLI.getPointerTy());

// Splice the libcall in wherever FindInputOutputChains tells us to.
const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
std::pair<SDValue, SDValue> CallInfo =
TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,
@ -2360,13 +2360,13 @@ SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node,
SDValue InChain = DAG.getEntryNode();

EVT RetVT = Node->getValueType(0);
const Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());

TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
EVT ArgVT = Node->getOperand(i).getValueType();
const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy;
Entry.isSExt = isSigned;
Entry.isZExt = !isSigned;
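The libcall expansions above all follow one recipe: for each SDNode operand, record the IR type for its value type plus signext/zeroext flags, then hand the list to TLI.LowerCallTo. A toy sketch of that argument-marshalling step (hypothetical ArgEntry mirroring TargetLowering::ArgListEntry, with a string standing in for the type):

#include <string>
#include <vector>

struct ArgEntry {           // toy mirror of TargetLowering::ArgListEntry
  std::string Ty;           // stands in for 'Type *Ty'
  bool isSExt, isZExt;
};

std::vector<ArgEntry> buildLibcallArgs(const std::vector<std::string> &OpTys,
                                       bool isSigned) {
  std::vector<ArgEntry> Args;
  for (unsigned i = 0; i != OpTys.size(); ++i) {
    ArgEntry E;
    E.Ty = OpTys[i];
    E.isSExt = isSigned;    // signed libcalls sign-extend every argument
    E.isZExt = !isSigned;   // unsigned ones zero-extend instead
    Args.push_back(E);
  }
  return Args;
}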
@ -2176,9 +2176,9 @@ void DAGTypeLegalizer::ExpandIntRes_UADDSUBO(SDNode *N,
void DAGTypeLegalizer::ExpandIntRes_XMULO(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT VT = N->getValueType(0);
const Type *RetTy = VT.getTypeForEVT(*DAG.getContext());
Type *RetTy = VT.getTypeForEVT(*DAG.getContext());
EVT PtrVT = TLI.getPointerTy();
const Type *PtrTy = PtrVT.getTypeForEVT(*DAG.getContext());
Type *PtrTy = PtrVT.getTypeForEVT(*DAG.getContext());
DebugLoc dl = N->getDebugLoc();

// A divide for UMULO should be faster than a function call.
@ -2222,7 +2222,7 @@ void DAGTypeLegalizer::ExpandIntRes_XMULO(SDNode *N,
TargetLowering::ArgListEntry Entry;
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
EVT ArgVT = N->getOperand(i).getValueType();
const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = N->getOperand(i);
Entry.Ty = ArgTy;
Entry.isSExt = true;

@ -1046,7 +1046,7 @@ SDValue DAGTypeLegalizer::MakeLibCall(RTLIB::Libcall LC, EVT RetVT,
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
TLI.getPointerTy());

const Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
std::pair<SDValue,SDValue> CallInfo =
TLI.LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false,
false, 0, TLI.getLibcallCallingConv(LC), false,
@ -1067,7 +1067,7 @@ DAGTypeLegalizer::ExpandChainLibCall(RTLIB::Libcall LC,
TargetLowering::ArgListEntry Entry;
for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) {
EVT ArgVT = Node->getOperand(i).getValueType();
const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = Node->getOperand(i);
Entry.Ty = ArgTy;
Entry.isSExt = isSigned;
@ -1078,7 +1078,7 @@ DAGTypeLegalizer::ExpandChainLibCall(RTLIB::Libcall LC,
TLI.getPointerTy());

// Splice the libcall in wherever FindInputOutputChains tells us to.
const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
std::pair<SDValue, SDValue> CallInfo =
TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,

@ -670,7 +670,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
// Store the new element. This may be larger than the vector element type,
// so use a truncating store.
SDValue EltPtr = GetVectorElementPointer(StackPtr, EltVT, Idx);
const Type *VecType = VecVT.getTypeForEVT(*DAG.getContext());
Type *VecType = VecVT.getTypeForEVT(*DAG.getContext());
unsigned Alignment =
TLI.getTargetData()->getPrefTypeAlignment(VecType);
Store = DAG.getTruncStore(Store, dl, Elt, EltPtr, MachinePointerInfo(), EltVT,

@ -821,7 +821,7 @@ static void VerifyMachineNode(SDNode *N) {
/// given type.
///
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
const Type *Ty = VT == MVT::iPTR ?
Type *Ty = VT == MVT::iPTR ?
PointerType::get(Type::getInt8Ty(*getContext()), 0) :
VT.getTypeForEVT(*getContext());

@ -1432,7 +1432,7 @@ SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
unsigned ByteSize = VT.getStoreSize();
const Type *Ty = VT.getTypeForEVT(*getContext());
Type *Ty = VT.getTypeForEVT(*getContext());
unsigned StackAlign =
std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty), minAlign);

@ -1445,8 +1445,8 @@ SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
VT2.getStoreSizeInBits())/8;
const Type *Ty1 = VT1.getTypeForEVT(*getContext());
const Type *Ty2 = VT2.getTypeForEVT(*getContext());
Type *Ty1 = VT1.getTypeForEVT(*getContext());
Type *Ty2 = VT2.getTypeForEVT(*getContext());
const TargetData *TD = TLI.getTargetData();
unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
TD->getPrefTypeAlignment(Ty2));
@ -3425,7 +3425,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
return SDValue();

if (DstAlignCanChange) {
const Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
if (NewAlign > Align) {
// Give the stack frame object a larger alignment if needed.
@ -3514,7 +3514,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
return SDValue();

if (DstAlignCanChange) {
const Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
if (NewAlign > Align) {
// Give the stack frame object a larger alignment if needed.
@ -3589,7 +3589,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
return SDValue();

if (DstAlignCanChange) {
const Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
if (NewAlign > Align) {
// Give the stack frame object a larger alignment if needed.
@ -3782,7 +3782,7 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
return Result;

// Emit a library call.
const Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*getContext());
Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Node = Dst; Entry.Ty = IntPtrTy;
@ -6528,7 +6528,7 @@ unsigned GlobalAddressSDNode::getAddressSpace() const {
}

const Type *ConstantPoolSDNode::getType() const {
Type *ConstantPoolSDNode::getType() const {
if (isMachineConstantPoolEntry())
return Val.MachineCPVal->getType();
return Val.ConstVal->getType();
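ConstantPoolSDNode::getType above (like MachineConstantPoolEntry::getType earlier) is a two-way dispatch over a tagged union: the entry holds either a target-specific constant or a plain Constant. A toy sketch of the shape (hypothetical types, not the real nodes):

struct Type;  // opaque in this sketch

struct Constant       { Type *Ty; Type *getType() const { return Ty; } };
struct MachineCPValue { Type *Ty; Type *getType() const { return Ty; } };

struct ConstantPoolEntry {
  bool IsMachineCPVal;  // the tag isMachineConstantPoolEntry() reads
  union {
    MachineCPValue *MachineCPVal;
    Constant *ConstVal;
  } Val;
  Type *getType() const {
    return IsMachineCPVal ? Val.MachineCPVal->getType()
                          : Val.ConstVal->getType();
  }
};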
@ -578,7 +578,7 @@ namespace {
: ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}

RegsForValue(LLVMContext &Context, const TargetLowering &tli,
unsigned Reg, const Type *Ty) {
unsigned Reg, Type *Ty) {
ComputeValueVTs(tli, Ty, ValueVTs);

for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
@ -1069,7 +1069,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
return DAG.getBlockAddress(BA, VT);

const VectorType *VecTy = cast<VectorType>(V->getType());
VectorType *VecTy = cast<VectorType>(V->getType());
unsigned NumElements = VecTy->getNumElements();

// Now that we know the number and type of the elements, get that number of
@ -2458,7 +2458,7 @@ void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {

void SelectionDAGBuilder::visitFSub(const User &I) {
// -0.0 - X --> fneg
const Type *Ty = I.getType();
Type *Ty = I.getType();
if (isa<Constant>(I.getOperand(0)) &&
I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
SDValue Op2 = getValue(I.getOperand(1));
@ -2886,8 +2886,8 @@ void SelectionDAGBuilder::visitShuffleVector(const User &I) {
void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
const Value *Op0 = I.getOperand(0);
const Value *Op1 = I.getOperand(1);
const Type *AggTy = I.getType();
const Type *ValTy = Op1->getType();
Type *AggTy = I.getType();
Type *ValTy = Op1->getType();
bool IntoUndef = isa<UndefValue>(Op0);
bool FromUndef = isa<UndefValue>(Op1);

@ -2927,8 +2927,8 @@ void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {

void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
const Value *Op0 = I.getOperand(0);
const Type *AggTy = Op0->getType();
const Type *ValTy = I.getType();
Type *AggTy = Op0->getType();
Type *ValTy = I.getType();
bool OutOfUndef = isa<UndefValue>(Op0);

unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
@ -2961,12 +2961,12 @@ void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {

void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
SDValue N = getValue(I.getOperand(0));
const Type *Ty = I.getOperand(0)->getType();
Type *Ty = I.getOperand(0)->getType();

for (GetElementPtrInst::const_op_iterator OI = I.op_begin()+1, E = I.op_end();
OI != E; ++OI) {
const Value *Idx = *OI;
if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
if (StructType *StTy = dyn_cast<StructType>(Ty)) {
unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
if (Field) {
// N = N + Offset
@ -3037,7 +3037,7 @@ void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
if (FuncInfo.StaticAllocaMap.count(&I))
return; // getValue will auto-populate this.

const Type *Ty = I.getAllocatedType();
Type *Ty = I.getAllocatedType();
uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
unsigned Align =
std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
@ -3087,7 +3087,7 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
const Value *SV = I.getOperand(0);
SDValue Ptr = getValue(SV);

const Type *Ty = I.getType();
Type *Ty = I.getType();

bool isVolatile = I.isVolatile();
bool isNonTemporal = I.getMetadata("nontemporal") != 0;
@ -3290,7 +3290,7 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
}

if (!I.getType()->isVoidTy()) {
if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
EVT VT = TLI.getValueType(PTy);
Result = DAG.getNode(ISD::BITCAST, getCurDebugLoc(), VT, Result);
}
@ -4918,9 +4918,9 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
bool isTailCall,
MachineBasicBlock *LandingPad) {
const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
const Type *RetTy = FTy->getReturnType();
PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
FunctionType *FTy = cast<FunctionType>(PT->getElementType());
Type *RetTy = FTy->getReturnType();
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
MCSymbol *BeginLabel = 0;

@ -4949,7 +4949,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
FTy->getReturnType());
MachineFunction &MF = DAG.getMachineFunction();
DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
const Type *StackSlotPtrType = PointerType::getUnqual(FTy->getReturnType());
Type *StackSlotPtrType = PointerType::getUnqual(FTy->getReturnType());

DemoteStackSlot = DAG.getFrameIndex(DemoteStackIdx, TLI.getPointerTy());
Entry.Node = DemoteStackSlot;
@ -5037,7 +5037,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
// The instruction result is the result of loading from the
// hidden sret parameter.
SmallVector<EVT, 1> PVTs;
const Type *PtrRetTy = PointerType::getUnqual(FTy->getReturnType());
Type *PtrRetTy = PointerType::getUnqual(FTy->getReturnType());

ComputeValueVTs(TLI, PtrRetTy, PVTs);
assert(PVTs.size() == 1 && "Pointers should fit in one register");
@ -5130,7 +5130,7 @@ static bool IsOnlyUsedInZeroEqualityComparison(const Value *V) {
}

static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
const Type *LoadTy,
Type *LoadTy,
SelectionDAGBuilder &Builder) {

// Check to see if this load can be trivially constant folded, e.g. if the
@ -5193,7 +5193,7 @@ bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
if (Size && IsOnlyUsedInZeroEqualityComparison(&I)) {
bool ActuallyDoIt = true;
MVT LoadVT;
const Type *LoadTy;
Type *LoadTy;
switch (Size->getZExtValue()) {
default:
LoadVT = MVT::Other;
@ -5261,14 +5261,14 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {

// See if any floating point values are being passed to this function. This is
// used to emit an undefined reference to fltused on Windows.
const FunctionType *FT =
FunctionType *FT =
cast<FunctionType>(I.getCalledValue()->getType()->getContainedType(0));
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
if (FT->isVarArg() &&
!MMI.callsExternalVAFunctionWithFloatingPointArguments()) {
for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
const Type* T = I.getArgOperand(i)->getType();
for (po_iterator<const Type*> i = po_begin(T), e = po_end(T);
Type* T = I.getArgOperand(i)->getType();
for (po_iterator<Type*> i = po_begin(T), e = po_end(T);
i != e; ++i) {
if (!i->isFloatingPointTy()) continue;
MMI.setCallsExternalVAFunctionWithFloatingPointArguments(true);
@ -5412,20 +5412,20 @@ public:
if (isa<BasicBlock>(CallOperandVal))
return TLI.getPointerTy();

const llvm::Type *OpTy = CallOperandVal->getType();
llvm::Type *OpTy = CallOperandVal->getType();

// FIXME: code duplicated from TargetLowering::ParseConstraints().
// If this is an indirect operand, the operand is a pointer to the
// accessed type.
if (isIndirect) {
const llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
if (!PtrTy)
report_fatal_error("Indirect operand for inline asm not a pointer!");
OpTy = PtrTy->getElementType();
}

// Look for vector wrapped in a struct. e.g. { <16 x i8> }.
if (const StructType *STy = dyn_cast<StructType>(OpTy))
if (StructType *STy = dyn_cast<StructType>(OpTy))
if (STy->getNumElements() == 1)
OpTy = STy->getElementType(0);
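The inline-asm code above looks through a one-element struct so that an operand declared as { <16 x i8> } is treated as the vector inside it. A toy sketch of that unwrap (hypothetical Ty, not llvm::StructType):

#include <vector>

struct Ty {
  bool IsStruct;
  std::vector<Ty*> Elts;  // struct members, when IsStruct
};

// If OpTy is a single-element struct, return the wrapped element;
// otherwise return OpTy unchanged.
Ty *lookThroughSingleElementStruct(Ty *OpTy) {
  if (OpTy->IsStruct && OpTy->Elts.size() == 1)
    return OpTy->Elts[0];
  return OpTy;
}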
@ -5639,7 +5639,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// corresponding argument.
assert(!CS.getType()->isVoidTy() &&
"Bad inline asm!");
if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
OpVT = TLI.getValueType(STy->getElementType(ResNo));
} else {
assert(ResNo == 0 && "Asm only has one result!");
@ -5750,7 +5750,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
} else {
// Otherwise, create a stack slot and emit a store to it before the
// asm.
const Type *Ty = OpVal->getType();
Type *Ty = OpVal->getType();
uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty);
MachineFunction &MF = DAG.getMachineFunction();
@ -6111,7 +6111,7 @@ void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
/// FIXME: When all targets are
/// migrated to using LowerCall, this hook should be integrated into SDISel.
std::pair<SDValue, SDValue>
TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
TargetLowering::LowerCallTo(SDValue Chain, Type *RetTy,
bool RetSExt, bool RetZExt, bool isVarArg,
bool isInreg, unsigned NumFixedArgs,
CallingConv::ID CallConv, bool isTailCall,
@ -6128,7 +6128,7 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
for (unsigned Value = 0, NumValues = ValueVTs.size();
Value != NumValues; ++Value) {
EVT VT = ValueVTs[Value];
const Type *ArgTy = VT.getTypeForEVT(RetTy->getContext());
Type *ArgTy = VT.getTypeForEVT(RetTy->getContext());
SDValue Op = SDValue(Args[i].Node.getNode(),
Args[i].Node.getResNo() + Value);
ISD::ArgFlagsTy Flags;
@ -6145,8 +6145,8 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
Flags.setSRet();
if (Args[i].isByVal) {
Flags.setByVal();
const PointerType *Ty = cast<PointerType>(Args[i].Ty);
const Type *ElementTy = Ty->getElementType();
PointerType *Ty = cast<PointerType>(Args[i].Ty);
Type *ElementTy = Ty->getElementType();
Flags.setByValSize(getTargetData()->getTypeAllocSize(ElementTy));
// For ByVal, alignment should come from FE. BE will guess if this
// info is not there but there are cases it cannot get right.
@ -6356,7 +6356,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
for (unsigned Value = 0, NumValues = ValueVTs.size();
Value != NumValues; ++Value) {
EVT VT = ValueVTs[Value];
const Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
ISD::ArgFlagsTy Flags;
unsigned OriginalAlignment =
TD->getABITypeAlignment(ArgTy);
@ -6371,8 +6371,8 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
Flags.setSRet();
if (F.paramHasAttr(Idx, Attribute::ByVal)) {
Flags.setByVal();
const PointerType *Ty = cast<PointerType>(I->getType());
const Type *ElementTy = Ty->getElementType();
PointerType *Ty = cast<PointerType>(I->getType());
Type *ElementTy = Ty->getElementType();
Flags.setByValSize(TD->getTypeAllocSize(ElementTy));
// For ByVal, alignment should be passed from FE. BE will guess if
// this info is not there but there are cases it cannot get right.

@ -996,7 +996,7 @@ unsigned TargetLowering::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
/// type of the given function. This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
void llvm::GetReturnInfo(const Type* ReturnType, Attributes attr,
void llvm::GetReturnInfo(Type* ReturnType, Attributes attr,
SmallVectorImpl<ISD::OutputArg> &Outs,
const TargetLowering &TLI,
SmallVectorImpl<uint64_t> *Offsets) {
@ -1054,7 +1054,7 @@ void llvm::GetReturnInfo(const Type* ReturnType, Attributes attr,
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
unsigned TargetLowering::getByValTypeAlignment(const Type *Ty) const {
unsigned TargetLowering::getByValTypeAlignment(Type *Ty) const {
return TD->getCallFrameTypeAlignment(Ty);
}

@ -2840,7 +2840,7 @@ TargetLowering::AsmOperandInfoVector TargetLowering::ParseConstraints(
// corresponding argument.
assert(!CS.getType()->isVoidTy() &&
"Bad inline asm!");
if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
OpInfo.ConstraintVT = getValueType(STy->getElementType(ResNo));
} else {
assert(ResNo == 0 && "Asm only has one result!");
@ -2857,16 +2857,16 @@ TargetLowering::AsmOperandInfoVector TargetLowering::ParseConstraints(
}

if (OpInfo.CallOperandVal) {
const llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
if (OpInfo.isIndirect) {
const llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
if (!PtrTy)
report_fatal_error("Indirect operand for inline asm not a pointer!");
OpTy = PtrTy->getElementType();
}

// Look for vector wrapped in a struct. e.g. { <16 x i8> }.
if (const StructType *STy = dyn_cast<StructType>(OpTy))
if (StructType *STy = dyn_cast<StructType>(OpTy))
if (STy->getNumElements() == 1)
OpTy = STy->getElementType(0);

@ -3187,7 +3187,7 @@ void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLowering::isLegalAddressingMode(const AddrMode &AM,
const Type *Ty) const {
Type *Ty) const {
// The default implementation of this implements a conservative RISCy, r+r and
// r+i addr mode.
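The default isLegalAddressingMode above accepts only the conservative reg, reg+reg, and reg+imm forms. A rough sketch of such a predicate over a toy AddrMode (a hypothetical mirror; the real struct also carries a GlobalValue base and more checks):

#include <cstdint>

struct AddrMode {      // toy mirror of TargetLowering::AddrMode
  int64_t BaseOffs;    // constant displacement
  bool HasBaseReg;     // base register present
  int64_t Scale;       // index register scale (0 = no index)
};

bool isLegalRISCyAddrMode(const AddrMode &AM) {
  if (AM.Scale != 0 && AM.BaseOffs != 0)
    return false;      // reject r+r+i: only one addend besides the base
  if (AM.Scale != 0 && AM.Scale != 1)
    return false;      // reject scaled indices: plain r+r only
  return true;         // r, r+i, or r+r
}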
@ -61,7 +61,7 @@ namespace {
private:
bool IsNullValue(Value *V);
Constant *GetFrameMap(Function &F);
const Type* GetConcreteStackEntryType(Function &F);
Type* GetConcreteStackEntryType(Function &F);
void CollectRoots(Function &F);
static GetElementPtrInst *CreateGEP(LLVMContext &Context,
IRBuilder<> &B, Value *BasePtr,
@ -190,7 +190,7 @@ ShadowStackGC::ShadowStackGC() : Head(0), StackEntryTy(0) {

Constant *ShadowStackGC::GetFrameMap(Function &F) {
// doInitialization creates the abstract type of this value.
const Type *VoidPtr = Type::getInt8PtrTy(F.getContext());
Type *VoidPtr = Type::getInt8PtrTy(F.getContext());

// Truncate the ShadowStackDescriptor if some metadata is null.
unsigned NumMeta = 0;
@ -203,7 +203,7 @@ Constant *ShadowStackGC::GetFrameMap(Function &F) {
}
Metadata.resize(NumMeta);

const Type *Int32Ty = Type::getInt32Ty(F.getContext());
Type *Int32Ty = Type::getInt32Ty(F.getContext());

Constant *BaseElts[] = {
ConstantInt::get(Int32Ty, Roots.size(), false),
@ -244,7 +244,7 @@ Constant *ShadowStackGC::GetFrameMap(Function &F) {
return ConstantExpr::getGetElementPtr(GV, GEPIndices, 2);
}

const Type* ShadowStackGC::GetConcreteStackEntryType(Function &F) {
Type* ShadowStackGC::GetConcreteStackEntryType(Function &F) {
// doInitialization creates the generic version of this type.
std::vector<Type*> EltTys;
EltTys.push_back(StackEntryTy);
@ -282,7 +282,7 @@ bool ShadowStackGC::initializeCustomLowering(Module &M) {
EltTys.push_back(PointerType::getUnqual(StackEntryTy));
EltTys.push_back(FrameMapPtrTy);
StackEntryTy->setBody(EltTys);
const PointerType *StackEntryPtrTy = PointerType::getUnqual(StackEntryTy);
PointerType *StackEntryPtrTy = PointerType::getUnqual(StackEntryTy);

// Get the root chain if it already exists.
Head = M.getGlobalVariable("llvm_gc_root_chain");
@ -373,7 +373,7 @@ bool ShadowStackGC::performCustomLowering(Function &F) {

// Build the constant map and figure the type of the shadow stack entry.
Value *FrameMap = GetFrameMap(F);
const Type *ConcreteStackEntryTy = GetConcreteStackEntryType(F);
Type *ConcreteStackEntryTy = GetConcreteStackEntryType(F);

// Build the shadow stack entry at the very start of the function.
BasicBlock::iterator IP = F.getEntryBlock().begin();

@ -40,7 +40,7 @@ namespace {

const TargetLowering *TLI;

const Type *FunctionContextTy;
Type *FunctionContextTy;
Constant *RegisterFn;
Constant *UnregisterFn;
Constant *BuiltinSetjmpFn;
@ -204,7 +204,7 @@ splitLiveRangesAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes) {
++AfterAllocaInsertPt;
for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
AI != E; ++AI) {
const Type *Ty = AI->getType();
Type *Ty = AI->getType();
// Aggregate types can't be cast, but are legal argument types, so we have
// to handle them differently. We use an extract/insert pair as a
// lightweight method to achieve the same goal.
@ -381,7 +381,7 @@ bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
"fcn_context", F.begin()->begin());

Value *Idxs[2];
const Type *Int32Ty = Type::getInt32Ty(F.getContext());
Type *Int32Ty = Type::getInt32Ty(F.getContext());
Value *Zero = ConstantInt::get(Int32Ty, 0);
// We need to also keep around a reference to the call_site field
Idxs[0] = Zero;
@ -423,7 +423,7 @@ bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
// instruction hasn't already been removed.
if (!I->getParent()) continue;
Value *Val = new LoadInst(ExceptionAddr, "exception", true, I);
const Type *Ty = Type::getInt8PtrTy(F.getContext());
Type *Ty = Type::getInt8PtrTy(F.getContext());
Val = CastInst::Create(Instruction::IntToPtr, Val, Ty, "", I);

I->replaceAllUsesWith(Val);

@ -123,7 +123,7 @@ bool StackProtector::RequiresStackProtector() const {
// protectors.
return true;

if (const ArrayType *AT = dyn_cast<ArrayType>(AI->getAllocatedType())) {
if (ArrayType *AT = dyn_cast<ArrayType>(AI->getAllocatedType())) {
// We apparently only care about character arrays.
if (!AT->getElementType()->isIntegerTy(8))
continue;
@ -165,7 +165,7 @@ bool StackProtector::InsertStackProtectors() {
// StackGuard = load __stack_chk_guard
// call void @llvm.stackprotect.create(StackGuard, StackGuardSlot)
//
const PointerType *PtrTy = Type::getInt8PtrTy(RI->getContext());
PointerType *PtrTy = Type::getInt8PtrTy(RI->getContext());
unsigned AddressSpace, Offset;
if (TLI->getStackCookieLocation(AddressSpace, Offset)) {
Constant *OffsetVal =

@ -93,7 +93,7 @@ public:
/// \brief Returns the address the GlobalVariable should be written into. The
/// GVMemoryBlock object prefixes that.
static char *Create(const GlobalVariable *GV, const TargetData& TD) {
const Type *ElTy = GV->getType()->getElementType();
Type *ElTy = GV->getType()->getElementType();
size_t GVSize = (size_t)TD.getTypeAllocSize(ElTy);
void *RawMemory = ::operator new(
TargetData::RoundUpAlignment(sizeof(GVMemoryBlock),
@ -272,7 +272,7 @@ void *ArgvArray::reset(LLVMContext &C, ExecutionEngine *EE,
Array = new char[(InputArgv.size()+1)*PtrSize];

DEBUG(dbgs() << "JIT: ARGV = " << (void*)Array << "\n");
const Type *SBytePtr = Type::getInt8PtrTy(C);
Type *SBytePtr = Type::getInt8PtrTy(C);

for (unsigned i = 0; i != InputArgv.size(); ++i) {
unsigned Size = InputArgv[i].size()+1;
@ -361,8 +361,8 @@ int ExecutionEngine::runFunctionAsMain(Function *Fn,

// Check main() type
unsigned NumArgs = Fn->getFunctionType()->getNumParams();
const FunctionType *FTy = Fn->getFunctionType();
const Type* PPInt8Ty = Type::getInt8PtrTy(Fn->getContext())->getPointerTo();
FunctionType *FTy = Fn->getFunctionType();
Type* PPInt8Ty = Type::getInt8PtrTy(Fn->getContext())->getPointerTo();

// Check the argument types.
if (NumArgs > 3)
@ -651,7 +651,7 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
}
case Instruction::BitCast: {
GenericValue GV = getConstantValue(Op0);
const Type* DestTy = CE->getType();
Type* DestTy = CE->getType();
switch (Op0->getType()->getTypeID()) {
default: llvm_unreachable("Invalid bitcast operand");
case Type::IntegerTyID:
@ -847,7 +847,7 @@ static void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst,
}

void ExecutionEngine::StoreValueToMemory(const GenericValue &Val,
GenericValue *Ptr, const Type *Ty) {
GenericValue *Ptr, Type *Ty) {
const unsigned StoreBytes = getTargetData()->getTypeStoreSize(Ty);

switch (Ty->getTypeID()) {
@ -909,7 +909,7 @@ static void LoadIntFromMemory(APInt &IntVal, uint8_t *Src, unsigned LoadBytes) {
///
void ExecutionEngine::LoadValueFromMemory(GenericValue &Result,
GenericValue *Ptr,
const Type *Ty) {
Type *Ty) {
const unsigned LoadBytes = getTargetData()->getTypeStoreSize(Ty);

switch (Ty->getTypeID()) {
@ -986,7 +986,7 @@ void ExecutionEngine::emitGlobals() {
// Loop over all of the global variables in the program, allocating the memory
// to hold them. If there is more than one module, do a prepass over globals
// to figure out how the different modules should link together.
std::map<std::pair<std::string, const Type*>,
std::map<std::pair<std::string, Type*>,
const GlobalValue*> LinkedGlobalsMap;

if (Modules.size() != 1) {
@ -1101,7 +1101,7 @@ void ExecutionEngine::EmitGlobalVariable(const GlobalVariable *GV) {
if (!GV->isThreadLocal())
InitializeMemory(GV->getInitializer(), GA);

const Type *ElTy = GV->getType()->getElementType();
Type *ElTy = GV->getType()->getElementType();
size_t GVSize = (size_t)getTargetData()->getTypeAllocSize(ElTy);
NumInitBytes += (unsigned)GVSize;
++NumGlobals;
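StoreValueToMemory/LoadValueFromMemory above size every copy from the type's store size and pick the union member by type ID. A toy sketch of the store half (hypothetical GenericValue/TypeID, not the interpreter's real types):

#include <cstdint>
#include <cstring>

union GenericValue { int32_t Int32Val; double DoubleVal; };  // toy
enum TypeID { Int32TyID, DoubleTyID };

void storeValueToMemory(const GenericValue &Val, void *Ptr, TypeID ID) {
  switch (ID) {                      // dispatch on the type tag and
  case Int32TyID:                    // copy exactly the store size
    std::memcpy(Ptr, &Val.Int32Val, sizeof(int32_t));
    break;
  case DoubleTyID:
    std::memcpy(Ptr, &Val.DoubleVal, sizeof(double));
    break;
  }
}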
@ -51,7 +51,7 @@ static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF) {
break

static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
GenericValue Src2, const Type *Ty) {
GenericValue Src2, Type *Ty) {
switch (Ty->getTypeID()) {
IMPLEMENT_BINARY_OPERATOR(+, Float);
IMPLEMENT_BINARY_OPERATOR(+, Double);
@ -62,7 +62,7 @@ static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
}

static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
GenericValue Src2, const Type *Ty) {
GenericValue Src2, Type *Ty) {
switch (Ty->getTypeID()) {
IMPLEMENT_BINARY_OPERATOR(-, Float);
IMPLEMENT_BINARY_OPERATOR(-, Double);
@ -73,7 +73,7 @@ static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
}

static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
GenericValue Src2, const Type *Ty) {
GenericValue Src2, Type *Ty) {
switch (Ty->getTypeID()) {
IMPLEMENT_BINARY_OPERATOR(*, Float);
IMPLEMENT_BINARY_OPERATOR(*, Double);
@ -84,7 +84,7 @@ static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
}

static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
GenericValue Src2, const Type *Ty) {
GenericValue Src2, Type *Ty) {
switch (Ty->getTypeID()) {
IMPLEMENT_BINARY_OPERATOR(/, Float);
IMPLEMENT_BINARY_OPERATOR(/, Double);
@ -95,7 +95,7 @@ static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
}

static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
GenericValue Src2, const Type *Ty) {
GenericValue Src2, Type *Ty) {
switch (Ty->getTypeID()) {
case Type::FloatTyID:
Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
@ -125,7 +125,7 @@ static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
break;

static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
const Type *Ty) {
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(eq,Ty);
@ -138,7 +138,7 @@ static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
}

static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
const Type *Ty) {
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(ne,Ty);
@ -151,7 +151,7 @@ static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
}

static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
const Type *Ty) {
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(ult,Ty);
@ -164,7 +164,7 @@ static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
}

static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
const Type *Ty) {
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(slt,Ty);
@ -177,7 +177,7 @@ static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
}

static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
const Type *Ty) {
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(ugt,Ty);
@ -190,7 +190,7 @@ static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
}

static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
const Type *Ty) {
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(sgt,Ty);
@ -203,7 +203,7 @@ static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
}

static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
const Type *Ty) {
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(ule,Ty);
@ -216,7 +216,7 @@ static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
}

static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
const Type *Ty) {
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(sle,Ty);
@ -229,7 +229,7 @@ static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
}

static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
const Type *Ty) {
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(uge,Ty);
@ -242,7 +242,7 @@ static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
}
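Each executeICMP_* above is the same shape: switch on the operand's type ID and compare the matching GenericValue member, with IMPLEMENT_INTEGER_ICMP stamping out the cases. A toy sketch of one comparator over two integer widths (hypothetical GV/TyID, not the interpreter's types):

#include <cstdint>

union GV { uint8_t Int8Val; uint32_t Int32Val; };  // toy GenericValue
enum TyID { Int8TyID, Int32TyID };

static GV toyICMP_ULT(GV Src1, GV Src2, TyID Ty) {
  GV Dest;
  switch (Ty) {   // the macro expands to per-width cases like these
  case Int8TyID:  Dest.Int32Val = Src1.Int8Val  < Src2.Int8Val;  break;
  case Int32TyID: Dest.Int32Val = Src1.Int32Val < Src2.Int32Val; break;
  }
  return Dest;
}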
static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
const Type *Ty) {
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(sge,Ty);
@ -256,7 +256,7 @@ static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,

void Interpreter::visitICmpInst(ICmpInst &I) {
ExecutionContext &SF = ECStack.back();
const Type *Ty = I.getOperand(0)->getType();
Type *Ty = I.getOperand(0)->getType();
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue R; // Result
@ -286,7 +286,7 @@ void Interpreter::visitICmpInst(ICmpInst &I) {
break

static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
const Type *Ty) {
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_FCMP(==, Float);
@ -299,7 +299,7 @@ static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
}

static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
const Type *Ty) {
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_FCMP(!=, Float);
@ -313,7 +313,7 @@ static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
}

static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
const Type *Ty) {
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_FCMP(<=, Float);
@ -326,7 +326,7 @@ static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
}

static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
const Type *Ty) {
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_FCMP(>=, Float);
@ -339,7 +339,7 @@ static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
}

static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
const Type *Ty) {
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_FCMP(<, Float);
@ -352,7 +352,7 @@ static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
}

static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
const Type *Ty) {
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_FCMP(>, Float);
@ -377,49 +377,49 @@ static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,


static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2,
const Type *Ty) {
Type *Ty) {
GenericValue Dest;
IMPLEMENT_UNORDERED(Ty, Src1, Src2)
return executeFCMP_OEQ(Src1, Src2, Ty);
}

static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2,
const Type *Ty) {
Type *Ty) {
GenericValue Dest;
IMPLEMENT_UNORDERED(Ty, Src1, Src2)
return executeFCMP_ONE(Src1, Src2, Ty);
}

static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2,
const Type *Ty) {
Type *Ty) {
GenericValue Dest;
IMPLEMENT_UNORDERED(Ty, Src1, Src2)
return executeFCMP_OLE(Src1, Src2, Ty);
}

static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2,
|
||||
const Type *Ty) {
|
||||
Type *Ty) {
|
||||
GenericValue Dest;
|
||||
IMPLEMENT_UNORDERED(Ty, Src1, Src2)
|
||||
return executeFCMP_OGE(Src1, Src2, Ty);
|
||||
}
|
||||
|
||||
static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2,
|
||||
const Type *Ty) {
|
||||
Type *Ty) {
|
||||
GenericValue Dest;
|
||||
IMPLEMENT_UNORDERED(Ty, Src1, Src2)
|
||||
return executeFCMP_OLT(Src1, Src2, Ty);
|
||||
}
|
||||
|
||||
static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2,
|
||||
const Type *Ty) {
|
||||
Type *Ty) {
|
||||
GenericValue Dest;
|
||||
IMPLEMENT_UNORDERED(Ty, Src1, Src2)
|
||||
return executeFCMP_OGT(Src1, Src2, Ty);
|
||||
}
|
||||
|
||||
static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
|
||||
const Type *Ty) {
|
||||
Type *Ty) {
|
||||
GenericValue Dest;
|
||||
if (Ty->isFloatTy())
|
||||
Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal &&
|
||||
@ -431,7 +431,7 @@ static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
|
||||
}
|
||||
|
||||
static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
|
||||
const Type *Ty) {
|
||||
Type *Ty) {
|
||||
GenericValue Dest;
|
||||
if (Ty->isFloatTy())
|
||||
Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal ||
|
||||
@ -444,7 +444,7 @@ static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
|
||||
|
||||
void Interpreter::visitFCmpInst(FCmpInst &I) {
|
||||
ExecutionContext &SF = ECStack.back();
|
||||
const Type *Ty = I.getOperand(0)->getType();
|
||||
Type *Ty = I.getOperand(0)->getType();
|
||||
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
|
||||
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
|
||||
GenericValue R; // Result
|
||||
@ -475,7 +475,7 @@ void Interpreter::visitFCmpInst(FCmpInst &I) {
|
||||
}
|
||||
|
||||
static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
|
||||
GenericValue Src2, const Type *Ty) {
|
||||
GenericValue Src2, Type *Ty) {
|
||||
GenericValue Result;
|
||||
switch (predicate) {
|
||||
case ICmpInst::ICMP_EQ: return executeICMP_EQ(Src1, Src2, Ty);
|
||||
@ -520,7 +520,7 @@ static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
|
||||
|
||||
void Interpreter::visitBinaryOperator(BinaryOperator &I) {
|
||||
ExecutionContext &SF = ECStack.back();
|
||||
const Type *Ty = I.getOperand(0)->getType();
|
||||
Type *Ty = I.getOperand(0)->getType();
|
||||
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
|
||||
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
|
||||
GenericValue R; // Result
|
||||
@ -585,7 +585,7 @@ void Interpreter::exitCalled(GenericValue GV) {
|
||||
/// care of switching to the normal destination BB, if we are returning
|
||||
/// from an invoke.
|
||||
///
|
||||
void Interpreter::popStackAndReturnValueToCaller(const Type *RetTy,
|
||||
void Interpreter::popStackAndReturnValueToCaller(Type *RetTy,
|
||||
GenericValue Result) {
|
||||
// Pop the current stack frame.
|
||||
ECStack.pop_back();
|
||||
@ -613,7 +613,7 @@ void Interpreter::popStackAndReturnValueToCaller(const Type *RetTy,
|
||||
|
||||
void Interpreter::visitReturnInst(ReturnInst &I) {
|
||||
ExecutionContext &SF = ECStack.back();
|
||||
const Type *RetTy = Type::getVoidTy(I.getContext());
|
||||
Type *RetTy = Type::getVoidTy(I.getContext());
|
||||
GenericValue Result;
|
||||
|
||||
// Save away the return value... (if we are not 'ret void')
|
||||
@ -663,7 +663,7 @@ void Interpreter::visitBranchInst(BranchInst &I) {
|
||||
void Interpreter::visitSwitchInst(SwitchInst &I) {
|
||||
ExecutionContext &SF = ECStack.back();
|
||||
GenericValue CondVal = getOperandValue(I.getOperand(0), SF);
|
||||
const Type *ElTy = I.getOperand(0)->getType();
|
||||
Type *ElTy = I.getOperand(0)->getType();
|
||||
|
||||
// Check to see if any of the cases match...
|
||||
BasicBlock *Dest = 0;
|
||||
@ -730,7 +730,7 @@ void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){
|
||||
void Interpreter::visitAllocaInst(AllocaInst &I) {
|
||||
ExecutionContext &SF = ECStack.back();
|
||||
|
||||
const Type *Ty = I.getType()->getElementType(); // Type to be allocated
|
||||
Type *Ty = I.getType()->getElementType(); // Type to be allocated
|
||||
|
||||
// Get the number of elements being allocated by the array...
|
||||
unsigned NumElements =
|
||||
@ -767,7 +767,7 @@ GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
|
||||
uint64_t Total = 0;
|
||||
|
||||
for (; I != E; ++I) {
|
||||
if (const StructType *STy = dyn_cast<StructType>(*I)) {
|
||||
if (StructType *STy = dyn_cast<StructType>(*I)) {
|
||||
const StructLayout *SLO = TD.getStructLayout(STy);
|
||||
|
||||
const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
|
||||
@ -775,7 +775,7 @@ GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
|
||||
|
||||
Total += SLO->getElementOffset(Index);
|
||||
} else {
|
||||
const SequentialType *ST = cast<SequentialType>(*I);
|
||||
SequentialType *ST = cast<SequentialType>(*I);
|
||||
// Get the index number for the array... which must be long type...
|
||||
GenericValue IdxGV = getOperandValue(I.getOperand(), SF);
|
||||
|
||||
@ -929,34 +929,34 @@ void Interpreter::visitAShr(BinaryOperator &I) {
|
||||
SetValue(&I, Dest, SF);
|
||||
}
|
||||
|
||||
GenericValue Interpreter::executeTruncInst(Value *SrcVal, const Type *DstTy,
|
||||
GenericValue Interpreter::executeTruncInst(Value *SrcVal, Type *DstTy,
|
||||
ExecutionContext &SF) {
|
||||
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
|
||||
const IntegerType *DITy = cast<IntegerType>(DstTy);
|
||||
IntegerType *DITy = cast<IntegerType>(DstTy);
|
||||
unsigned DBitWidth = DITy->getBitWidth();
|
||||
Dest.IntVal = Src.IntVal.trunc(DBitWidth);
|
||||
return Dest;
|
||||
}
|
||||
|
||||
GenericValue Interpreter::executeSExtInst(Value *SrcVal, const Type *DstTy,
|
||||
GenericValue Interpreter::executeSExtInst(Value *SrcVal, Type *DstTy,
|
||||
ExecutionContext &SF) {
|
||||
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
|
||||
const IntegerType *DITy = cast<IntegerType>(DstTy);
|
||||
IntegerType *DITy = cast<IntegerType>(DstTy);
|
||||
unsigned DBitWidth = DITy->getBitWidth();
|
||||
Dest.IntVal = Src.IntVal.sext(DBitWidth);
|
||||
return Dest;
|
||||
}
|
||||
|
||||
GenericValue Interpreter::executeZExtInst(Value *SrcVal, const Type *DstTy,
|
||||
GenericValue Interpreter::executeZExtInst(Value *SrcVal, Type *DstTy,
|
||||
ExecutionContext &SF) {
|
||||
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
|
||||
const IntegerType *DITy = cast<IntegerType>(DstTy);
|
||||
IntegerType *DITy = cast<IntegerType>(DstTy);
|
||||
unsigned DBitWidth = DITy->getBitWidth();
|
||||
Dest.IntVal = Src.IntVal.zext(DBitWidth);
|
||||
return Dest;
|
||||
}
|
||||
|
||||
GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, const Type *DstTy,
|
||||
GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, Type *DstTy,
|
||||
ExecutionContext &SF) {
|
||||
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
|
||||
assert(SrcVal->getType()->isDoubleTy() && DstTy->isFloatTy() &&
|
||||
@ -965,7 +965,7 @@ GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, const Type *DstTy,
|
||||
return Dest;
|
||||
}
|
||||
|
||||
GenericValue Interpreter::executeFPExtInst(Value *SrcVal, const Type *DstTy,
|
||||
GenericValue Interpreter::executeFPExtInst(Value *SrcVal, Type *DstTy,
|
||||
ExecutionContext &SF) {
|
||||
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
|
||||
assert(SrcVal->getType()->isFloatTy() && DstTy->isDoubleTy() &&
|
||||
@ -974,9 +974,9 @@ GenericValue Interpreter::executeFPExtInst(Value *SrcVal, const Type *DstTy,
|
||||
return Dest;
|
||||
}
|
||||
|
||||
GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, const Type *DstTy,
|
||||
GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, Type *DstTy,
|
||||
ExecutionContext &SF) {
|
||||
const Type *SrcTy = SrcVal->getType();
|
||||
Type *SrcTy = SrcVal->getType();
|
||||
uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
|
||||
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
|
||||
assert(SrcTy->isFloatingPointTy() && "Invalid FPToUI instruction");
|
||||
@ -988,9 +988,9 @@ GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, const Type *DstTy,
|
||||
return Dest;
|
||||
}
|
||||
|
||||
GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, const Type *DstTy,
|
||||
GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, Type *DstTy,
|
||||
ExecutionContext &SF) {
|
||||
const Type *SrcTy = SrcVal->getType();
|
||||
Type *SrcTy = SrcVal->getType();
|
||||
uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
|
||||
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
|
||||
assert(SrcTy->isFloatingPointTy() && "Invalid FPToSI instruction");
|
||||
@ -1002,7 +1002,7 @@ GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, const Type *DstTy,
|
||||
return Dest;
|
||||
}
|
||||
|
||||
GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, const Type *DstTy,
|
||||
GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, Type *DstTy,
|
||||
ExecutionContext &SF) {
|
||||
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
|
||||
assert(DstTy->isFloatingPointTy() && "Invalid UIToFP instruction");
|
||||
@ -1014,7 +1014,7 @@ GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, const Type *DstTy,
|
||||
return Dest;
|
||||
}
|
||||
|
||||
GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, const Type *DstTy,
|
||||
GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, Type *DstTy,
|
||||
ExecutionContext &SF) {
|
||||
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
|
||||
assert(DstTy->isFloatingPointTy() && "Invalid SIToFP instruction");
|
||||
@ -1027,7 +1027,7 @@ GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, const Type *DstTy,
|
||||
|
||||
}
|
||||
|
||||
GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, const Type *DstTy,
|
||||
GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, Type *DstTy,
|
||||
ExecutionContext &SF) {
|
||||
uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
|
||||
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
|
||||
@ -1037,7 +1037,7 @@ GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, const Type *DstTy,
|
||||
return Dest;
|
||||
}
|
||||
|
||||
GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, const Type *DstTy,
|
||||
GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, Type *DstTy,
|
||||
ExecutionContext &SF) {
|
||||
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
|
||||
assert(DstTy->isPointerTy() && "Invalid PtrToInt instruction");
|
||||
@ -1050,10 +1050,10 @@ GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, const Type *DstTy,
|
||||
return Dest;
|
||||
}
|
||||
|
||||
GenericValue Interpreter::executeBitCastInst(Value *SrcVal, const Type *DstTy,
|
||||
GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
|
||||
ExecutionContext &SF) {
|
||||
|
||||
const Type *SrcTy = SrcVal->getType();
|
||||
Type *SrcTy = SrcVal->getType();
|
||||
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
|
||||
if (DstTy->isPointerTy()) {
|
||||
assert(SrcTy->isPointerTy() && "Invalid BitCast");
|
||||
@ -1155,7 +1155,7 @@ void Interpreter::visitVAArgInst(VAArgInst &I) {
|
||||
GenericValue Dest;
|
||||
GenericValue Src = ECStack[VAList.UIntPairVal.first]
|
||||
.VarArgs[VAList.UIntPairVal.second];
|
||||
const Type *Ty = I.getType();
|
||||
Type *Ty = I.getType();
|
||||
switch (Ty->getTypeID()) {
|
||||
case Type::IntegerTyID: Dest.IntVal = Src.IntVal;
|
||||
IMPLEMENT_VAARG(Pointer);
|
||||
@ -1222,7 +1222,7 @@ GenericValue Interpreter::getConstantExprValue (ConstantExpr *CE,
|
||||
GenericValue Op0 = getOperandValue(CE->getOperand(0), SF);
|
||||
GenericValue Op1 = getOperandValue(CE->getOperand(1), SF);
|
||||
GenericValue Dest;
|
||||
const Type * Ty = CE->getOperand(0)->getType();
|
||||
Type * Ty = CE->getOperand(0)->getType();
|
||||
switch (CE->getOpcode()) {
|
||||
case Instruction::Add: Dest.IntVal = Op0.IntVal + Op1.IntVal; break;
|
||||
case Instruction::Sub: Dest.IntVal = Op0.IntVal - Op1.IntVal; break;
|
||||
|
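Every Execution.cpp hunk above is the same mechanical edit: a helper that only inspects the type through Ty->getTypeID() drops the const qualifier, since Value::getType() now hands back a plain Type*. For readers without the tree checked out, a minimal self-contained analog of that dispatch pattern (stand-in Type and GenericValue, not LLVM's real headers) looks like:

#include <cstdint>
#include <cstdio>

// Stand-in for llvm::Type: the interpreter helpers only consult the type tag.
struct Type {
  enum TypeID { FloatTyID, DoubleTyID };
  TypeID ID;
  TypeID getTypeID() const { return ID; }
};

// Stand-in for llvm::GenericValue.
union GenericValue {
  float FloatVal;
  double DoubleVal;
  uint64_t IntVal;
};

// Same shape as the executeFCMP_* helpers: after the de-constify, the type
// parameter is Type* rather than const Type*, but nothing is ever mutated.
static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
  case Type::FloatTyID:  Dest.IntVal = Src1.FloatVal == Src2.FloatVal;   break;
  case Type::DoubleTyID: Dest.IntVal = Src1.DoubleVal == Src2.DoubleVal; break;
  }
  return Dest;
}

int main() {
  Type FloatTy = { Type::FloatTyID };
  GenericValue A, B;
  A.FloatVal = 1.5f;
  B.FloatVal = 1.5f;
  std::printf("%d\n", (int)executeFCMP_OEQ(A, B, &FloatTy).IntVal); // prints 1
  return 0;
}
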
@ -48,7 +48,7 @@ using namespace llvm;

	static ManagedStatic<sys::Mutex> FunctionsLock;

	typedef GenericValue (*ExFunc)(const FunctionType *,
	typedef GenericValue (*ExFunc)(FunctionType *,
	const std::vector<GenericValue> &);
	static ManagedStatic<std::map<const Function *, ExFunc> > ExportedFunctions;
	static std::map<std::string, ExFunc> FuncNames;
@ -60,7 +60,7 @@ static ManagedStatic<std::map<const Function *, RawFunc> > RawFunctions;

	static Interpreter *TheInterpreter;

	static char getTypeID(const Type *Ty) {
	static char getTypeID(Type *Ty) {
	switch (Ty->getTypeID()) {
	case Type::VoidTyID: return 'V';
	case Type::IntegerTyID:
@ -91,7 +91,7 @@ static ExFunc lookupFunction(const Function *F) {
	// Function not found, look it up... start by figuring out what the
	// composite function name should be.
	std::string ExtName = "lle_";
	const FunctionType *FT = F->getFunctionType();
	FunctionType *FT = F->getFunctionType();
	for (unsigned i = 0, e = FT->getNumContainedTypes(); i != e; ++i)
	ExtName += getTypeID(FT->getContainedType(i));
	ExtName + "_" + F->getNameStr();
@ -109,7 +109,7 @@ static ExFunc lookupFunction(const Function *F) {
	}

	#ifdef USE_LIBFFI
	static ffi_type *ffiTypeFor(const Type *Ty) {
	static ffi_type *ffiTypeFor(Type *Ty) {
	switch (Ty->getTypeID()) {
	case Type::VoidTyID: return &ffi_type_void;
	case Type::IntegerTyID:
@ -129,7 +129,7 @@ static ffi_type *ffiTypeFor(const Type *Ty) {
	return NULL;
	}

	static void *ffiValueFor(const Type *Ty, const GenericValue &AV,
	static void *ffiValueFor(Type *Ty, const GenericValue &AV,
	void *ArgDataPtr) {
	switch (Ty->getTypeID()) {
	case Type::IntegerTyID:
@ -181,7 +181,7 @@ static bool ffiInvoke(RawFunc Fn, Function *F,
	const std::vector<GenericValue> &ArgVals,
	const TargetData *TD, GenericValue &Result) {
	ffi_cif cif;
	const FunctionType *FTy = F->getFunctionType();
	FunctionType *FTy = F->getFunctionType();
	const unsigned NumArgs = F->arg_size();

	// TODO: We don't have type information about the remaining arguments, because
@ -197,7 +197,7 @@ static bool ffiInvoke(RawFunc Fn, Function *F,
	for (Function::const_arg_iterator A = F->arg_begin(), E = F->arg_end();
	A != E; ++A) {
	const unsigned ArgNo = A->getArgNo();
	const Type *ArgTy = FTy->getParamType(ArgNo);
	Type *ArgTy = FTy->getParamType(ArgNo);
	args[ArgNo] = ffiTypeFor(ArgTy);
	ArgBytes += TD->getTypeStoreSize(ArgTy);
	}
@ -209,12 +209,12 @@ static bool ffiInvoke(RawFunc Fn, Function *F,
	for (Function::const_arg_iterator A = F->arg_begin(), E = F->arg_end();
	A != E; ++A) {
	const unsigned ArgNo = A->getArgNo();
	const Type *ArgTy = FTy->getParamType(ArgNo);
	Type *ArgTy = FTy->getParamType(ArgNo);
	values[ArgNo] = ffiValueFor(ArgTy, ArgVals[ArgNo], ArgDataPtr);
	ArgDataPtr += TD->getTypeStoreSize(ArgTy);
	}

	const Type *RetTy = FTy->getReturnType();
	Type *RetTy = FTy->getReturnType();
	ffi_type *rtype = ffiTypeFor(RetTy);

	if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, NumArgs, rtype, &args[0]) == FFI_OK) {
@ -304,7 +304,7 @@ GenericValue Interpreter::callExternalFunction(Function *F,
	extern "C" { // Don't add C++ manglings to llvm mangling :)

	// void atexit(Function*)
	GenericValue lle_X_atexit(const FunctionType *FT,
	GenericValue lle_X_atexit(FunctionType *FT,
	const std::vector<GenericValue> &Args) {
	assert(Args.size() == 1);
	TheInterpreter->addAtExitHandler((Function*)GVTOP(Args[0]));
@ -314,14 +314,14 @@ GenericValue lle_X_atexit(const FunctionType *FT,
	}

	// void exit(int)
	GenericValue lle_X_exit(const FunctionType *FT,
	GenericValue lle_X_exit(FunctionType *FT,
	const std::vector<GenericValue> &Args) {
	TheInterpreter->exitCalled(Args[0]);
	return GenericValue();
	}

	// void abort(void)
	GenericValue lle_X_abort(const FunctionType *FT,
	GenericValue lle_X_abort(FunctionType *FT,
	const std::vector<GenericValue> &Args) {
	//FIXME: should we report or raise here?
	//report_fatal_error("Interpreted program raised SIGABRT");
@ -331,7 +331,7 @@ GenericValue lle_X_abort(const FunctionType *FT,

	// int sprintf(char *, const char *, ...) - a very rough implementation to make
	// output useful.
	GenericValue lle_X_sprintf(const FunctionType *FT,
	GenericValue lle_X_sprintf(FunctionType *FT,
	const std::vector<GenericValue> &Args) {
	char *OutputBuffer = (char *)GVTOP(Args[0]);
	const char *FmtStr = (const char *)GVTOP(Args[1]);
@ -413,7 +413,7 @@ GenericValue lle_X_sprintf(const FunctionType *FT,

	// int printf(const char *, ...) - a very rough implementation to make output
	// useful.
	GenericValue lle_X_printf(const FunctionType *FT,
	GenericValue lle_X_printf(FunctionType *FT,
	const std::vector<GenericValue> &Args) {
	char Buffer[10000];
	std::vector<GenericValue> NewArgs;
@ -425,7 +425,7 @@ GenericValue lle_X_printf(const FunctionType *FT,
	}

	// int sscanf(const char *format, ...);
	GenericValue lle_X_sscanf(const FunctionType *FT,
	GenericValue lle_X_sscanf(FunctionType *FT,
	const std::vector<GenericValue> &args) {
	assert(args.size() < 10 && "Only handle up to 10 args to sscanf right now!");

@ -440,7 +440,7 @@ GenericValue lle_X_sscanf(const FunctionType *FT,
	}

	// int scanf(const char *format, ...);
	GenericValue lle_X_scanf(const FunctionType *FT,
	GenericValue lle_X_scanf(FunctionType *FT,
	const std::vector<GenericValue> &args) {
	assert(args.size() < 10 && "Only handle up to 10 args to scanf right now!");

@ -456,7 +456,7 @@ GenericValue lle_X_scanf(const FunctionType *FT,

	// int fprintf(FILE *, const char *, ...) - a very rough implementation to make
	// output useful.
	GenericValue lle_X_fprintf(const FunctionType *FT,
	GenericValue lle_X_fprintf(FunctionType *FT,
	const std::vector<GenericValue> &Args) {
	assert(Args.size() >= 2);
	char Buffer[10000];

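One context line in the lookupFunction hunk above deserves a flag: `ExtName + "_" + F->getNameStr();` builds a temporary string and discards it, so the "_name" suffix never reaches ExtName (a pre-existing quirk in that line, which this patch only passes through as context). A hedged sketch of the composition the code appears to intend, with TypeCodes standing in for getTypeID() applied to each contained type of the FunctionType (e.g. 'V' for void, as in the hunk above):

#include <cassert>
#include <string>

// Sketch only: composes the "lle_" lookup key the way lookupFunction appears
// to intend.
static std::string composeExtName(const std::string &FnName,
                                  const std::string &TypeCodes) {
  std::string ExtName = "lle_";
  for (std::string::size_type i = 0, e = TypeCodes.size(); i != e; ++i)
    ExtName += TypeCodes[i];   // getTypeID(FT->getContainedType(i))
  ExtName += "_" + FnName;     // note the +=; a bare `ExtName + "_" + FnName;`
                               // would discard its result, as in the context line
  return ExtName;
}

int main() {
  assert(composeExtName("exit", "V") == "lle_V_exit");
  return 0;
}
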
@ -207,33 +207,33 @@ private: // Helper functions
	void initializeExternalFunctions();
	GenericValue getConstantExprValue(ConstantExpr *CE, ExecutionContext &SF);
	GenericValue getOperandValue(Value *V, ExecutionContext &SF);
	GenericValue executeTruncInst(Value *SrcVal, const Type *DstTy,
	GenericValue executeTruncInst(Value *SrcVal, Type *DstTy,
	ExecutionContext &SF);
	GenericValue executeSExtInst(Value *SrcVal, const Type *DstTy,
	GenericValue executeSExtInst(Value *SrcVal, Type *DstTy,
	ExecutionContext &SF);
	GenericValue executeZExtInst(Value *SrcVal, const Type *DstTy,
	GenericValue executeZExtInst(Value *SrcVal, Type *DstTy,
	ExecutionContext &SF);
	GenericValue executeFPTruncInst(Value *SrcVal, const Type *DstTy,
	GenericValue executeFPTruncInst(Value *SrcVal, Type *DstTy,
	ExecutionContext &SF);
	GenericValue executeFPExtInst(Value *SrcVal, const Type *DstTy,
	GenericValue executeFPExtInst(Value *SrcVal, Type *DstTy,
	ExecutionContext &SF);
	GenericValue executeFPToUIInst(Value *SrcVal, const Type *DstTy,
	GenericValue executeFPToUIInst(Value *SrcVal, Type *DstTy,
	ExecutionContext &SF);
	GenericValue executeFPToSIInst(Value *SrcVal, const Type *DstTy,
	GenericValue executeFPToSIInst(Value *SrcVal, Type *DstTy,
	ExecutionContext &SF);
	GenericValue executeUIToFPInst(Value *SrcVal, const Type *DstTy,
	GenericValue executeUIToFPInst(Value *SrcVal, Type *DstTy,
	ExecutionContext &SF);
	GenericValue executeSIToFPInst(Value *SrcVal, const Type *DstTy,
	GenericValue executeSIToFPInst(Value *SrcVal, Type *DstTy,
	ExecutionContext &SF);
	GenericValue executePtrToIntInst(Value *SrcVal, const Type *DstTy,
	GenericValue executePtrToIntInst(Value *SrcVal, Type *DstTy,
	ExecutionContext &SF);
	GenericValue executeIntToPtrInst(Value *SrcVal, const Type *DstTy,
	GenericValue executeIntToPtrInst(Value *SrcVal, Type *DstTy,
	ExecutionContext &SF);
	GenericValue executeBitCastInst(Value *SrcVal, const Type *DstTy,
	GenericValue executeBitCastInst(Value *SrcVal, Type *DstTy,
	ExecutionContext &SF);
	GenericValue executeCastOperation(Instruction::CastOps opcode, Value *SrcVal,
	const Type *Ty, ExecutionContext &SF);
	void popStackAndReturnValueToCaller(const Type *RetTy, GenericValue Result);
	Type *Ty, ExecutionContext &SF);
	void popStackAndReturnValueToCaller(Type *RetTy, GenericValue Result);

	};

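The Interpreter.h declarations above change in lockstep with the Execution.cpp definitions, and they have to: dropping const from a parameter only works once the producer side stops returning const Type*. A stand-in illustration of that handshake (toy types, not LLVM's classes):

// Stand-in types; the point is the const-ness handshake, not the real API.
struct Type {};

struct Value {
  Type *Ty;
  // After the de-constify: accessors hand back Type* (was const Type*).
  Type *getType() const { return Ty; }
};

// After the de-constify: consumers accept Type* (was const Type*).
static void executeSomeCast(Type *DstTy) { (void)DstTy; }

int main() {
  Type T;
  Value V = { &T };
  // Compiles cleanly. Had getType() still returned const Type*, passing it
  // to a Type* parameter would need a const_cast at every call site, which
  // is why producer and consumer signatures flip in the same patch.
  executeSomeCast(V.getType());
  return 0;
}
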
@ -390,8 +390,8 @@ GenericValue JIT::runFunction(Function *F,

	void *FPtr = getPointerToFunction(F);
	assert(FPtr && "Pointer to fn's code was null after getPointerToFunction");
	const FunctionType *FTy = F->getFunctionType();
	const Type *RetTy = FTy->getReturnType();
	FunctionType *FTy = F->getFunctionType();
	Type *RetTy = FTy->getReturnType();

	assert((FTy->getNumParams() == ArgValues.size() ||
	(FTy->isVarArg() && FTy->getNumParams() <= ArgValues.size())) &&
@ -500,7 +500,7 @@ GenericValue JIT::runFunction(Function *F,
	SmallVector<Value*, 8> Args;
	for (unsigned i = 0, e = ArgValues.size(); i != e; ++i) {
	Constant *C = 0;
	const Type *ArgTy = FTy->getParamType(i);
	Type *ArgTy = FTy->getParamType(i);
	const GenericValue &AV = ArgValues[i];
	switch (ArgTy->getTypeID()) {
	default: llvm_unreachable("Unknown argument type for function call!");
@ -788,7 +788,7 @@ char* JIT::getMemoryForGV(const GlobalVariable* GV) {
	// be allocated into the same buffer, but in general globals are allocated
	// through the memory manager which puts them near the code but not in the
	// same buffer.
	const Type *GlobalType = GV->getType()->getElementType();
	Type *GlobalType = GV->getType()->getElementType();
	size_t S = getTargetData()->getTypeAllocSize(GlobalType);
	size_t A = getTargetData()->getPreferredAlignment(GV);
	if (GV->isThreadLocal()) {

@ -770,7 +770,7 @@ static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP,
	MachineConstantPoolEntry CPE = Constants[i];
	unsigned AlignMask = CPE.getAlignment() - 1;
	Size = (Size + AlignMask) & ~AlignMask;
	const Type *Ty = CPE.getType();
	Type *Ty = CPE.getType();
	Size += TD->getTypeAllocSize(Ty);
	}
	return Size;
@ -1098,7 +1098,7 @@ void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
	DEBUG(dbgs() << "JIT: CP" << i << " at [0x";
	dbgs().write_hex(CAddr) << "]\n");

	const Type *Ty = CPE.Val.ConstVal->getType();
	Type *Ty = CPE.Val.ConstVal->getType();
	Offset += TheJIT->getTargetData()->getTypeAllocSize(Ty);
	}
	}

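The sizing loop in GetConstantPoolSizeInBytes rounds the running offset up to each entry's alignment with `Size = (Size + AlignMask) & ~AlignMask`. That identity holds only for power-of-two alignments; a small worked check:

#include <cassert>

// Align-up as used in the constant-pool sizing hunk above: AlignMask is
// alignment - 1, so adding it and masking clears the low bits.
static unsigned alignUp(unsigned Size, unsigned Alignment) {
  unsigned AlignMask = Alignment - 1;     // e.g. 8 -> 0b111
  return (Size + AlignMask) & ~AlignMask; // rounds up to a multiple of Alignment
}

int main() {
  assert(alignUp(13, 8) == 16);  // bumped to the next 8-byte boundary
  assert(alignUp(16, 8) == 16);  // already aligned: unchanged
  assert(alignUp(0, 4) == 0);
  return 0;
}
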
@ -124,8 +124,8 @@ GenericValue MCJIT::runFunction(Function *F,

	void *FPtr = getPointerToFunction(F);
	assert(FPtr && "Pointer to fn's code was null after getPointerToFunction");
	const FunctionType *FTy = F->getFunctionType();
	const Type *RetTy = FTy->getReturnType();
	FunctionType *FTy = F->getFunctionType();
	Type *RetTy = FTy->getReturnType();

	assert((FTy->getNumParams() == ArgValues.size() ||
	(FTy->isVarArg() && FTy->getNumParams() <= ArgValues.size())) &&

@ -26,7 +26,7 @@ ARMConstantPoolValue::ARMConstantPoolValue(const Constant *cval, unsigned id,
	unsigned char PCAdj,
	ARMCP::ARMCPModifier Modif,
	bool AddCA)
	: MachineConstantPoolValue((const Type*)cval->getType()),
	: MachineConstantPoolValue((Type*)cval->getType()),
	CVal(cval), S(NULL), LabelId(id), Kind(K), PCAdjust(PCAdj),
	Modifier(Modif), AddCurrentAddress(AddCA) {}

@ -35,13 +35,13 @@ ARMConstantPoolValue::ARMConstantPoolValue(LLVMContext &C,
	unsigned char PCAdj,
	ARMCP::ARMCPModifier Modif,
	bool AddCA)
	: MachineConstantPoolValue((const Type*)Type::getInt32Ty(C)),
	: MachineConstantPoolValue((Type*)Type::getInt32Ty(C)),
	CVal(NULL), S(strdup(s)), LabelId(id), Kind(ARMCP::CPExtSymbol),
	PCAdjust(PCAdj), Modifier(Modif), AddCurrentAddress(AddCA) {}

	ARMConstantPoolValue::ARMConstantPoolValue(const GlobalValue *gv,
	ARMCP::ARMCPModifier Modif)
	: MachineConstantPoolValue((const Type*)Type::getInt32Ty(gv->getContext())),
	: MachineConstantPoolValue((Type*)Type::getInt32Ty(gv->getContext())),
	CVal(gv), S(NULL), LabelId(0), Kind(ARMCP::CPValue), PCAdjust(0),
	Modifier(Modif), AddCurrentAddress(false) {}

@ -171,8 +171,8 @@ class ARMFastISel : public FastISel {

	// Utility routines.
	private:
	bool isTypeLegal(const Type *Ty, MVT &VT);
	bool isLoadTypeLegal(const Type *Ty, MVT &VT);
	bool isTypeLegal(Type *Ty, MVT &VT);
	bool isLoadTypeLegal(Type *Ty, MVT &VT);
	bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr);
	bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr);
	bool ARMComputeAddress(const Value *Obj, Address &Addr);
@ -673,7 +673,7 @@ unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
	return 0;
	}

	bool ARMFastISel::isTypeLegal(const Type *Ty, MVT &VT) {
	bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
	EVT evt = TLI.getValueType(Ty, true);

	// Only handle simple types.
@ -685,7 +685,7 @@ bool ARMFastISel::isTypeLegal(const Type *Ty, MVT &VT) {
	return TLI.isTypeLegal(VT);
	}

	bool ARMFastISel::isLoadTypeLegal(const Type *Ty, MVT &VT) {
	bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
	if (isTypeLegal(Ty, VT)) return true;

	// If this is a type than can be sign or zero-extended to a basic operation
@ -714,7 +714,7 @@ bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
	U = C;
	}

	if (const PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
	if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
	if (Ty->getAddressSpace() > 255)
	// Fast instruction selection doesn't support the special
	// address spaces.
@ -749,7 +749,7 @@ bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
	for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
	i != e; ++i, ++GTI) {
	const Value *Op = *i;
	if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
	if (StructType *STy = dyn_cast<StructType>(*GTI)) {
	const StructLayout *SL = TD.getStructLayout(STy);
	unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
	TmpOffset += SL->getElementOffset(Idx);
@ -1085,7 +1085,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
	// TODO: Factor this out.
	if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
	MVT SourceVT;
	const Type *Ty = CI->getOperand(0)->getType();
	Type *Ty = CI->getOperand(0)->getType();
	if (CI->hasOneUse() && (CI->getParent() == I->getParent())
	&& isTypeLegal(Ty, SourceVT)) {
	bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
@ -1201,7 +1201,7 @@ bool ARMFastISel::SelectCmp(const Instruction *I) {
	const CmpInst *CI = cast<CmpInst>(I);

	MVT VT;
	const Type *Ty = CI->getOperand(0)->getType();
	Type *Ty = CI->getOperand(0)->getType();
	if (!isTypeLegal(Ty, VT))
	return false;

@ -1309,7 +1309,7 @@ bool ARMFastISel::SelectSIToFP(const Instruction *I) {
	if (!Subtarget->hasVFP2()) return false;

	MVT DstVT;
	const Type *Ty = I->getType();
	Type *Ty = I->getType();
	if (!isTypeLegal(Ty, DstVT))
	return false;

@ -1343,7 +1343,7 @@ bool ARMFastISel::SelectFPToSI(const Instruction *I) {
	if (!Subtarget->hasVFP2()) return false;

	MVT DstVT;
	const Type *RetTy = I->getType();
	Type *RetTy = I->getType();
	if (!isTypeLegal(RetTy, DstVT))
	return false;

@ -1351,7 +1351,7 @@ bool ARMFastISel::SelectFPToSI(const Instruction *I) {
	if (Op == 0) return false;

	unsigned Opc;
	const Type *OpTy = I->getOperand(0)->getType();
	Type *OpTy = I->getOperand(0)->getType();
	if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS;
	else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD;
	else return 0;
@ -1401,7 +1401,7 @@ bool ARMFastISel::SelectSelect(const Instruction *I) {

	bool ARMFastISel::SelectSDiv(const Instruction *I) {
	MVT VT;
	const Type *Ty = I->getType();
	Type *Ty = I->getType();
	if (!isTypeLegal(Ty, VT))
	return false;

@ -1429,7 +1429,7 @@ bool ARMFastISel::SelectSDiv(const Instruction *I) {

	bool ARMFastISel::SelectSRem(const Instruction *I) {
	MVT VT;
	const Type *Ty = I->getType();
	Type *Ty = I->getType();
	if (!isTypeLegal(Ty, VT))
	return false;

@ -1456,7 +1456,7 @@ bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
	// operations, but can't figure out how to. Just use the vfp instructions
	// if we have them.
	// FIXME: It'd be nice to use NEON instructions.
	const Type *Ty = I->getType();
	Type *Ty = I->getType();
	bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
	if (isFloat && !Subtarget->hasVFP2())
	return false;
@ -1778,7 +1778,7 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
	CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

	// Handle *simple* calls for now.
	const Type *RetTy = I->getType();
	Type *RetTy = I->getType();
	MVT RetVT;
	if (RetTy->isVoidTy())
	RetVT = MVT::isVoid;
@ -1802,7 +1802,7 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
	unsigned Arg = getRegForValue(Op);
	if (Arg == 0) return false;

	const Type *ArgTy = Op->getType();
	Type *ArgTy = Op->getType();
	MVT ArgVT;
	if (!isTypeLegal(ArgTy, ArgVT)) return false;

@ -1870,13 +1870,13 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
	// TODO: Avoid some calling conventions?

	// Let SDISel handle vararg functions.
	const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
	const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
	PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
	FunctionType *FTy = cast<FunctionType>(PT->getElementType());
	if (FTy->isVarArg())
	return false;

	// Handle *simple* calls for now.
	const Type *RetTy = I->getType();
	Type *RetTy = I->getType();
	MVT RetVT;
	if (RetTy->isVoidTy())
	RetVT = MVT::isVoid;
@ -1915,7 +1915,7 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
	CS.paramHasAttr(AttrInd, Attribute::ByVal))
	return false;

	const Type *ArgTy = (*i)->getType();
	Type *ArgTy = (*i)->getType();
	MVT ArgVT;
	if (!isTypeLegal(ArgTy, ArgVT))
	return false;
@ -1969,9 +1969,9 @@ bool ARMFastISel::SelectIntCast(const Instruction *I) {
	// On ARM, in general, integer casts don't involve legal types; this code
	// handles promotable integers. The high bits for a type smaller than
	// the register size are assumed to be undefined.
	const Type *DestTy = I->getType();
	Type *DestTy = I->getType();
	Value *Op = I->getOperand(0);
	const Type *SrcTy = Op->getType();
	Type *SrcTy = Op->getType();

	EVT SrcVT, DestVT;
	SrcVT = TLI.getValueType(SrcTy, true);

@ -100,8 +100,8 @@ namespace {
	GlobalCmp(const TargetData *td) : TD(td) { }

	bool operator()(const GlobalVariable *GV1, const GlobalVariable *GV2) {
	const Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
	const Type *Ty2 = cast<PointerType>(GV2->getType())->getElementType();
	Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
	Type *Ty2 = cast<PointerType>(GV2->getType())->getElementType();

	return (TD->getTypeAllocSize(Ty1) < TD->getTypeAllocSize(Ty2));
	}
@ -123,7 +123,7 @@ bool ARMGlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
	// FIXME: Find better heuristics
	std::stable_sort(Globals.begin(), Globals.end(), GlobalCmp(TD));

	const Type *Int32Ty = Type::getInt32Ty(M.getContext());
	Type *Int32Ty = Type::getInt32Ty(M.getContext());

	for (size_t i = 0, e = Globals.size(); i != e; ) {
	size_t j = 0;
@ -176,7 +176,7 @@ bool ARMGlobalMerge::doInitialization(Module &M) {

	// Ignore fancy-aligned globals for now.
	unsigned Alignment = I->getAlignment();
	const Type *Ty = I->getType()->getElementType();
	Type *Ty = I->getType()->getElementType();
	if (Alignment > TD->getABITypeAlignment(Ty))
	continue;

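GlobalCmp above orders globals by the allocation size of their pointee type so that doMerge can pack similarly sized globals together, and std::stable_sort keeps the original order among equal sizes. A self-contained analog, with plain sizes standing in for TD->getTypeAllocSize:

#include <algorithm>
#include <cassert>
#include <vector>

// Stand-in for a global plus its TD->getTypeAllocSize(ElementType) result.
struct Global { unsigned AllocSize; };

struct GlobalCmp {
  bool operator()(const Global &GV1, const Global &GV2) const {
    return GV1.AllocSize < GV2.AllocSize;
  }
};

int main() {
  std::vector<Global> Globals;
  Global A = { 8 }, B = { 4 }, C = { 16 };
  Globals.push_back(A);
  Globals.push_back(B);
  Globals.push_back(C);
  // stable_sort, as in doMerge, preserves the relative order of equal sizes.
  std::stable_sort(Globals.begin(), Globals.end(), GlobalCmp());
  assert(Globals[0].AllocSize == 4 && Globals[2].AllocSize == 16);
  return 0;
}
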
@ -1982,11 +1982,11 @@ ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
	ArgListTy Args;
	ArgListEntry Entry;
	Entry.Node = Argument;
	Entry.Ty = (const Type *) Type::getInt32Ty(*DAG.getContext());
	Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
	Args.push_back(Entry);
	// FIXME: is there useful debug info available here?
	std::pair<SDValue, SDValue> CallResult =
	LowerCallTo(Chain, (const Type *) Type::getInt32Ty(*DAG.getContext()),
	LowerCallTo(Chain, (Type *) Type::getInt32Ty(*DAG.getContext()),
	false, false, false, false,
	0, CallingConv::C, false, /*isReturnValueUsed=*/true,
	DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl);
@ -7235,7 +7235,7 @@ bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
	/// isLegalAddressingMode - Return true if the addressing mode represented
	/// by AM is legal for this target, for a load/store of the specified type.
	bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM,
	const Type *Ty) const {
	Type *Ty) const {
	EVT VT = getValueType(Ty, true);
	if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
	return false;
@ -7536,7 +7536,7 @@ bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
	if (AsmPieces.size() == 3 &&
	AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
	IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
	const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
	IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
	if (Ty && Ty->getBitWidth() == 32)
	return IntrinsicLowering::LowerToByteSwap(CI);
	}
@ -7582,7 +7582,7 @@ ARMTargetLowering::getSingleConstraintMatchWeight(
	// but allow it at the lowest weight.
	if (CallOperandVal == NULL)
	return CW_Default;
	const Type *type = CallOperandVal->getType();
	Type *type = CallOperandVal->getType();
	// Look at the constraint type.
	switch (*constraint) {
	default:
@ -7933,7 +7933,7 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
	// Conservatively set memVT to the entire set of vectors stored.
	unsigned NumElts = 0;
	for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
	const Type *ArgTy = I.getArgOperand(ArgI)->getType();
	Type *ArgTy = I.getArgOperand(ArgI)->getType();
	if (!ArgTy->isVectorTy())
	break;
	NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8;

@ -256,7 +256,7 @@ namespace llvm {

	/// isLegalAddressingMode - Return true if the addressing mode represented
	/// by AM is legal for this target, for a load/store of the specified type.
	virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty)const;
	virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty)const;
	bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

	/// isLegalICmpImmediate - Return true if the specified immediate is legal

@ -155,7 +155,7 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
	TargetLowering::ArgListEntry Entry;

	// First argument: data pointer
	const Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*DAG.getContext());
	Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*DAG.getContext());
	Entry.Node = Dst;
	Entry.Ty = IntPtrTy;
	Args.push_back(Entry);

@ -34,7 +34,7 @@ namespace bfinIntrinsic {

	}

	std::string BlackfinIntrinsicInfo::getName(unsigned IntrID, const Type **Tys,
	std::string BlackfinIntrinsicInfo::getName(unsigned IntrID, Type **Tys,
	unsigned numTys) const {
	static const char *const names[] = {
	#define GET_INTRINSIC_NAME_TABLE
@ -81,8 +81,8 @@ bool BlackfinIntrinsicInfo::isOverloaded(unsigned IntrID) const {
	#include "BlackfinGenIntrinsics.inc"
	#undef GET_INTRINSIC_ATTRIBUTES

	static const FunctionType *getType(LLVMContext &Context, unsigned id) {
	const Type *ResultTy = NULL;
	static FunctionType *getType(LLVMContext &Context, unsigned id) {
	Type *ResultTy = NULL;
	std::vector<Type*> ArgTys;
	bool IsVarArg = false;

@ -94,7 +94,7 @@ static const FunctionType *getType(LLVMContext &Context, unsigned id) {
	}

	Function *BlackfinIntrinsicInfo::getDeclaration(Module *M, unsigned IntrID,
	const Type **Tys,
	Type **Tys,
	unsigned numTy) const {
	assert(!isOverloaded(IntrID) && "Blackfin intrinsics are not overloaded");
	AttrListPtr AList = getAttributes((bfinIntrinsic::ID) IntrID);

@ -19,11 +19,11 @@ namespace llvm {

	class BlackfinIntrinsicInfo : public TargetIntrinsicInfo {
	public:
	std::string getName(unsigned IntrID, const Type **Tys = 0,
	std::string getName(unsigned IntrID, Type **Tys = 0,
	unsigned numTys = 0) const;
	unsigned lookupName(const char *Name, unsigned Len) const;
	bool isOverloaded(unsigned IID) const;
	Function *getDeclaration(Module *M, unsigned ID, const Type **Tys = 0,
	Function *getDeclaration(Module *M, unsigned ID, Type **Tys = 0,
	unsigned numTys = 0) const;
	};