//===-- LLParser.h - Parser Class -------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the parser class for .ll files.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_ASMPARSER_LLPARSER_H
#define LLVM_LIB_ASMPARSER_LLPARSER_H
#include "LLLexer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/ValueHandle.h"
#include <map>
namespace llvm {
class Module;
class OpaqueType;
class Function;
class Value;
class BasicBlock;
class Instruction;
class Constant;
class GlobalValue;
class Comdat;
class MDString;
class MDNode;
struct SlotMapping;
class StructType;
/// ValID - Represents a reference to a definition of some sort with no type.
/// There are several cases where we have to parse the value but where the
/// type can depend on later context. This may either be a numeric reference
/// or a symbolic (%var) reference. This is just a discriminated union.
struct ValID {
enum {
t_LocalID, t_GlobalID, // ID in UIntVal.
t_LocalName, t_GlobalName, // Name in StrVal.
t_APSInt, t_APFloat, // Value in APSIntVal/APFloatVal.
t_Null, t_Undef, t_Zero, t_None, // No value.
t_EmptyArray, // No value: []
t_Constant, // Value in ConstantVal.
t_InlineAsm, // Value in FTy/StrVal/StrVal2/UIntVal.
t_ConstantStruct, // Value in ConstantStructElts.
t_PackedConstantStruct // Value in ConstantStructElts.
} Kind = t_LocalID;
LLLexer::LocTy Loc;
unsigned UIntVal;
FunctionType *FTy = nullptr;
std::string StrVal, StrVal2;
APSInt APSIntVal;
APFloat APFloatVal{0.0};
Constant *ConstantVal;
std::unique_ptr<Constant *[]> ConstantStructElts;
ValID() = default;
ValID(const ValID &RHS)
: Kind(RHS.Kind), Loc(RHS.Loc), UIntVal(RHS.UIntVal), FTy(RHS.FTy),
StrVal(RHS.StrVal), StrVal2(RHS.StrVal2), APSIntVal(RHS.APSIntVal),
APFloatVal(RHS.APFloatVal), ConstantVal(RHS.ConstantVal) {
assert(!RHS.ConstantStructElts);
}
bool operator<(const ValID &RHS) const {
if (Kind == t_LocalID || Kind == t_GlobalID)
return UIntVal < RHS.UIntVal;
assert((Kind == t_LocalName || Kind == t_GlobalName ||
Kind == t_ConstantStruct || Kind == t_PackedConstantStruct) &&
"Ordering not defined for this ValID kind yet");
return StrVal < RHS.StrVal;
}
};
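// For illustration, a symbolic reference such as %tmp would be recorded
// roughly as follows (a hedged sketch; in practice the fields are filled in
// by ParseValID and the lexer, not by hand):
//
//   ValID ID;
//   ID.Kind = ValID::t_LocalName;  // %tmp is a symbolic local reference
//   ID.StrVal = "tmp";             // name without the leading '%'
//
// whereas a numeric reference such as %3 would use t_LocalID with
// UIntVal == 3.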
class LLParser {
public:
typedef LLLexer::LocTy LocTy;
private:
LLVMContext &Context;
LLLexer Lex;
Module *M;
SlotMapping *Slots;
// Instruction metadata resolution. Each instruction can have a list of
// MDRef info associated with it.
//
// The simpler approach of just creating temporary MDNodes and then calling
// RAUW on them when the definition is processed doesn't work because some
// instruction metadata kinds, such as dbg, get stored in the IR in an
// "optimized" format which doesn't participate in the normal value use
// lists. This means that RAUW doesn't work, even on temporary MDNodes
// which otherwise support RAUW. Instead, we defer resolving MDNode
// references until the definitions have been processed.
struct MDRef {
SMLoc Loc;
unsigned MDKind, MDSlot;
};
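// For example, in the following illustrative .ll fragment the attachment on
// the return refers to !0 before !0 has been defined, so the reference is
// recorded and resolved only after the standalone metadata has been parsed:
//
//   define void @f() {
//     ret void, !foo !0        ; use of !0 before its definition
//   }
//   !0 = !{}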
SmallVector<Instruction*, 64> InstsWithTBAATag;
// Type resolution handling data structures. The location is set when we
// have processed a use of the type but not a definition yet.
StringMap<std::pair<Type*, LocTy> > NamedTypes;
std::map<unsigned, std::pair<Type*, LocTy> > NumberedTypes;
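// For example, a named type may be used before it is defined; the use
// records the (type, location) pair above and the later definition resolves
// it (illustrative .ll fragment):
//
//   @g = external global %T
//   %T = type { i32, i32 }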
std::map<unsigned, TrackingMDNodeRef> NumberedMetadata;
std::map<unsigned, std::pair<TempMDTuple, LocTy>> ForwardRefMDNodes;
// Global Value reference information.
std::map<std::string, std::pair<GlobalValue*, LocTy> > ForwardRefVals;
std::map<unsigned, std::pair<GlobalValue*, LocTy> > ForwardRefValIDs;
std::vector<GlobalValue*> NumberedVals;
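// For example, a global may be referenced before its definition appears; the
// reference is recorded in ForwardRefVals (or ForwardRefValIDs for numbered
// globals such as @0) and patched up later (illustrative .ll fragment):
//
//   @a = global i32* @b
//   @b = global i32 7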
// Comdat forward reference information.
std::map<std::string, LocTy> ForwardRefComdats;
// References to blockaddress. The key is the function ValID, the value is
// a list of references to blocks in that function.
std::map<ValID, std::map<ValID, GlobalValue *>> ForwardRefBlockAddresses;
class PerFunctionState;
/// Reference to per-function state to allow basic blocks to be
/// forward-referenced by blockaddress constants within the same
/// function.
PerFunctionState *BlockAddressPFS;
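// For example, a blockaddress constant may name a block of the function
// currently being parsed before that block has been seen; BlockAddressPFS
// lets such references be resolved against the in-progress function
// (illustrative .ll fragment):
//
//   define i8* @g() {
//   entry:
//     ret i8* blockaddress(@g, %later)
//   later:
//     ret i8* null
//   }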
// Attribute builder reference information.
std::map<Value*, std::vector<unsigned> > ForwardRefAttrGroups;
std::map<unsigned, AttrBuilder> NumberedAttrBuilders;
public:
LLParser(StringRef F, SourceMgr &SM, SMDiagnostic &Err, Module *M,
SlotMapping *Slots = nullptr)
: Context(M->getContext()), Lex(F, SM, Err, M->getContext()), M(M),
Slots(Slots), BlockAddressPFS(nullptr) {}
bool Run();
bool parseStandaloneConstantValue(Constant *&C, const SlotMapping *Slots);
LLVMContext &getContext() { return Context; }
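// Clients normally reach this parser through the entry points declared in
// llvm/AsmParser/Parser.h rather than instantiating LLParser directly. A
// minimal sketch, assuming parseAssemblyString from that header (plus
// llvm/IR/LLVMContext.h, llvm/Support/SourceMgr.h, and
// llvm/Support/raw_ostream.h for errs()):
//
//   LLVMContext Ctx;
//   SMDiagnostic Err;
//   std::unique_ptr<Module> M =
//       parseAssemblyString("define void @f() { ret void }", Err, Ctx);
//   if (!M)
//     Err.print("example", errs());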
private:
bool Error(LocTy L, const Twine &Msg) const {
return Lex.Error(L, Msg);
}
bool TokError(const Twine &Msg) const {
return Error(Lex.getLoc(), Msg);
}
/// Restore the internal name and slot mappings using the mappings that
/// were created at an earlier parsing stage.
void restoreParsingState(const SlotMapping *Slots);
/// GetGlobalVal - Get a value with the specified name or ID, creating a
/// forward reference record if needed. This can return null if the value
/// exists but does not have the right type.
GlobalValue *GetGlobalVal(const std::string &N, Type *Ty, LocTy Loc);
GlobalValue *GetGlobalVal(unsigned ID, Type *Ty, LocTy Loc);
/// Get a Comdat with the specified name, creating a forward reference
/// record if needed.
Comdat *getComdat(const std::string &N, LocTy Loc);
// Helper Routines.
bool ParseToken(lltok::Kind T, const char *ErrMsg);
bool EatIfPresent(lltok::Kind T) {
if (Lex.getKind() != T) return false;
Lex.Lex();
return true;
}
FastMathFlags EatFastMathFlagsIfPresent() {
FastMathFlags FMF;
while (true)
switch (Lex.getKind()) {
case lltok::kw_fast: FMF.setUnsafeAlgebra(); Lex.Lex(); continue;
case lltok::kw_nnan: FMF.setNoNaNs(); Lex.Lex(); continue;
case lltok::kw_ninf: FMF.setNoInfs(); Lex.Lex(); continue;
case lltok::kw_nsz: FMF.setNoSignedZeros(); Lex.Lex(); continue;
case lltok::kw_arcp: FMF.setAllowReciprocal(); Lex.Lex(); continue;
default: return FMF;
}
return FMF;
}
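// For example, given "%r = fmul nnan arcp double %a, %b", this consumes the
// nnan and arcp tokens and returns a FastMathFlags value with those two
// flags set; the caller attaches them to the parsed instruction.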
bool ParseOptionalToken(lltok::Kind T, bool &Present,
LocTy *Loc = nullptr) {
if (Lex.getKind() != T) {
Present = false;
} else {
if (Loc)
*Loc = Lex.getLoc();
Lex.Lex();
Present = true;
}
return false;
}
bool ParseStringConstant(std::string &Result);
bool ParseUInt32(unsigned &Val);
bool ParseUInt32(unsigned &Val, LocTy &Loc) {
Loc = Lex.getLoc();
return ParseUInt32(Val);
}
bool ParseUInt64(uint64_t &Val);
bool ParseUInt64(uint64_t &Val, LocTy &Loc) {
Loc = Lex.getLoc();
return ParseUInt64(Val);
}
bool ParseStringAttribute(AttrBuilder &B);
bool ParseTLSModel(GlobalVariable::ThreadLocalMode &TLM);
bool ParseOptionalThreadLocal(GlobalVariable::ThreadLocalMode &TLM);
bool parseOptionalUnnamedAddr(bool &UnnamedAddr) {
return ParseOptionalToken(lltok::kw_unnamed_addr, UnnamedAddr);
}
bool ParseOptionalAddrSpace(unsigned &AddrSpace);
bool ParseOptionalParamAttrs(AttrBuilder &B);
bool ParseOptionalReturnAttrs(AttrBuilder &B);
bool ParseOptionalLinkage(unsigned &Linkage, bool &HasLinkage);
bool ParseOptionalLinkage(unsigned &Linkage) {
bool HasLinkage; return ParseOptionalLinkage(Linkage, HasLinkage);
}
bool ParseOptionalVisibility(unsigned &Visibility);
bool ParseOptionalDLLStorageClass(unsigned &DLLStorageClass);
bool ParseOptionalCallingConv(unsigned &CC);
bool ParseOptionalAlignment(unsigned &Alignment);
bool ParseOptionalDerefAttrBytes(lltok::Kind AttrKind, uint64_t &Bytes);
bool ParseScopeAndOrdering(bool isAtomic, SynchronizationScope &Scope,
AtomicOrdering &Ordering);
bool ParseOrdering(AtomicOrdering &Ordering);
bool ParseOptionalStackAlignment(unsigned &Alignment);
bool ParseOptionalCommaAlign(unsigned &Alignment, bool &AteExtraComma);
bool ParseOptionalCommaInAlloca(bool &IsInAlloca);
bool ParseIndexList(SmallVectorImpl<unsigned> &Indices,bool &AteExtraComma);
bool ParseIndexList(SmallVectorImpl<unsigned> &Indices) {
bool AteExtraComma;
if (ParseIndexList(Indices, AteExtraComma)) return true;
if (AteExtraComma)
return TokError("expected index");
return false;
}
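// For example, the trailing "1, 2" in
//
//   %v = extractvalue { i32, [4 x i8] } %agg, 1, 2
//
// is parsed by ParseIndexList into the index vector {1, 2}; the
// AteExtraComma overload reports when a comma was consumed but was followed
// by something other than an index (e.g. a metadata attachment).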
// Top-Level Entities
bool ParseTopLevelEntities();
bool ValidateEndOfModule();
bool ParseTargetDefinition();
bool ParseModuleAsm();
bool ParseDepLibs(); // FIXME: Remove in 4.0.
bool ParseUnnamedType();
bool ParseNamedType();
bool ParseDeclare();
bool ParseDefine();
bool ParseGlobalType(bool &IsConstant);
bool ParseUnnamedGlobal();
bool ParseNamedGlobal();
bool ParseGlobal(const std::string &Name, LocTy Loc, unsigned Linkage,
bool HasLinkage, unsigned Visibility,
unsigned DLLStorageClass,
GlobalVariable::ThreadLocalMode TLM, bool UnnamedAddr);
bool ParseAlias(const std::string &Name, LocTy Loc, unsigned Linkage,
unsigned Visibility, unsigned DLLStorageClass,
GlobalVariable::ThreadLocalMode TLM, bool UnnamedAddr);
bool parseComdat();
bool ParseStandaloneMetadata();
bool ParseNamedMetadata();
bool ParseMDString(MDString *&Result);
bool ParseMDNodeID(MDNode *&Result);
bool ParseUnnamedAttrGrp();
bool ParseFnAttributeValuePairs(AttrBuilder &B,
std::vector<unsigned> &FwdRefAttrGrps,
bool inAttrGrp, LocTy &BuiltinLoc);
// Type Parsing.
bool ParseType(Type *&Result, const Twine &Msg, bool AllowVoid = false);
bool ParseType(Type *&Result, bool AllowVoid = false) {
return ParseType(Result, "expected type", AllowVoid);
}
bool ParseType(Type *&Result, const Twine &Msg, LocTy &Loc,
bool AllowVoid = false) {
Loc = Lex.getLoc();
return ParseType(Result, Msg, AllowVoid);
}
bool ParseType(Type *&Result, LocTy &Loc, bool AllowVoid = false) {
Loc = Lex.getLoc();
return ParseType(Result, AllowVoid);
}
bool ParseAnonStructType(Type *&Result, bool Packed);
bool ParseStructBody(SmallVectorImpl<Type*> &Body);
bool ParseStructDefinition(SMLoc TypeLoc, StringRef Name,
std::pair<Type*, LocTy> &Entry,
Type *&ResultTy);
bool ParseArrayVectorType(Type *&Result, bool isVector);
bool ParseFunctionType(Type *&Result);
// Function Semantic Analysis.
class PerFunctionState {
LLParser &P;
Function &F;
std::map<std::string, std::pair<Value*, LocTy> > ForwardRefVals;
std::map<unsigned, std::pair<Value*, LocTy> > ForwardRefValIDs;
std::vector<Value*> NumberedVals;
/// FunctionNumber - If this is an unnamed function, this is its slot
/// number; otherwise it is -1.
int FunctionNumber;
public:
PerFunctionState(LLParser &p, Function &f, int FunctionNumber);
~PerFunctionState();
Function &getFunction() const { return F; }
bool FinishFunction();
/// GetVal - Get a value with the specified name or ID, creating a
/// forward reference record if needed. This can return null if the value
/// exists but does not have the right type.
Value *GetVal(const std::string &Name, Type *Ty, LocTy Loc);
Value *GetVal(unsigned ID, Type *Ty, LocTy Loc);
/// SetInstName - After an instruction is parsed and inserted into its
/// basic block, this installs its name.
bool SetInstName(int NameID, const std::string &NameStr, LocTy NameLoc,
Instruction *Inst);
/// GetBB - Get a basic block with the specified name or ID, creating a
/// forward reference record if needed. This can return null if the value
/// is not a BasicBlock.
BasicBlock *GetBB(const std::string &Name, LocTy Loc);
BasicBlock *GetBB(unsigned ID, LocTy Loc);
/// DefineBB - Define the specified basic block, which is either named or
/// unnamed. If there is an error, this returns null; otherwise it returns
/// the block being defined.
BasicBlock *DefineBB(const std::string &Name, LocTy Loc);
bool resolveForwardRefBlockAddresses();
};
bool ConvertValIDToValue(Type *Ty, ValID &ID, Value *&V,
PerFunctionState *PFS);
bool parseConstantValue(Type *Ty, Constant *&C);
bool ParseValue(Type *Ty, Value *&V, PerFunctionState *PFS);
bool ParseValue(Type *Ty, Value *&V, PerFunctionState &PFS) {
return ParseValue(Ty, V, &PFS);
}
bool ParseValue(Type *Ty, Value *&V, LocTy &Loc,
PerFunctionState &PFS) {
Loc = Lex.getLoc();
return ParseValue(Ty, V, &PFS);
}
bool ParseTypeAndValue(Value *&V, PerFunctionState *PFS);
bool ParseTypeAndValue(Value *&V, PerFunctionState &PFS) {
return ParseTypeAndValue(V, &PFS);
}
bool ParseTypeAndValue(Value *&V, LocTy &Loc, PerFunctionState &PFS) {
Loc = Lex.getLoc();
return ParseTypeAndValue(V, PFS);
}
bool ParseTypeAndBasicBlock(BasicBlock *&BB, LocTy &Loc,
PerFunctionState &PFS);
bool ParseTypeAndBasicBlock(BasicBlock *&BB, PerFunctionState &PFS) {
LocTy Loc;
return ParseTypeAndBasicBlock(BB, Loc, PFS);
}
struct ParamInfo {
LocTy Loc;
Value *V;
AttributeSet Attrs;
ParamInfo(LocTy loc, Value *v, AttributeSet attrs)
: Loc(loc), V(v), Attrs(attrs) {}
};
bool ParseParameterList(SmallVectorImpl<ParamInfo> &ArgList,
PerFunctionState &PFS,
bool IsMustTailCall = false,
bool InVarArgsFunc = false);
bool
ParseOptionalOperandBundles(SmallVectorImpl<OperandBundleDef> &BundleList,
PerFunctionState &PFS);
bool ParseExceptionArgs(SmallVectorImpl<Value *> &Args,
PerFunctionState &PFS);
// Constant Parsing.
bool ParseValID(ValID &ID, PerFunctionState *PFS = nullptr);
bool ParseGlobalValue(Type *Ty, Constant *&V);
bool ParseGlobalTypeAndValue(Constant *&V);
bool ParseGlobalValueVector(SmallVectorImpl<Constant *> &Elts);
bool parseOptionalComdat(StringRef GlobalName, Comdat *&C);
bool ParseMetadataAsValue(Value *&V, PerFunctionState &PFS);
bool ParseValueAsMetadata(Metadata *&MD, const Twine &TypeMsg,
PerFunctionState *PFS);
bool ParseMetadata(Metadata *&MD, PerFunctionState *PFS);
bool ParseMDTuple(MDNode *&MD, bool IsDistinct = false);
bool ParseMDNode(MDNode *&MD);
bool ParseMDNodeTail(MDNode *&MD);
bool ParseMDNodeVector(SmallVectorImpl<Metadata *> &MDs);
bool ParseMetadataAttachment(unsigned &Kind, MDNode *&MD);
bool ParseInstructionMetadata(Instruction &Inst);
bool ParseOptionalFunctionMetadata(Function &F);
template <class FieldTy>
bool ParseMDField(LocTy Loc, StringRef Name, FieldTy &Result);
template <class FieldTy> bool ParseMDField(StringRef Name, FieldTy &Result);
template <class ParserTy>
bool ParseMDFieldsImplBody(ParserTy parseField);
template <class ParserTy>
bool ParseMDFieldsImpl(ParserTy parseField, LocTy &ClosingLoc);
bool ParseSpecializedMDNode(MDNode *&N, bool IsDistinct = false);
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
bool Parse##CLASS(MDNode *&Result, bool IsDistinct);
#include "llvm/IR/Metadata.def"
// Function Parsing.
struct ArgInfo {
LocTy Loc;
Type *Ty;
AttributeSet Attrs;
std::string Name;
ArgInfo(LocTy L, Type *ty, AttributeSet Attr, const std::string &N)
: Loc(L), Ty(ty), Attrs(Attr), Name(N) {}
};
bool ParseArgumentList(SmallVectorImpl<ArgInfo> &ArgList, bool &isVarArg);
bool ParseFunctionHeader(Function *&Fn, bool isDefine);
bool ParseFunctionBody(Function &Fn);
bool ParseBasicBlock(PerFunctionState &PFS);
enum TailCallType { TCT_None, TCT_Tail, TCT_MustTail };
// Instruction Parsing. Each instruction parsing routine can return a
// normal result, an error result, or a result indicating that it ate an
// extra trailing comma.
enum InstResult { InstNormal = 0, InstError = 1, InstExtraComma = 2 };
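// For example, while parsing
//
//   %v = load i32, i32* %p, !foo !0
//
// the load parser may consume the comma expecting an "align" clause, find a
// metadata attachment instead, and return InstExtraComma so the caller can
// parse the attachment (illustrative sketch of the convention).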
int ParseInstruction(Instruction *&Inst, BasicBlock *BB,
PerFunctionState &PFS);
bool ParseCmpPredicate(unsigned &Pred, unsigned Opc);
bool ParseRet(Instruction *&Inst, BasicBlock *BB, PerFunctionState &PFS);
bool ParseBr(Instruction *&Inst, PerFunctionState &PFS);
bool ParseSwitch(Instruction *&Inst, PerFunctionState &PFS);
bool ParseIndirectBr(Instruction *&Inst, PerFunctionState &PFS);
bool ParseInvoke(Instruction *&Inst, PerFunctionState &PFS);
bool ParseResume(Instruction *&Inst, PerFunctionState &PFS);
bool ParseCleanupRet(Instruction *&Inst, PerFunctionState &PFS);
bool ParseCatchRet(Instruction *&Inst, PerFunctionState &PFS);
bool ParseCatchSwitch(Instruction *&Inst, PerFunctionState &PFS);
bool ParseCatchPad(Instruction *&Inst, PerFunctionState &PFS);
bool ParseCleanupPad(Instruction *&Inst, PerFunctionState &PFS);
bool ParseArithmetic(Instruction *&I, PerFunctionState &PFS, unsigned Opc,
unsigned OperandType);
bool ParseLogical(Instruction *&I, PerFunctionState &PFS, unsigned Opc);
bool ParseCompare(Instruction *&I, PerFunctionState &PFS, unsigned Opc);
bool ParseCast(Instruction *&I, PerFunctionState &PFS, unsigned Opc);
bool ParseSelect(Instruction *&I, PerFunctionState &PFS);
bool ParseVA_Arg(Instruction *&I, PerFunctionState &PFS);
bool ParseExtractElement(Instruction *&I, PerFunctionState &PFS);
bool ParseInsertElement(Instruction *&I, PerFunctionState &PFS);
bool ParseShuffleVector(Instruction *&I, PerFunctionState &PFS);
int ParsePHI(Instruction *&I, PerFunctionState &PFS);
bool ParseLandingPad(Instruction *&I, PerFunctionState &PFS);
bool ParseCall(Instruction *&I, PerFunctionState &PFS,
CallInst::TailCallKind IsTail);
int ParseAlloc(Instruction *&I, PerFunctionState &PFS);
int ParseLoad(Instruction *&I, PerFunctionState &PFS);
int ParseStore(Instruction *&I, PerFunctionState &PFS);
int ParseCmpXchg(Instruction *&I, PerFunctionState &PFS);
int ParseAtomicRMW(Instruction *&I, PerFunctionState &PFS);
int ParseFence(Instruction *&I, PerFunctionState &PFS);
int ParseGetElementPtr(Instruction *&I, PerFunctionState &PFS);
int ParseExtractValue(Instruction *&I, PerFunctionState &PFS);
int ParseInsertValue(Instruction *&I, PerFunctionState &PFS);
// Use-list order directives.
bool ParseUseListOrder(PerFunctionState *PFS = nullptr);
bool ParseUseListOrderBB();
bool ParseUseListOrderIndexes(SmallVectorImpl<unsigned> &Indexes);
bool sortUseListOrder(Value *V, ArrayRef<unsigned> Indexes, SMLoc Loc);
};
} // End llvm namespace
#endif