[ORC] Add support for emulated TLS to ORCv2.
This commit adds a ManglingOptions struct to IRMaterializationUnit and replaces IRCompileLayer::CompileFunction with a new IRCompileLayer::IRCompiler class. The ManglingOptions struct records the emulated-TLS state via a bool member, EmulatedTLS, which is true if emulated TLS is enabled and false otherwise. The IRCompileLayer::IRCompiler class wraps an IR compiler (the same way the CompileFunction typedef used to), but adds a method returning the IRMaterializationUnit::ManglingOptions that the compiler will use.

These changes allow us to correctly determine the symbols that will be produced when a thread-local global variable defined at the IR level is compiled with or without emulated TLS. This is required for ORCv2, where MaterializationUnits must declare their interface up-front.

Most ORCv2 clients should not require any changes. Clients writing custom IR compilers will need to wrap their compiler in an IRCompileLayer::IRCompiler rather than an IRCompileLayer::CompileFunction; this should be a straightforward change (see the modifications to CompileUtils.* in this patch for an example).
This commit is contained in: parent 09203c3fb4, commit ac6d037b49
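For reference, here is a minimal sketch (not part of the patch) of what such a custom compiler looks like after this change. The class name MyCompiler and its TargetMachine member are hypothetical; the IRCompileLayer::IRCompiler interface, irManglingOptionsFromTargetOptions(), and the std::unique_ptr-taking IRCompileLayer constructor are the ones introduced in the diff below.

#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Target/TargetMachine.h"

namespace {

using namespace llvm;
using namespace llvm::orc;

// Hypothetical custom compiler. Before this patch it would have been a plain
// callable matching IRCompileLayer::CompileFunction; now it subclasses
// IRCompileLayer::IRCompiler and hands its mangling options to the base class.
class MyCompiler : public IRCompileLayer::IRCompiler {
public:
  MyCompiler(TargetMachine &TM)
      : IRCompiler(irManglingOptionsFromTargetOptions(TM.Options)), TM(TM) {}

  // Compilation can now fail gracefully by returning an Error instead of
  // asserting or returning a null buffer.
  Expected<std::unique_ptr<MemoryBuffer>> operator()(Module &M) override {
    // ... run codegen against TM here, as SimpleCompiler does ...
    return make_error<StringError>("MyCompiler codegen not implemented",
                                   inconvertibleErrorCode());
  }

private:
  TargetMachine &TM;
};

} // end anonymous namespace

The layer then takes ownership of the compiler, e.g. IRCompileLayer CompileLayer(ES, ObjLayer, std::make_unique<MyCompiler>(*TM));, mirroring the unit-test updates at the end of the patch.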
@ -13,7 +13,9 @@
 #ifndef LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
 #define LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
 
+#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
 #include "llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h"
+#include "llvm/ExecutionEngine/Orc/Layer.h"
 #include <memory>
 
 namespace llvm {
@ -28,24 +30,31 @@ namespace orc {
 
 class JITTargetMachineBuilder;
 
+IRMaterializationUnit::ManglingOptions
+irManglingOptionsFromTargetOptions(const TargetOptions &Opts);
+
 /// Simple compile functor: Takes a single IR module and returns an ObjectFile.
 /// This compiler supports a single compilation thread and LLVMContext only.
 /// For multithreaded compilation, use ConcurrentIRCompiler below.
-class SimpleCompiler {
+class SimpleCompiler : public IRCompileLayer::IRCompiler {
 public:
   using CompileResult = std::unique_ptr<MemoryBuffer>;
 
   /// Construct a simple compile functor with the given target.
   SimpleCompiler(TargetMachine &TM, ObjectCache *ObjCache = nullptr)
-      : TM(TM), ObjCache(ObjCache) {}
+      : IRCompiler(irManglingOptionsFromTargetOptions(TM.Options)), TM(TM),
+        ObjCache(ObjCache) {}
 
   /// Set an ObjectCache to query before compiling.
   void setObjectCache(ObjectCache *NewCache) { ObjCache = NewCache; }
 
   /// Compile a Module to an ObjectFile.
-  CompileResult operator()(Module &M);
+  Expected<CompileResult> operator()(Module &M) override;
 
 private:
+  IRMaterializationUnit::ManglingOptions
+  manglingOptionsForTargetMachine(const TargetMachine &TM);
+
   CompileResult tryToLoadFromObjectCache(const Module &M);
   void notifyObjectCompiled(const Module &M, const MemoryBuffer &ObjBuffer);
 
@ -73,14 +82,14 @@ private:
 ///
 /// This class creates a new TargetMachine and SimpleCompiler instance for each
 /// compile.
-class ConcurrentIRCompiler {
+class ConcurrentIRCompiler : public IRCompileLayer::IRCompiler {
 public:
   ConcurrentIRCompiler(JITTargetMachineBuilder JTMB,
                        ObjectCache *ObjCache = nullptr);
 
   void setObjectCache(ObjectCache *ObjCache) { this->ObjCache = ObjCache; }
 
-  std::unique_ptr<MemoryBuffer> operator()(Module &M);
+  Expected<std::unique_ptr<MemoryBuffer>> operator()(Module &M) override;
 
 private:
   JITTargetMachineBuilder JTMB;
@ -29,14 +29,29 @@ namespace orc {
 
 class IRCompileLayer : public IRLayer {
 public:
-  using CompileFunction =
-      std::function<Expected<std::unique_ptr<MemoryBuffer>>(Module &)>;
+  class IRCompiler {
+  public:
+    IRCompiler(IRMaterializationUnit::ManglingOptions MO) : MO(std::move(MO)) {}
+    virtual ~IRCompiler();
+    const IRMaterializationUnit::ManglingOptions &getManglingOptions() const {
+      return MO;
+    }
+    virtual Expected<std::unique_ptr<MemoryBuffer>> operator()(Module &M) = 0;
+
+  protected:
+    IRMaterializationUnit::ManglingOptions &manglingOptions() { return MO; }
+
+  private:
+    IRMaterializationUnit::ManglingOptions MO;
+  };
+
   using NotifyCompiledFunction =
       std::function<void(VModuleKey K, ThreadSafeModule TSM)>;
 
   IRCompileLayer(ExecutionSession &ES, ObjectLayer &BaseLayer,
-                 CompileFunction Compile);
+                 std::unique_ptr<IRCompiler> Compile);
+
+  IRCompiler &getCompiler() { return *Compile; }
 
   void setNotifyCompiled(NotifyCompiledFunction NotifyCompiled);
 
@ -45,7 +60,8 @@ public:
 private:
   mutable std::mutex IRLayerMutex;
   ObjectLayer &BaseLayer;
-  CompileFunction Compile;
+  std::unique_ptr<IRCompiler> Compile;
+  const IRMaterializationUnit::ManglingOptions *ManglingOpts;
   NotifyCompiledFunction NotifyCompiled = NotifyCompiledFunction();
 };
 
@ -90,7 +106,10 @@ public:
   /// Compile the module, and add the resulting object to the base layer
   /// along with the given memory manager and symbol resolver.
   Error addModule(VModuleKey K, std::unique_ptr<Module> M) {
-    if (auto Err = BaseLayer.addObject(std::move(K), Compile(*M)))
+    auto Obj = Compile(*M);
+    if (!Obj)
+      return Obj.takeError();
+    if (auto Err = BaseLayer.addObject(std::move(K), std::move(*Obj)))
       return Err;
     if (NotifyCompiled)
       NotifyCompiled(std::move(K), std::move(M));
@ -130,7 +130,7 @@ protected:
   static std::unique_ptr<ObjectLayer>
   createObjectLinkingLayer(LLJITBuilderState &S, ExecutionSession &ES);
 
-  static Expected<IRCompileLayer::CompileFunction>
+  static Expected<std::unique_ptr<IRCompileLayer::IRCompiler>>
   createCompileFunction(LLJITBuilderState &S, JITTargetMachineBuilder JTMB);
 
   /// Create an LLJIT instance with a single compile thread.
@ -193,7 +193,7 @@ public:
       ExecutionSession &, const Triple &TT)>;
 
   using CompileFunctionCreator =
-      std::function<Expected<IRCompileLayer::CompileFunction>(
+      std::function<Expected<std::unique_ptr<IRCompileLayer::IRCompiler>>(
           JITTargetMachineBuilder JTMB)>;
 
   std::unique_ptr<ExecutionSession> ES;
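A hedged sketch (not part of the patch) of how an LLJIT client would use the updated CompileFunctionCreator typedef above; it assumes the existing LLJITBuilder::setCompileFunctionCreator setter:

// Sketch only: supplies a custom compiler to LLJIT via CompileFunctionCreator.
#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
#include "llvm/ExecutionEngine/Orc/LLJIT.h"
#include "llvm/Support/Error.h"

static llvm::Expected<std::unique_ptr<llvm::orc::LLJIT>>
makeJITWithCustomCompiler() {
  using namespace llvm;
  using namespace llvm::orc;
  return LLJITBuilder()
      .setCompileFunctionCreator(
          [](JITTargetMachineBuilder JTMB)
              -> Expected<std::unique_ptr<IRCompileLayer::IRCompiler>> {
            // ConcurrentIRCompiler is itself an IRCompiler after this patch,
            // so it can be returned directly.
            return std::make_unique<ConcurrentIRCompiler>(std::move(JTMB));
          })
      .create();
}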
@ -21,15 +21,62 @@
 namespace llvm {
 namespace orc {
 
+/// IRMaterializationUnit is a convenient base class for MaterializationUnits
+/// wrapping LLVM IR. Represents materialization responsibility for all symbols
+/// in the given module. If symbols are overridden by other definitions, then
+/// their linkage is changed to available-externally.
+class IRMaterializationUnit : public MaterializationUnit {
+public:
+  struct ManglingOptions {
+    bool EmulatedTLS = false;
+  };
+
+  using SymbolNameToDefinitionMap = std::map<SymbolStringPtr, GlobalValue *>;
+
+  /// Create an IRMaterializationLayer. Scans the module to build the
+  /// SymbolFlags and SymbolToDefinition maps.
+  IRMaterializationUnit(ExecutionSession &ES, const ManglingOptions &MO,
+                        ThreadSafeModule TSM, VModuleKey K);
+
+  /// Create an IRMaterializationLayer from a module, and pre-existing
+  /// SymbolFlags and SymbolToDefinition maps. The maps must provide
+  /// entries for each definition in M.
+  /// This constructor is useful for delegating work from one
+  /// IRMaterializationUnit to another.
+  IRMaterializationUnit(ThreadSafeModule TSM, VModuleKey K,
+                        SymbolFlagsMap SymbolFlags,
+                        SymbolNameToDefinitionMap SymbolToDefinition);
+
+  /// Return the ModuleIdentifier as the name for this MaterializationUnit.
+  StringRef getName() const override;
+
+  const ThreadSafeModule &getModule() const { return TSM; }
+
+protected:
+  ThreadSafeModule TSM;
+  SymbolNameToDefinitionMap SymbolToDefinition;
+
+private:
+  void discard(const JITDylib &JD, const SymbolStringPtr &Name) override;
+};
+
 /// Interface for layers that accept LLVM IR.
 class IRLayer {
 public:
-  IRLayer(ExecutionSession &ES);
+  IRLayer(ExecutionSession &ES,
+          const IRMaterializationUnit::ManglingOptions *&MO)
+      : ES(ES), MO(MO) {}
 
   virtual ~IRLayer();
 
   /// Returns the ExecutionSession for this layer.
   ExecutionSession &getExecutionSession() { return ES; }
 
+  /// Get the mangling options for this layer.
+  const IRMaterializationUnit::ManglingOptions *&getManglingOptions() const {
+    return MO;
+  }
+
   /// Sets the CloneToNewContextOnEmit flag (false by default).
   ///
   /// When set, IR modules added to this layer will be cloned on to a new
@ -57,49 +104,15 @@ public:
 private:
   bool CloneToNewContextOnEmit = false;
   ExecutionSession &ES;
-};
-
-/// IRMaterializationUnit is a convenient base class for MaterializationUnits
-/// wrapping LLVM IR. Represents materialization responsibility for all symbols
-/// in the given module. If symbols are overridden by other definitions, then
-/// their linkage is changed to available-externally.
-class IRMaterializationUnit : public MaterializationUnit {
-public:
-  using SymbolNameToDefinitionMap = std::map<SymbolStringPtr, GlobalValue *>;
-
-  /// Create an IRMaterializationLayer. Scans the module to build the
-  /// SymbolFlags and SymbolToDefinition maps.
-  IRMaterializationUnit(ExecutionSession &ES, ThreadSafeModule TSM,
-                        VModuleKey K);
-
-  /// Create an IRMaterializationLayer from a module, and pre-existing
-  /// SymbolFlags and SymbolToDefinition maps. The maps must provide
-  /// entries for each definition in M.
-  /// This constructor is useful for delegating work from one
-  /// IRMaterializationUnit to another.
-  IRMaterializationUnit(ThreadSafeModule TSM, VModuleKey K,
-                        SymbolFlagsMap SymbolFlags,
-                        SymbolNameToDefinitionMap SymbolToDefinition);
-
-  /// Return the ModuleIdentifier as the name for this MaterializationUnit.
-  StringRef getName() const override;
-
-  const ThreadSafeModule &getModule() const { return TSM; }
-
-protected:
-  ThreadSafeModule TSM;
-  SymbolNameToDefinitionMap SymbolToDefinition;
-
-private:
-  void discard(const JITDylib &JD, const SymbolStringPtr &Name) override;
+  const IRMaterializationUnit::ManglingOptions *&MO;
 };
 
 /// MaterializationUnit that materializes modules by calling the 'emit' method
 /// on the given IRLayer.
 class BasicIRLayerMaterializationUnit : public IRMaterializationUnit {
 public:
-  BasicIRLayerMaterializationUnit(IRLayer &L, VModuleKey K,
-                                  ThreadSafeModule TSM);
+  BasicIRLayerMaterializationUnit(IRLayer &L, const ManglingOptions &MO,
+                                  ThreadSafeModule TSM, VModuleKey K);
 
 private:
 
@ -182,8 +182,8 @@ public:
   IRSpeculationLayer(ExecutionSession &ES, IRCompileLayer &BaseLayer,
                      Speculator &Spec, MangleAndInterner &Mangle,
                      ResultEval Interpreter)
-      : IRLayer(ES), NextLayer(BaseLayer), S(Spec), Mangle(Mangle),
-        QueryAnalysis(Interpreter) {}
+      : IRLayer(ES, BaseLayer.getManglingOptions()), NextLayer(BaseLayer),
+        S(Spec), Mangle(Mangle), QueryAnalysis(Interpreter) {}
 
   void emit(MaterializationResponsibility R, ThreadSafeModule TSM);
 
@ -67,9 +67,11 @@ namespace orc {
 
 class PartitioningIRMaterializationUnit : public IRMaterializationUnit {
 public:
-  PartitioningIRMaterializationUnit(ExecutionSession &ES, ThreadSafeModule TSM,
-                                    VModuleKey K, CompileOnDemandLayer &Parent)
-      : IRMaterializationUnit(ES, std::move(TSM), std::move(K)),
+  PartitioningIRMaterializationUnit(ExecutionSession &ES,
+                                    const ManglingOptions &MO,
+                                    ThreadSafeModule TSM, VModuleKey K,
+                                    CompileOnDemandLayer &Parent)
+      : IRMaterializationUnit(ES, MO, std::move(TSM), std::move(K)),
         Parent(Parent) {}
 
   PartitioningIRMaterializationUnit(
@ -111,7 +113,8 @@ CompileOnDemandLayer::compileWholeModule(GlobalValueSet Requested) {
 CompileOnDemandLayer::CompileOnDemandLayer(
     ExecutionSession &ES, IRLayer &BaseLayer, LazyCallThroughManager &LCTMgr,
     IndirectStubsManagerBuilder BuildIndirectStubsManager)
-    : IRLayer(ES), BaseLayer(BaseLayer), LCTMgr(LCTMgr),
+    : IRLayer(ES, BaseLayer.getManglingOptions()), BaseLayer(BaseLayer),
+      LCTMgr(LCTMgr),
       BuildIndirectStubsManager(std::move(BuildIndirectStubsManager)) {}
 
 void CompileOnDemandLayer::setPartitionFunction(PartitionFunction Partition) {
@ -136,27 +139,23 @@ void CompileOnDemandLayer::emit(MaterializationResponsibility R,
   TSM.withModuleDo([&](Module &M) {
     // First, do some cleanup on the module:
     cleanUpModule(M);
-
-    MangleAndInterner Mangle(ES, M.getDataLayout());
-    for (auto &GV : M.global_values()) {
-      if (GV.isDeclaration() || GV.hasLocalLinkage() ||
-          GV.hasAppendingLinkage())
-        continue;
-
-      auto Name = Mangle(GV.getName());
-      auto Flags = JITSymbolFlags::fromGlobalValue(GV);
-      if (Flags.isCallable())
-        Callables[Name] = SymbolAliasMapEntry(Name, Flags);
-      else
-        NonCallables[Name] = SymbolAliasMapEntry(Name, Flags);
-    }
   });
 
+  for (auto &KV : R.getSymbols()) {
+    auto &Name = KV.first;
+    auto &Flags = KV.second;
+    if (Flags.isCallable())
+      Callables[Name] = SymbolAliasMapEntry(Name, Flags);
+    else
+      NonCallables[Name] = SymbolAliasMapEntry(Name, Flags);
+  }
+
   // Create a partitioning materialization unit and lodge it with the
   // implementation dylib.
   if (auto Err = PDR.getImplDylib().define(
           std::make_unique<PartitioningIRMaterializationUnit>(
-              ES, std::move(TSM), R.getVModuleKey(), *this))) {
+              ES, *getManglingOptions(), std::move(TSM), R.getVModuleKey(),
+              *this))) {
     ES.reportError(std::move(Err));
     R.failMaterialization();
     return;
@ -316,7 +315,7 @@ void CompileOnDemandLayer::emitPartition(
   }
 
   R.replace(std::make_unique<PartitioningIRMaterializationUnit>(
-      ES, std::move(TSM), R.getVModuleKey(), *this));
+      ES, *getManglingOptions(), std::move(TSM), R.getVModuleKey(), *this));
   BaseLayer.emit(std::move(R), std::move(*ExtractedTSM));
 }
 
@ -24,8 +24,17 @@
 namespace llvm {
 namespace orc {
 
+IRMaterializationUnit::ManglingOptions
+irManglingOptionsFromTargetOptions(const TargetOptions &Opts) {
+  IRMaterializationUnit::ManglingOptions MO;
+
+  MO.EmulatedTLS = Opts.EmulatedTLS;
+
+  return MO;
+}
+
 /// Compile a Module to an ObjectFile.
-SimpleCompiler::CompileResult SimpleCompiler::operator()(Module &M) {
+Expected<SimpleCompiler::CompileResult> SimpleCompiler::operator()(Module &M) {
   CompileResult CachedObject = tryToLoadFromObjectCache(M);
   if (CachedObject)
     return CachedObject;
@ -38,7 +47,8 @@ SimpleCompiler::CompileResult SimpleCompiler::operator()(Module &M) {
     legacy::PassManager PM;
     MCContext *Ctx;
     if (TM.addPassesToEmitMC(PM, Ctx, ObjStream))
-      llvm_unreachable("Target does not support MC emission.");
+      return make_error<StringError>("Target does not support MC emission",
+                                     inconvertibleErrorCode());
     PM.run(M);
   }
 
@ -47,14 +57,11 @@ SimpleCompiler::CompileResult SimpleCompiler::operator()(Module &M) {
 
   auto Obj = object::ObjectFile::createObjectFile(ObjBuffer->getMemBufferRef());
 
-  if (Obj) {
-    notifyObjectCompiled(M, *ObjBuffer);
-    return std::move(ObjBuffer);
-  }
+  if (!Obj)
+    return Obj.takeError();
 
-  // TODO: Actually report errors helpfully.
-  consumeError(Obj.takeError());
-  return nullptr;
+  notifyObjectCompiled(M, *ObjBuffer);
+  return std::move(ObjBuffer);
 }
 
 SimpleCompiler::CompileResult
@ -73,9 +80,11 @@ void SimpleCompiler::notifyObjectCompiled(const Module &M,
 
 ConcurrentIRCompiler::ConcurrentIRCompiler(JITTargetMachineBuilder JTMB,
                                            ObjectCache *ObjCache)
-    : JTMB(std::move(JTMB)), ObjCache(ObjCache) {}
+    : IRCompiler(irManglingOptionsFromTargetOptions(JTMB.getOptions())),
+      JTMB(std::move(JTMB)), ObjCache(ObjCache) {}
 
-std::unique_ptr<MemoryBuffer> ConcurrentIRCompiler::operator()(Module &M) {
+Expected<std::unique_ptr<MemoryBuffer>>
+ConcurrentIRCompiler::operator()(Module &M) {
   auto TM = cantFail(JTMB.createTargetMachine());
   SimpleCompiler C(*TM, ObjCache);
   return C(M);
@ -11,9 +11,14 @@
 namespace llvm {
 namespace orc {
 
+IRCompileLayer::IRCompiler::~IRCompiler() {}
+
 IRCompileLayer::IRCompileLayer(ExecutionSession &ES, ObjectLayer &BaseLayer,
-                               CompileFunction Compile)
-    : IRLayer(ES), BaseLayer(BaseLayer), Compile(std::move(Compile)) {}
+                               std::unique_ptr<IRCompiler> Compile)
+    : IRLayer(ES, ManglingOpts), BaseLayer(BaseLayer),
+      Compile(std::move(Compile)) {
+  ManglingOpts = &this->Compile->getManglingOptions();
+}
 
 void IRCompileLayer::setNotifyCompiled(NotifyCompiledFunction NotifyCompiled) {
   std::lock_guard<std::mutex> Lock(IRLayerMutex);
@ -24,7 +29,7 @@ void IRCompileLayer::emit(MaterializationResponsibility R,
                            ThreadSafeModule TSM) {
   assert(TSM && "Module must not be null");
 
-  if (auto Obj = TSM.withModuleDo(Compile)) {
+  if (auto Obj = TSM.withModuleDo(*Compile)) {
     {
       std::lock_guard<std::mutex> Lock(IRLayerMutex);
       if (NotifyCompiled)
@ -12,10 +12,10 @@
 namespace llvm {
 namespace orc {
 
-IRTransformLayer::IRTransformLayer(ExecutionSession &ES,
-                                   IRLayer &BaseLayer,
-                                   TransformFunction Transform)
-    : IRLayer(ES), BaseLayer(BaseLayer), Transform(std::move(Transform)) {}
+IRTransformLayer::IRTransformLayer(ExecutionSession &ES, IRLayer &BaseLayer,
+                                   TransformFunction Transform)
+    : IRLayer(ES, BaseLayer.getManglingOptions()), BaseLayer(BaseLayer),
+      Transform(std::move(Transform)) {}
 
 void IRTransformLayer::emit(MaterializationResponsibility R,
                             ThreadSafeModule TSM) {
@ -107,7 +107,7 @@ LLJIT::createObjectLinkingLayer(LLJITBuilderState &S, ExecutionSession &ES) {
   return std::unique_ptr<ObjectLayer>(std::move(ObjLinkingLayer));
 }
 
-Expected<IRCompileLayer::CompileFunction>
+Expected<std::unique_ptr<IRCompileLayer::IRCompiler>>
 LLJIT::createCompileFunction(LLJITBuilderState &S,
                              JITTargetMachineBuilder JTMB) {
 
@ -118,13 +118,13 @@ LLJIT::createCompileFunction(LLJITBuilderState &S,
   // Otherwise default to creating a SimpleCompiler, or ConcurrentIRCompiler,
   // depending on the number of threads requested.
   if (S.NumCompileThreads > 0)
-    return ConcurrentIRCompiler(std::move(JTMB));
+    return std::make_unique<ConcurrentIRCompiler>(std::move(JTMB));
 
   auto TM = JTMB.createTargetMachine();
   if (!TM)
     return TM.takeError();
 
-  return TMOwningSimpleCompiler(std::move(*TM));
+  return std::make_unique<TMOwningSimpleCompiler>(std::move(*TM));
 }
 
 LLJIT::LLJIT(LLJITBuilderState &S, Error &Err)
@ -7,6 +7,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/ExecutionEngine/Orc/Layer.h"
+#include "llvm/IR/Constants.h"
 #include "llvm/Object/ObjectFile.h"
 #include "llvm/Support/Debug.h"
 
@ -15,15 +16,15 @@
 namespace llvm {
 namespace orc {
 
-IRLayer::IRLayer(ExecutionSession &ES) : ES(ES) {}
 IRLayer::~IRLayer() {}
 
 Error IRLayer::add(JITDylib &JD, ThreadSafeModule TSM, VModuleKey K) {
   return JD.define(std::make_unique<BasicIRLayerMaterializationUnit>(
-      *this, std::move(K), std::move(TSM)));
+      *this, *getManglingOptions(), std::move(TSM), std::move(K)));
 }
 
 IRMaterializationUnit::IRMaterializationUnit(ExecutionSession &ES,
+                                             const ManglingOptions &MO,
                                              ThreadSafeModule TSM, VModuleKey K)
     : MaterializationUnit(SymbolFlagsMap(), std::move(K)), TSM(std::move(TSM)) {
 
@ -32,12 +33,44 @@ IRMaterializationUnit::IRMaterializationUnit(ExecutionSession &ES,
   MangleAndInterner Mangle(ES, this->TSM.getModuleUnlocked()->getDataLayout());
   this->TSM.withModuleDo([&](Module &M) {
     for (auto &G : M.global_values()) {
-      if (G.hasName() && !G.isDeclaration() && !G.hasLocalLinkage() &&
-          !G.hasAvailableExternallyLinkage() && !G.hasAppendingLinkage()) {
-        auto MangledName = Mangle(G.getName());
-        SymbolFlags[MangledName] = JITSymbolFlags::fromGlobalValue(G);
-        SymbolToDefinition[MangledName] = &G;
-      }
+      // Skip globals that don't generate symbols.
+      if (!G.hasName() || G.isDeclaration() || G.hasLocalLinkage() ||
+          G.hasAvailableExternallyLinkage() || G.hasAppendingLinkage())
+        continue;
+
+      // thread locals generate different symbols depending on whether or not
+      // emulated TLS is enabled.
+      if (G.isThreadLocal() && MO.EmulatedTLS) {
+        auto &GV = cast<GlobalVariable>(G);
+
+        auto Flags = JITSymbolFlags::fromGlobalValue(GV);
+
+        auto EmuTLSV = Mangle(("__emutls_v." + GV.getName()).str());
+        SymbolFlags[EmuTLSV] = Flags;
+        SymbolToDefinition[EmuTLSV] = &GV;
+
+        // If this GV has a non-zero initializer we'll need to emit an
+        // __emutls.t symbol too.
+        if (GV.hasInitializer()) {
+          const auto *InitVal = GV.getInitializer();
+
+          // Skip zero-initializers.
+          if (isa<ConstantAggregateZero>(InitVal))
+            continue;
+          const auto *InitIntValue = dyn_cast<ConstantInt>(InitVal);
+          if (InitIntValue && InitIntValue->isZero())
+            continue;
+
+          auto EmuTLST = Mangle(("__emutls_t." + GV.getName()).str());
+          SymbolFlags[EmuTLST] = Flags;
+        }
+        continue;
+      }
+
+      // Otherwise we just need a normal linker mangling.
+      auto MangledName = Mangle(G.getName());
+      SymbolFlags[MangledName] = JITSymbolFlags::fromGlobalValue(G);
+      SymbolToDefinition[MangledName] = &G;
     }
   });
 }
@ -72,8 +105,8 @@ void IRMaterializationUnit::discard(const JITDylib &JD,
 }
 
 BasicIRLayerMaterializationUnit::BasicIRLayerMaterializationUnit(
-    IRLayer &L, VModuleKey K, ThreadSafeModule TSM)
-    : IRMaterializationUnit(L.getExecutionSession(), std::move(TSM),
+    IRLayer &L, const ManglingOptions &MO, ThreadSafeModule TSM, VModuleKey K)
+    : IRMaterializationUnit(L.getExecutionSession(), MO, std::move(TSM),
                             std::move(K)),
       L(L), K(std::move(K)) {}
 
test/ExecutionEngine/OrcLazy/emulated-tls.ll (new file, 23 lines)
@ -0,0 +1,23 @@
+; RUN: not lli -no-process-syms -emulated-tls -jit-kind=orc-lazy %s 2>&1 \
+; RUN:   | FileCheck %s
+;
+; Test that emulated-tls does not generate any unexpected errors.
+;
+; Unfortunately we cannot test successful execution of JIT'd code with
+; emulated-tls as this would require the JIT itself, in this case lli, to be
+; built with emulated-tls, which is not a common configuration. Instead we test
+; that the only error produced by the JIT for a thread-local with emulated-tls
+; enabled is a missing symbol error for __emutls_get_address. An unresolved
+; reference to this symbol (and only this symbol) implies (1) that the emulated
+; tls lowering was applied, and (2) that thread locals defined in the JIT'd code
+; were otherwise handled correctly.
+
+; CHECK: JIT session error: Symbols not found: [ {{[^,]*}}__emutls_get_address ]
+
+@x = thread_local global i32 42, align 4
+
+define i32 @main(i32 %argc, i8** %argv) {
+entry:
+  %0 = load i32, i32* @x, align 4
+  ret i32 %0
+}
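A hedged sketch (not part of the patch) of what a host program would do to let JIT'd emulated-TLS code actually run: the __emutls_get_address referenced above must be resolvable in the executing process, which the lli change below achieves by exposing process symbols. The helper assumes the host itself links an emutls runtime (compiler-rt or libgcc) so the symbol exists in-process.

#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
#include "llvm/ExecutionEngine/Orc/LLJIT.h"
#include "llvm/Support/Error.h"

static llvm::Error exposeProcessSymbols(llvm::orc::LLJIT &J) {
  using namespace llvm::orc;
  // Search the host process for definitions (including __emutls_get_address),
  // the same mechanism lli uses below.
  auto Gen = DynamicLibrarySearchGenerator::GetForCurrentProcess(
      J.getDataLayout().getGlobalPrefix());
  if (!Gen)
    return Gen.takeError();
  J.getMainJITDylib().addGenerator(std::move(*Gen));
  return llvm::Error::success();
}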
@ -197,6 +197,11 @@ namespace {
                      cl::desc("Generate software floating point library calls"),
                      cl::init(false));
 
+  cl::opt<bool> NoProcessSymbols(
+      "no-process-syms",
+      cl::desc("Do not resolve lli process symbols in JIT'd code"),
+      cl::init(false));
+
   enum class DumpKind {
     NoDump,
     DumpFuncsToStdOut,
@ -795,12 +800,16 @@ int runOrcLazyJIT(const char *ProgName) {
   });
 
   orc::MangleAndInterner Mangle(J->getExecutionSession(), J->getDataLayout());
-  J->getMainJITDylib().addGenerator(
-      ExitOnErr(orc::DynamicLibrarySearchGenerator::GetForCurrentProcess(
-          J->getDataLayout().getGlobalPrefix(),
-          [MainName = Mangle("main")](const orc::SymbolStringPtr &Name) {
-            return Name != MainName;
-          })));
 
+  // Unless they've been explicitly disabled, make process symbols available to
+  // JIT'd code.
+  if (!NoProcessSymbols)
+    J->getMainJITDylib().addGenerator(
+        ExitOnErr(orc::DynamicLibrarySearchGenerator::GetForCurrentProcess(
+            J->getDataLayout().getGlobalPrefix(),
+            [MainName = Mangle("main")](const orc::SymbolStringPtr &Name) {
+              return Name != MainName;
+            })));
 
   orc::LocalCXXRuntimeOverrides CXXRuntimeOverrides;
   ExitOnErr(CXXRuntimeOverrides.enable(J->getMainJITDylib(), Mangle));
@ -94,7 +94,7 @@ TEST(LegacyRTDyldObjectLinkingLayerTest, TestSetProcessAllSections) {
   if (!TM)
     return;
 
-  auto Obj = SimpleCompiler(*TM)(*M);
+  auto Obj = cantFail(SimpleCompiler(*TM)(*M));
 
   {
     // Test with ProcessAllSections = false (the default).
@ -165,7 +165,7 @@ TEST_F(LegacyRTDyldObjectLinkingLayerExecutionTest, NoDuplicateFinalization) {
     Builder.CreateRet(FourtyTwo);
   }
 
-  auto Obj1 = Compile(*MB1.getModule());
+  auto Obj1 = cantFail(Compile(*MB1.getModule()));
 
   ModuleBuilder MB2(Context, "", "dummy");
   {
@ -178,7 +178,7 @@ TEST_F(LegacyRTDyldObjectLinkingLayerExecutionTest, NoDuplicateFinalization) {
     IRBuilder<> Builder(FooEntry);
     Builder.CreateRet(Builder.CreateCall(BarDecl));
   }
-  auto Obj2 = Compile(*MB2.getModule());
+  auto Obj2 = cantFail(Compile(*MB2.getModule()));
 
   auto K1 = ES.allocateVModule();
   Resolvers[K1] = std::make_shared<NullResolver>();
@ -251,7 +251,7 @@ TEST_F(LegacyRTDyldObjectLinkingLayerExecutionTest, NoPrematureAllocation) {
     Builder.CreateRet(FourtyTwo);
   }
 
-  auto Obj1 = Compile(*MB1.getModule());
+  auto Obj1 = cantFail(Compile(*MB1.getModule()));
 
   ModuleBuilder MB2(Context, "", "dummy");
   {
@ -264,7 +264,7 @@ TEST_F(LegacyRTDyldObjectLinkingLayerExecutionTest, NoPrematureAllocation) {
     Value *Seven = ConstantInt::getSigned(Int32Ty, 7);
     Builder.CreateRet(Seven);
   }
-  auto Obj2 = Compile(*MB2.getModule());
+  auto Obj2 = cantFail(Compile(*MB2.getModule()));
 
   auto K = ES.allocateVModule();
   cantFail(ObjLayer.addObject(K, std::move(Obj1)));
@ -296,7 +296,8 @@ TEST(LegacyObjectTransformLayerTest, Main) {
   LegacyObjectTransformLayer<decltype(BaseLayer), decltype(IdentityTransform)>
       TransformLayer(llvm::AcknowledgeORCv1Deprecation, BaseLayer,
                      IdentityTransform);
-  auto NullCompiler = [](llvm::Module &) {
+  auto NullCompiler = [](llvm::Module &)
+      -> llvm::Expected<std::unique_ptr<llvm::MemoryBuffer>> {
     return std::unique_ptr<llvm::MemoryBuffer>(nullptr);
   };
   LegacyIRCompileLayer<decltype(TransformLayer), decltype(NullCompiler)>
@ -44,7 +44,7 @@ protected:
     return MB.takeModule();
   }
 
-  std::unique_ptr<MemoryBuffer> createTestObject() {
+  Expected<std::unique_ptr<MemoryBuffer>> createTestObject() {
     orc::SimpleCompiler IRCompiler(*TM);
     auto M = createTestModule(TM->getTargetTriple());
     M->setDataLayout(TM->createDataLayout());
@ -161,7 +161,7 @@ TEST_F(OrcCAPIExecutionTest, TestAddObjectFile) {
   if (!SupportsJIT)
     return;
 
-  auto ObjBuffer = createTestObject();
+  auto ObjBuffer = cantFail(createTestObject());
 
   LLVMOrcJITStackRef JIT =
       LLVMOrcCreateInstance(wrap(TM.get()));
@ -89,7 +89,7 @@ TEST(RTDyldObjectLinkingLayerTest, TestSetProcessAllSections) {
   if (!TM)
     return;
 
-  auto Obj = SimpleCompiler(*TM)(*M);
+  auto Obj = cantFail(SimpleCompiler(*TM)(*M));
 
   EXPECT_FALSE(testSetProcessAllSections(
       MemoryBuffer::getMemBufferCopy(Obj->getBuffer()), false))
@ -115,7 +115,7 @@ TEST(RTDyldObjectLinkingLayerTest, TestOverrideObjectFlags) {
   public:
     FunkySimpleCompiler(TargetMachine &TM) : SimpleCompiler(TM) {}
 
-    CompileResult operator()(Module &M) {
+    Expected<CompileResult> operator()(Module &M) {
       auto *Foo = M.getFunction("foo");
       assert(Foo && "Expected function Foo not found");
       Foo->setVisibility(GlobalValue::HiddenVisibility);
@ -155,7 +155,8 @@ TEST(RTDyldObjectLinkingLayerTest, TestOverrideObjectFlags) {
   auto Foo = ES.intern("foo");
   RTDyldObjectLinkingLayer ObjLayer(
       ES, []() { return std::make_unique<SectionMemoryManager>(); });
-  IRCompileLayer CompileLayer(ES, ObjLayer, FunkySimpleCompiler(*TM));
+  IRCompileLayer CompileLayer(ES, ObjLayer,
+                              std::make_unique<FunkySimpleCompiler>(*TM));
 
   ObjLayer.setOverrideObjectFlagsWithResponsibilityFlags(true);
 
@ -184,7 +185,7 @@ TEST(RTDyldObjectLinkingLayerTest, TestAutoClaimResponsibilityForSymbols) {
   public:
     FunkySimpleCompiler(TargetMachine &TM) : SimpleCompiler(TM) {}
 
-    CompileResult operator()(Module &M) {
+    Expected<CompileResult> operator()(Module &M) {
       Function *BarImpl = Function::Create(
           FunctionType::get(Type::getVoidTy(M.getContext()), {}, false),
           GlobalValue::ExternalLinkage, "bar", &M);
@ -221,7 +222,8 @@ TEST(RTDyldObjectLinkingLayerTest, TestAutoClaimResponsibilityForSymbols) {
   auto Foo = ES.intern("foo");
   RTDyldObjectLinkingLayer ObjLayer(
       ES, []() { return std::make_unique<SectionMemoryManager>(); });
-  IRCompileLayer CompileLayer(ES, ObjLayer, FunkySimpleCompiler(*TM));
+  IRCompileLayer CompileLayer(ES, ObjLayer,
+                              std::make_unique<FunkySimpleCompiler>(*TM));
 
   ObjLayer.setAutoClaimResponsibilityForObjectSymbols(true);
 
@ -105,7 +105,7 @@ MockObjectLayer::ObjectPtr createTestObject() {
   B.CreateRet(ConstantInt::getSigned(Type::getInt32Ty(Ctx), 42));
 
   SimpleCompiler IRCompiler(*TM);
-  return IRCompiler(*MB.getModule());
+  return cantFail(IRCompiler(*MB.getModule()));
 }
 
 TEST(RemoteObjectLayer, AddObject) {