[BuildingAJIT] Update chapter 2 to use the ORCv2 APIs.

llvm-svn: 346726

parent 559b91886f
commit e479ac1aea
@@ -12,10 +12,11 @@ we welcome any feedback.
 Chapter 2 Introduction
 ======================
 
-**Warning: This text is currently out of date due to ORC API updates.**
+**Warning: This tutorial is currently being updated to account for ORC API
+changes. Only Chapters 1 and 2 are up-to-date.**
 
-**The example code has been updated and can be used. The text will be updated
-once the API churn dies down.**
+**Example code from Chapters 3 to 5 will compile and run, but has not been
+updated**
 
 Welcome to Chapter 2 of the "Building an ORC-based JIT in LLVM" tutorial. In
 `Chapter 1 <BuildingAJIT1.html>`_ of this series we examined a basic JIT
@@ -42,67 +43,49 @@ added to it. In this Chapter we will make optimization a phase of our JIT
 instead. For now this will provide us a motivation to learn more about ORC
 layers, but in the long term making optimization part of our JIT will yield an
 important benefit: When we begin lazily compiling code (i.e. deferring
-compilation of each function until the first time it's run), having
+compilation of each function until the first time it's run) having
 optimization managed by our JIT will allow us to optimize lazily too, rather
 than having to do all our optimization up-front.
 
 To add optimization support to our JIT we will take the KaleidoscopeJIT from
 Chapter 1 and compose an ORC *IRTransformLayer* on top. We will look at how the
 IRTransformLayer works in more detail below, but the interface is simple: the
-constructor for this layer takes a reference to the layer below (as all layers
-do) plus an *IR optimization function* that it will apply to each Module that
-is added via addModule:
+constructor for this layer takes a reference to the execution session and the
+layer below (as all layers do) plus an *IR optimization function* that it will
+apply to each Module that is added via addModule:
 
 .. code-block:: c++
 
   class KaleidoscopeJIT {
   private:
-    std::unique_ptr<TargetMachine> TM;
-    const DataLayout DL;
-    RTDyldObjectLinkingLayer<> ObjectLayer;
-    IRCompileLayer<decltype(ObjectLayer)> CompileLayer;
+    ExecutionSession ES;
+    RTDyldObjectLinkingLayer ObjectLayer;
+    IRCompileLayer CompileLayer;
+    IRTransformLayer TransformLayer;
 
-    using OptimizeFunction =
-        std::function<std::shared_ptr<Module>(std::shared_ptr<Module>)>;
-
-    IRTransformLayer<decltype(CompileLayer), OptimizeFunction> OptimizeLayer;
+    DataLayout DL;
+    MangleAndInterner Mangle;
+    ThreadSafeContext Ctx;
 
   public:
-    using ModuleHandle = decltype(OptimizeLayer)::ModuleHandleT;
 
-    KaleidoscopeJIT()
-        : TM(EngineBuilder().selectTarget()), DL(TM->createDataLayout()),
-          ObjectLayer([]() { return std::make_shared<SectionMemoryManager>(); }),
-          CompileLayer(ObjectLayer, SimpleCompiler(*TM)),
-          OptimizeLayer(CompileLayer,
-                        [this](std::unique_ptr<Module> M) {
-                          return optimizeModule(std::move(M));
-                        }) {
-      llvm::sys::DynamicLibrary::LoadLibraryPermanently(nullptr);
+    KaleidoscopeJIT(JITTargetMachineBuilder JTMB, DataLayout DL)
+        : ObjectLayer(ES,
+                      []() { return llvm::make_unique<SectionMemoryManager>(); }),
+          CompileLayer(ES, ObjectLayer, ConcurrentIRCompiler(std::move(JTMB))),
+          TransformLayer(ES, CompileLayer, optimizeModule),
+          DL(std::move(DL)), Mangle(ES, this->DL),
+          Ctx(llvm::make_unique<LLVMContext>()) {
+      ES.getMainJITDylib().setGenerator(
+          cantFail(DynamicLibrarySearchGenerator::GetForCurrentProcess(DL)));
     }
 
 Our extended KaleidoscopeJIT class starts out the same as it did in Chapter 1,
-but after the CompileLayer we introduce a typedef for our optimization function.
-In this case we use a std::function (a handy wrapper for "function-like" things)
-from a single unique_ptr<Module> input to a std::unique_ptr<Module> output. With
-our optimization function typedef in place we can declare our OptimizeLayer,
-which sits on top of our CompileLayer.
+but after the CompileLayer we introduce a new member, TransformLayer, which sits
+on top of our CompileLayer. We initialize our OptimizeLayer with a reference to
+the ExecutionSession and output layer (standard practice for layers), along with
+a *transform function*. For our transform function we supply our class's
+optimizeModule static method.
 
-To initialize our OptimizeLayer we pass it a reference to the CompileLayer
-below (standard practice for layers), and we initialize the OptimizeFunction
-using a lambda that calls out to an "optimizeModule" function that we will
-define below.
-
-.. code-block:: c++
-
-  // ...
-  auto Resolver = createLambdaResolver(
-      [&](const std::string &Name) {
-        if (auto Sym = OptimizeLayer.findSymbol(Name, false))
-          return Sym;
-        return JITSymbol(nullptr);
-      },
-  // ...
-
 .. code-block:: c++
 
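For orientation (this note and the sketch are not part of the commit): a minimal, self-contained example of how a client could drive the updated JIT class, using only the API that appears elsewhere in this diff (Create, getContext, getDataLayout, addModule, lookup). The module name "demo" and the function "foo" are invented for illustration.

.. code-block:: c++

  // Sketch only -- not part of this commit. Assumes the Chapter 2
  // KaleidoscopeJIT from this diff; "demo" and "foo" are made-up names.
  #include "KaleidoscopeJIT.h"
  #include "llvm/ADT/STLExtras.h"
  #include "llvm/IR/Constants.h"
  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Module.h"
  #include "llvm/Support/Error.h"
  #include "llvm/Support/TargetSelect.h"

  using namespace llvm;
  using namespace llvm::orc;

  int main() {
    InitializeNativeTarget();
    InitializeNativeTargetAsmPrinter();

    ExitOnError ExitOnErr;
    auto JIT = ExitOnErr(KaleidoscopeJIT::Create());

    // Build a module on the JIT's context so addModule can wrap it in the
    // JIT's ThreadSafeContext.
    auto M = llvm::make_unique<Module>("demo", JIT->getContext());
    M->setDataLayout(JIT->getDataLayout());

    // double foo() { return 42.0; }
    auto *FT = FunctionType::get(Type::getDoubleTy(JIT->getContext()), false);
    auto *F = Function::Create(FT, Function::ExternalLinkage, "foo", M.get());
    IRBuilder<> B(BasicBlock::Create(JIT->getContext(), "entry", F));
    B.CreateRet(ConstantFP::get(JIT->getContext(), APFloat(42.0)));

    // The transform layer optimizes the module before it reaches the compiler.
    ExitOnErr(JIT->addModule(std::move(M)));

    auto Sym = ExitOnErr(JIT->lookup("foo"));
    auto *Foo = (double (*)())(intptr_t)Sym.getAddress();
    return Foo() == 42.0 ? 0 : 1;
  }

Because optimizeModule is wired into the transform layer, the IR for "foo" is run through the function-pass pipeline before it ever reaches the compile layer.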
@@ -111,26 +94,13 @@ define below.
         std::move(Resolver)));
   // ...
 
-.. code-block:: c++
+Next we need to update our addModule method to replace the call to
+``CompileLayer::add`` with a call to ``OptimizeLayer::add`` instead.
 
-  // ...
-  return OptimizeLayer.findSymbol(MangledNameStream.str(), true);
-  // ...
-
 .. code-block:: c++
 
-  // ...
-  cantFail(OptimizeLayer.removeModule(H));
-  // ...
-
-Next we need to replace references to 'CompileLayer' with references to
-OptimizeLayer in our key methods: addModule, findSymbol, and removeModule. In
-addModule we need to be careful to replace both references: the findSymbol call
-inside our resolver, and the call through to addModule.
-
-.. code-block:: c++
-
-  std::shared_ptr<Module> optimizeModule(std::shared_ptr<Module> M) {
+  ThreadSafeModule optimizeModule(ThreadSafeModule M,
+                                  const MaterializationResponsibility &R) {
   // Create a function pass manager.
   auto FPM = llvm::make_unique<legacy::FunctionPassManager>(M.get());
 
@@ -150,12 +120,18 @@ inside our resolver, and the call through to addModule.
   }
 
 At the bottom of our JIT we add a private method to do the actual optimization:
-*optimizeModule*. This function sets up a FunctionPassManager, adds some passes
-to it, runs it over every function in the module, and then returns the mutated
-module. The specific optimizations are the same ones used in
-`Chapter 4 <LangImpl04.html>`_ of the "Implementing a language with LLVM"
-tutorial series. Readers may visit that chapter for a more in-depth
-discussion of these, and of IR optimization in general.
+*optimizeModule*. This function takes the module to be transformed as input (as
+a ThreadSafeModule) along with a reference to a new class:
+``MaterializationResponsibility``. The MaterializationResponsibility argument
+can be used to query JIT state for the module being transformed, such as the set
+of definitions in the module that JIT'd code is actively trying to call/access.
+For now we will ignore this argument and use a standard optimization
+pipeline. To do this we set up a FunctionPassManager, add some passes to it, run
+it over every function in the module, and then return the mutated module. The
+specific optimizations are the same ones used in `Chapter 4 <LangImpl04.html>`_
+of the "Implementing a language with LLVM" tutorial series. Readers may visit
+that chapter for a more in-depth discussion of these, and of IR optimization in
+general.
 
 And that's it in terms of changes to KaleidoscopeJIT: When a module is added via
 addModule the OptimizeLayer will call our optimizeModule function before passing
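Since the transform slot is just a std::function, any callable with the same shape can be substituted for optimizeModule. A hypothetical logging transform (not from this commit) that ignores the MaterializationResponsibility argument, prints the incoming IR, and passes the module through unchanged:

.. code-block:: c++

  // Hypothetical alternative transform (not from this commit). It has the
  // TransformFunction shape used by the JIT above and assumes the same
  // headers/usings as KaleidoscopeJIT.h plus llvm/Support/raw_ostream.h.
  static Expected<ThreadSafeModule>
  dumpModule(ThreadSafeModule TSM, const MaterializationResponsibility &R) {
    // Print the IR that is about to be compiled, then pass it on unchanged.
    TSM.getModule()->print(errs(), nullptr);
    return TSM;
  }

It could be passed to the IRTransformLayer constructor in place of optimizeModule, or installed later with setTransform (shown in the IRTransformLayer definition further down in this diff).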
@@ -163,148 +139,122 @@ the transformed module on to the CompileLayer below. Of course, we could have
 called optimizeModule directly in our addModule function and not gone to the
 bother of using the IRTransformLayer, but doing so gives us another opportunity
 to see how layers compose. It also provides a neat entry point to the *layer*
-concept itself, because IRTransformLayer turns out to be one of the simplest
-implementations of the layer concept that can be devised:
+concept itself, because IRTransformLayer is one of the simplest layers that
+can be implemented.
 
 .. code-block:: c++
 
-  template <typename BaseLayerT, typename TransformFtor>
-  class IRTransformLayer {
+  // From IRTransformLayer.h:
+  class IRTransformLayer : public IRLayer {
   public:
-    using ModuleHandleT = typename BaseLayerT::ModuleHandleT;
+    using TransformFunction = std::function<Expected<ThreadSafeModule>(
+        ThreadSafeModule, const MaterializationResponsibility &R)>;
 
-    IRTransformLayer(BaseLayerT &BaseLayer,
-                     TransformFtor Transform = TransformFtor())
-      : BaseLayer(BaseLayer), Transform(std::move(Transform)) {}
+    IRTransformLayer(ExecutionSession &ES, IRLayer &BaseLayer,
+                     TransformFunction Transform = identityTransform);
 
-    Expected<ModuleHandleT>
-    addModule(std::shared_ptr<Module> M,
-              std::shared_ptr<JITSymbolResolver> Resolver) {
-      return BaseLayer.addModule(Transform(std::move(M)), std::move(Resolver));
+    void setTransform(TransformFunction Transform) {
+      this->Transform = std::move(Transform);
     }
 
-    void removeModule(ModuleHandleT H) { BaseLayer.removeModule(H); }
-
-    JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
-      return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
+    static ThreadSafeModule
+    identityTransform(ThreadSafeModule TSM,
+                      const MaterializationResponsibility &R) {
+      return TSM;
     }
 
-    JITSymbol findSymbolIn(ModuleHandleT H, const std::string &Name,
-                           bool ExportedSymbolsOnly) {
-      return BaseLayer.findSymbolIn(H, Name, ExportedSymbolsOnly);
-    }
-
-    void emitAndFinalize(ModuleHandleT H) {
-      BaseLayer.emitAndFinalize(H);
-    }
-
-    TransformFtor& getTransform() { return Transform; }
-
-    const TransformFtor& getTransform() const { return Transform; }
+    void emit(MaterializationResponsibility R, ThreadSafeModule TSM) override;
 
   private:
-    BaseLayerT &BaseLayer;
-    TransformFtor Transform;
+    IRLayer &BaseLayer;
+    TransformFunction Transform;
   };
 
+  // From IRTransformLayer.cpp:
+
+  IRTransformLayer::IRTransformLayer(ExecutionSession &ES,
+                                     IRLayer &BaseLayer,
+                                     TransformFunction Transform)
+      : IRLayer(ES), BaseLayer(BaseLayer), Transform(std::move(Transform)) {}
+
+  void IRTransformLayer::emit(MaterializationResponsibility R,
+                              ThreadSafeModule TSM) {
+    assert(TSM.getModule() && "Module must not be null");
+
+    if (auto TransformedTSM = Transform(std::move(TSM), R))
+      BaseLayer.emit(std::move(R), std::move(*TransformedTSM));
+    else {
+      R.failMaterialization();
+      getExecutionSession().reportError(TransformedTSM.takeError());
+    }
+  }
 
 This is the whole definition of IRTransformLayer, from
-``llvm/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h``, stripped of its
-comments. It is a template class with two template arguments: ``BaesLayerT`` and
-``TransformFtor`` that provide the type of the base layer and the type of the
-"transform functor" (in our case a std::function) respectively. This class is
-concerned with two very simple jobs: (1) Running every IR Module that is added
-with addModule through the transform functor, and (2) conforming to the ORC
-layer interface. The interface consists of one typedef and five methods:
+``llvm/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h`` and
+``llvm/lib/ExecutionEngine/Orc/IRTransformLayer.cpp``. This class is concerned
+with two very simple jobs: (1) Running every IR Module that is emitted via this
+layer through the transform function object, and (2) implementing the ORC
+``IRLayer`` interface (which itself conforms to the general ORC Layer concept,
+more on that below). Most of the class is straightforward: a typedef for the
+transform function, a constructor to initialize the members, a setter for the
+transform function value, and a default no-op transform. The most important
+method is ``emit`` as this is half of our IRLayer interface. The emit method
+applies our transform to each module that it is called on and, if the transform
+succeeds, passes the transformed module to the base layer. If the transform
+fails, our emit function calls
+``MaterializationResponsibility::failMaterialization`` (this lets JIT clients
+who may be waiting on other threads know that the code they were waiting for has
+failed to compile) and logs the error with the execution session before bailing
+out.
 
-+------------------+-----------------------------------------------------------+
-| Interface        | Description                                               |
-+==================+===========================================================+
-|                  | Provides a handle that can be used to identify a module |
-| ModuleHandleT    | set when calling findSymbolIn, removeModule, or |
-|                  | emitAndFinalize. |
-+------------------+-----------------------------------------------------------+
-|                  | Takes a given set of Modules and makes them "available |
-|                  | for execution". This means that symbols in those modules |
-|                  | should be searchable via findSymbol and findSymbolIn, and |
-|                  | the address of the symbols should be read/writable (for |
-|                  | data symbols), or executable (for function symbols) after |
-|                  | JITSymbol::getAddress() is called. Note: This means that |
-| addModule        | addModule doesn't have to compile (or do any other |
-|                  | work) up-front. It *can*, like IRCompileLayer, act |
-|                  | eagerly, but it can also simply record the module and |
-|                  | take no further action until somebody calls |
-|                  | JITSymbol::getAddress(). In IRTransformLayer's case |
-|                  | addModule eagerly applies the transform functor to |
-|                  | each module in the set, then passes the resulting set |
-|                  | of mutated modules down to the layer below. |
-+------------------+-----------------------------------------------------------+
-|                  | Removes a set of modules from the JIT. Code or data |
-| removeModule     | defined in these modules will no longer be available, and |
-|                  | the memory holding the JIT'd definitions will be freed. |
-+------------------+-----------------------------------------------------------+
-|                  | Searches for the named symbol in all modules that have |
-|                  | previously been added via addModule (and not yet |
-| findSymbol       | removed by a call to removeModule). In |
-|                  | IRTransformLayer we just pass the query on to the layer |
-|                  | below. In our REPL this is our default way to search for |
-|                  | function definitions. |
-+------------------+-----------------------------------------------------------+
-|                  | Searches for the named symbol in the module set indicated |
-|                  | by the given ModuleHandleT. This is just an optimized |
-|                  | search, better for lookup-speed when you know exactly |
-|                  | a symbol definition should be found. In IRTransformLayer |
-| findSymbolIn     | we just pass this query on to the layer below. In our |
-|                  | REPL we use this method to search for functions |
-|                  | representing top-level expressions, since we know exactly |
-|                  | where we'll find them: in the top-level expression module |
-|                  | we just added. |
-+------------------+-----------------------------------------------------------+
-|                  | Forces all of the actions required to make the code and |
-|                  | data in a module set (represented by a ModuleHandleT) |
-|                  | accessible. Behaves as if some symbol in the set had been |
-|                  | searched for and JITSymbol::getSymbolAddress called. This |
-| emitAndFinalize  | is rarely needed, but can be useful when dealing with |
-|                  | layers that usually behave lazily if the user wants to |
-|                  | trigger early compilation (for example, to use idle CPU |
-|                  | time to eagerly compile code in the background). |
-+------------------+-----------------------------------------------------------+
+The other half of the IRLayer interface we inherit unmodified from the IRLayer
+class:
 
-This interface attempts to capture the natural operations of a JIT (with some
-wrinkles like emitAndFinalize for performance), similar to the basic JIT API
-operations we identified in Chapter 1. Conforming to the layer concept allows
-classes to compose neatly by implementing their behaviors in terms of the these
-same operations, carried out on the layer below. For example, an eager layer
-(like IRTransformLayer) can implement addModule by running each module in the
-set through its transform up-front and immediately passing the result to the
-layer below. A lazy layer, by contrast, could implement addModule by
-squirreling away the modules doing no other up-front work, but applying the
-transform (and calling addModule on the layer below) when the client calls
-findSymbol instead. The JIT'd program behavior will be the same either way, but
-these choices will have different performance characteristics: Doing work
-eagerly means the JIT takes longer up-front, but proceeds smoothly once this is
-done. Deferring work allows the JIT to get up-and-running quickly, but will
-force the JIT to pause and wait whenever some code or data is needed that hasn't
-already been processed.
+.. code-block:: c++
 
-Our current REPL is eager: Each function definition is optimized and compiled as
-soon as it's typed in. If we were to make the transform layer lazy (but not
-change things otherwise) we could defer optimization until the first time we
-reference a function in a top-level expression (see if you can figure out why,
-then check out the answer below [1]_). In the next chapter, however we'll
-introduce fully lazy compilation, in which function's aren't compiled until
-they're first called at run-time. At this point the trade-offs get much more
+  Error IRLayer::add(JITDylib &JD, ThreadSafeModule TSM, VModuleKey K) {
+    return JD.define(llvm::make_unique<BasicIRLayerMaterializationUnit>(
+        *this, std::move(K), std::move(TSM)));
+  }
+
+This code, from ``llvm/lib/ExecutionEngine/Orc/Layer.cpp``, adds a
+ThreadSafeModule to a given JITDylib by wrapping it up in a
+``MaterializationUnit`` (in this case a ``BasicIRLayerMaterializationUnit``).
+Most layers that derive from IRLayer can rely on this default implementation
+of the ``add`` method.
+
+These two operations, ``add`` and ``emit``, together constitute the layer
+concept: A layer is a way to wrap a portion of a compiler pipeline (in this case
+the "opt" phase of an LLVM compiler) whose API is opaque to ORC in an
+interface that allows ORC to invoke it when needed. The add method takes a
+module in some input program representation (in this case an LLVM IR module) and
+stores it in the target JITDylib, arranging for it to be passed back to the
+Layer's emit method when any symbol defined by that module is requested. Layers
+can compose neatly by calling the 'emit' method of a base layer to complete
+their work. For example, in this tutorial our IRTransformLayer calls through to
+our IRCompileLayer to compile the transformed IR, and our IRCompileLayer in turn
+calls our ObjectLayer to link the object file produced by our compiler.
+
+So far we have learned how to optimize and compile our LLVM IR, but we have not
+focused on when compilation happens. Our current REPL is eager: Each function
+definition is optimized and compiled as soon as it is referenced by any other
+code, regardless of whether it is ever called at runtime. In the next chapter we
+will introduce fully lazy compilation, in which functions are not compiled until
+they are first called at run-time. At this point the trade-offs get much more
 interesting: the lazier we are, the quicker we can start executing the first
-function, but the more often we'll have to pause to compile newly encountered
-functions. If we only code-gen lazily, but optimize eagerly, we'll have a slow
-startup (which everything is optimized) but relatively short pauses as each
-function just passes through code-gen. If we both optimize and code-gen lazily
-we can start executing the first function more quickly, but we'll have longer
-pauses as each function has to be both optimized and code-gen'd when it's first
-executed. Things become even more interesting if we consider interproceedural
-optimizations like inlining, which must be performed eagerly. These are
-complex trade-offs, and there is no one-size-fits all solution to them, but by
-providing composable layers we leave the decisions to the person implementing
-the JIT, and make it easy for them to experiment with different configurations.
+function, but the more often we will have to pause to compile newly encountered
+functions. If we only code-gen lazily, but optimize eagerly, we will have a
+longer startup time (as everything is optimized) but relatively short pauses as
+each function just passes through code-gen. If we both optimize and code-gen
+lazily we can start executing the first function more quickly, but we will have
+longer pauses as each function has to be both optimized and code-gen'd when it
+is first executed. Things become even more interesting if we consider
+interprocedural optimizations like inlining, which must be performed eagerly.
+These are complex trade-offs, and there is no one-size-fits-all solution to
+them, but by providing composable layers we leave the decisions to the person
+implementing the JIT, and make it easy for them to experiment with different
+configurations.
 
 `Next: Adding Per-function Lazy Compilation <BuildingAJIT3.html>`_
 
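A small illustration (not part of the commit) of the setTransform hook shown above. It assumes a JIT class that exposes its IRTransformLayer member as "TransformLayer":

.. code-block:: c++

  // Illustration only -- "TransformLayer" is an assumed IRTransformLayer
  // member like the one in this chapter's JIT.

  // Disable optimization while debugging by installing the no-op transform:
  TransformLayer.setTransform(IRTransformLayer::identityTransform);

  // Or install an ad-hoc callable with the TransformFunction signature:
  TransformLayer.setTransform(
      [](ThreadSafeModule TSM, const MaterializationResponsibility &R)
          -> Expected<ThreadSafeModule> {
        // Modules emitted after this point pass through unchanged.
        return TSM;
      });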
@@ -325,10 +275,3 @@ Here is the code:
 
 .. literalinclude:: ../../examples/Kaleidoscope/BuildingAJIT/Chapter2/KaleidoscopeJIT.h
    :language: c++
-
-.. [1] When we add our top-level expression to the JIT, any calls to functions
-       that we defined earlier will appear to the RTDyldObjectLinkingLayer as
-       external symbols. The RTDyldObjectLinkingLayer will call the SymbolResolver
-       that we defined in addModule, which in turn calls findSymbol on the
-       OptimizeLayer, at which point even a lazy transform layer will have to
-       do its work.
@@ -14,29 +14,23 @@
 #ifndef LLVM_EXECUTIONENGINE_ORC_KALEIDOSCOPEJIT_H
 #define LLVM_EXECUTIONENGINE_ORC_KALEIDOSCOPEJIT_H
 
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ADT/StringRef.h"
 #include "llvm/ExecutionEngine/JITSymbol.h"
 #include "llvm/ExecutionEngine/Orc/CompileUtils.h"
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
 #include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
 #include "llvm/ExecutionEngine/Orc/IRTransformLayer.h"
-#include "llvm/ExecutionEngine/Orc/LambdaResolver.h"
+#include "llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h"
 #include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
-#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
 #include "llvm/ExecutionEngine/SectionMemoryManager.h"
 #include "llvm/IR/DataLayout.h"
+#include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/LegacyPassManager.h"
-#include "llvm/IR/Mangler.h"
-#include "llvm/Support/DynamicLibrary.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetMachine.h"
 #include "llvm/Transforms/InstCombine/InstCombine.h"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Transforms/Scalar/GVN.h"
-#include <algorithm>
 #include <memory>
-#include <string>
-#include <vector>
 
 namespace llvm {
 namespace orc {
@@ -44,69 +38,60 @@ namespace orc {
 class KaleidoscopeJIT {
 private:
   ExecutionSession ES;
-  std::shared_ptr<SymbolResolver> Resolver;
-  std::unique_ptr<TargetMachine> TM;
-  const DataLayout DL;
-  LegacyRTDyldObjectLinkingLayer ObjectLayer;
-  LegacyIRCompileLayer<decltype(ObjectLayer), SimpleCompiler> CompileLayer;
+  RTDyldObjectLinkingLayer ObjectLayer;
+  IRCompileLayer CompileLayer;
+  IRTransformLayer OptimizeLayer;
 
-  using OptimizeFunction =
-      std::function<std::unique_ptr<Module>(std::unique_ptr<Module>)>;
-
-  LegacyIRTransformLayer<decltype(CompileLayer), OptimizeFunction> OptimizeLayer;
+  DataLayout DL;
+  MangleAndInterner Mangle;
+  ThreadSafeContext Ctx;
 
 public:
-  KaleidoscopeJIT()
-      : Resolver(createLegacyLookupResolver(
-            ES,
-            [this](const std::string &Name) -> JITSymbol {
-              if (auto Sym = OptimizeLayer.findSymbol(Name, false))
-                return Sym;
-              else if (auto Err = Sym.takeError())
-                return std::move(Err);
-              if (auto SymAddr =
-                      RTDyldMemoryManager::getSymbolAddressInProcess(Name))
-                return JITSymbol(SymAddr, JITSymbolFlags::Exported);
-              return nullptr;
-            },
-            [](Error Err) { cantFail(std::move(Err), "lookupFlags failed"); })),
-        TM(EngineBuilder().selectTarget()), DL(TM->createDataLayout()),
-        ObjectLayer(ES,
-                    [this](VModuleKey) {
-                      return LegacyRTDyldObjectLinkingLayer::Resources{
-                          std::make_shared<SectionMemoryManager>(), Resolver};
-                    }),
-        CompileLayer(ObjectLayer, SimpleCompiler(*TM)),
-        OptimizeLayer(CompileLayer, [this](std::unique_ptr<Module> M) {
-          return optimizeModule(std::move(M));
-        }) {
-    llvm::sys::DynamicLibrary::LoadLibraryPermanently(nullptr);
+
+  KaleidoscopeJIT(JITTargetMachineBuilder JTMB, DataLayout DL)
+      : ObjectLayer(ES,
+                    []() { return llvm::make_unique<SectionMemoryManager>(); }),
+        CompileLayer(ES, ObjectLayer, ConcurrentIRCompiler(std::move(JTMB))),
+        OptimizeLayer(ES, CompileLayer, optimizeModule),
+        DL(std::move(DL)), Mangle(ES, this->DL),
+        Ctx(llvm::make_unique<LLVMContext>()) {
+    ES.getMainJITDylib().setGenerator(
+        cantFail(DynamicLibrarySearchGenerator::GetForCurrentProcess(DL)));
   }
 
-  TargetMachine &getTargetMachine() { return *TM; }
+  const DataLayout &getDataLayout() const { return DL; }
 
-  VModuleKey addModule(std::unique_ptr<Module> M) {
-    // Add the module to the JIT with a new VModuleKey.
-    auto K = ES.allocateVModule();
-    cantFail(OptimizeLayer.addModule(K, std::move(M)));
-    return K;
+  LLVMContext &getContext() { return *Ctx.getContext(); }
+
+  static Expected<std::unique_ptr<KaleidoscopeJIT>> Create() {
+    auto JTMB = JITTargetMachineBuilder::detectHost();
+
+    if (!JTMB)
+      return JTMB.takeError();
+
+    auto DL = JTMB->getDefaultDataLayoutForTarget();
+    if (!DL)
+      return DL.takeError();
+
+    return llvm::make_unique<KaleidoscopeJIT>(std::move(*JTMB), std::move(*DL));
   }
 
-  JITSymbol findSymbol(const std::string Name) {
-    std::string MangledName;
-    raw_string_ostream MangledNameStream(MangledName);
-    Mangler::getNameWithPrefix(MangledNameStream, Name, DL);
-    return OptimizeLayer.findSymbol(MangledNameStream.str(), true);
+  Error addModule(std::unique_ptr<Module> M) {
+    return OptimizeLayer.add(ES.getMainJITDylib(),
+                             ThreadSafeModule(std::move(M), Ctx));
   }
 
-  void removeModule(VModuleKey K) {
-    cantFail(OptimizeLayer.removeModule(K));
+  Expected<JITEvaluatedSymbol> lookup(StringRef Name) {
+    return ES.lookup({&ES.getMainJITDylib()}, Mangle(Name.str()));
  }
 
 private:
-  std::unique_ptr<Module> optimizeModule(std::unique_ptr<Module> M) {
+  static Expected<ThreadSafeModule>
+  optimizeModule(ThreadSafeModule TSM,
+                 const MaterializationResponsibility &R) {
     // Create a function pass manager.
-    auto FPM = llvm::make_unique<legacy::FunctionPassManager>(M.get());
+    auto FPM = llvm::make_unique<legacy::FunctionPassManager>(TSM.getModule());
 
     // Add some optimizations.
     FPM->add(createInstructionCombiningPass());
@@ -117,10 +102,10 @@ private:
 
     // Run the optimizations over all functions in the module being added to
     // the JIT.
-    for (auto &F : *M)
+    for (auto &F : *TSM.getModule())
       FPM->run(F);
 
-    return M;
+    return TSM;
   }
 };
 
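As a side note on the new members (sketch, not from the commit): the wrapping that addModule performs above looks like this when done by hand with a fresh context. A ThreadSafeContext owns the LLVMContext, and a ThreadSafeModule pairs a Module with the ThreadSafeContext that owns the Module's LLVMContext.

.. code-block:: c++

  // Sketch only; the module name "demo" is invented. Uses the same types as
  // KaleidoscopeJIT.h in this diff.
  ThreadSafeContext TSCtx(llvm::make_unique<LLVMContext>());
  auto M = llvm::make_unique<Module>("demo", *TSCtx.getContext());
  ThreadSafeModule TSM(std::move(M), TSCtx);

ORC locks the context through the ThreadSafeContext whenever it needs to touch IR that the context owns, which is what makes it safe for the module to be compiled later on another thread.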
@@ -676,10 +676,11 @@ static std::unique_ptr<FunctionAST> ParseDefinition() {
 }
 
 /// toplevelexpr ::= expression
-static std::unique_ptr<FunctionAST> ParseTopLevelExpr() {
+static std::unique_ptr<FunctionAST> ParseTopLevelExpr(unsigned ExprCount) {
   if (auto E = ParseExpression()) {
     // Make an anonymous proto.
-    auto Proto = llvm::make_unique<PrototypeAST>("__anon_expr",
+    auto Proto = llvm::make_unique<PrototypeAST>(("__anon_expr" +
+                                                  Twine(ExprCount)).str(),
                                                  std::vector<std::string>());
     return llvm::make_unique<FunctionAST>(std::move(Proto), std::move(E));
   }
@@ -696,12 +697,13 @@ static std::unique_ptr<PrototypeAST> ParseExtern() {
 // Code Generation
 //===----------------------------------------------------------------------===//
 
-static LLVMContext TheContext;
-static IRBuilder<> Builder(TheContext);
+static std::unique_ptr<KaleidoscopeJIT> TheJIT;
+static LLVMContext *TheContext;
+static std::unique_ptr<IRBuilder<>> Builder;
 static std::unique_ptr<Module> TheModule;
 static std::map<std::string, AllocaInst *> NamedValues;
-static std::unique_ptr<KaleidoscopeJIT> TheJIT;
 static std::map<std::string, std::unique_ptr<PrototypeAST>> FunctionProtos;
+static ExitOnError ExitOnErr;
 
 Value *LogErrorV(const char *Str) {
   LogError(Str);
@@ -729,11 +731,11 @@ static AllocaInst *CreateEntryBlockAlloca(Function *TheFunction,
                                           const std::string &VarName) {
   IRBuilder<> TmpB(&TheFunction->getEntryBlock(),
                    TheFunction->getEntryBlock().begin());
-  return TmpB.CreateAlloca(Type::getDoubleTy(TheContext), nullptr, VarName);
+  return TmpB.CreateAlloca(Type::getDoubleTy(*TheContext), nullptr, VarName);
 }
 
 Value *NumberExprAST::codegen() {
-  return ConstantFP::get(TheContext, APFloat(Val));
+  return ConstantFP::get(*TheContext, APFloat(Val));
 }
 
 Value *VariableExprAST::codegen() {
@@ -743,7 +745,7 @@ Value *VariableExprAST::codegen() {
     return LogErrorV("Unknown variable name");
 
   // Load the value.
-  return Builder.CreateLoad(V, Name.c_str());
+  return Builder->CreateLoad(V, Name.c_str());
 }
 
 Value *UnaryExprAST::codegen() {
@@ -755,7 +757,7 @@ Value *UnaryExprAST::codegen() {
   if (!F)
     return LogErrorV("Unknown unary operator");
 
-  return Builder.CreateCall(F, OperandV, "unop");
+  return Builder->CreateCall(F, OperandV, "unop");
 }
 
 Value *BinaryExprAST::codegen() {
@@ -778,7 +780,7 @@ Value *BinaryExprAST::codegen() {
     if (!Variable)
       return LogErrorV("Unknown variable name");
 
-    Builder.CreateStore(Val, Variable);
+    Builder->CreateStore(Val, Variable);
     return Val;
   }
 
@@ -789,15 +791,15 @@ Value *BinaryExprAST::codegen() {
 
   switch (Op) {
   case '+':
-    return Builder.CreateFAdd(L, R, "addtmp");
+    return Builder->CreateFAdd(L, R, "addtmp");
   case '-':
-    return Builder.CreateFSub(L, R, "subtmp");
+    return Builder->CreateFSub(L, R, "subtmp");
   case '*':
-    return Builder.CreateFMul(L, R, "multmp");
+    return Builder->CreateFMul(L, R, "multmp");
   case '<':
-    L = Builder.CreateFCmpULT(L, R, "cmptmp");
+    L = Builder->CreateFCmpULT(L, R, "cmptmp");
     // Convert bool 0/1 to double 0.0 or 1.0
-    return Builder.CreateUIToFP(L, Type::getDoubleTy(TheContext), "booltmp");
+    return Builder->CreateUIToFP(L, Type::getDoubleTy(*TheContext), "booltmp");
   default:
     break;
   }
@@ -808,7 +810,7 @@ Value *BinaryExprAST::codegen() {
   assert(F && "binary operator not found!");
 
   Value *Ops[] = {L, R};
-  return Builder.CreateCall(F, Ops, "binop");
+  return Builder->CreateCall(F, Ops, "binop");
 }
 
 Value *CallExprAST::codegen() {
@@ -828,7 +830,7 @@ Value *CallExprAST::codegen() {
       return nullptr;
   }
 
-  return Builder.CreateCall(CalleeF, ArgsV, "calltmp");
+  return Builder->CreateCall(CalleeF, ArgsV, "calltmp");
 }
 
 Value *IfExprAST::codegen() {
@@ -837,46 +839,46 @@ Value *IfExprAST::codegen() {
     return nullptr;
 
   // Convert condition to a bool by comparing equal to 0.0.
-  CondV = Builder.CreateFCmpONE(
-      CondV, ConstantFP::get(TheContext, APFloat(0.0)), "ifcond");
+  CondV = Builder->CreateFCmpONE(
+      CondV, ConstantFP::get(*TheContext, APFloat(0.0)), "ifcond");
 
-  Function *TheFunction = Builder.GetInsertBlock()->getParent();
+  Function *TheFunction = Builder->GetInsertBlock()->getParent();
 
   // Create blocks for the then and else cases. Insert the 'then' block at the
   // end of the function.
-  BasicBlock *ThenBB = BasicBlock::Create(TheContext, "then", TheFunction);
-  BasicBlock *ElseBB = BasicBlock::Create(TheContext, "else");
-  BasicBlock *MergeBB = BasicBlock::Create(TheContext, "ifcont");
+  BasicBlock *ThenBB = BasicBlock::Create(*TheContext, "then", TheFunction);
+  BasicBlock *ElseBB = BasicBlock::Create(*TheContext, "else");
+  BasicBlock *MergeBB = BasicBlock::Create(*TheContext, "ifcont");
 
-  Builder.CreateCondBr(CondV, ThenBB, ElseBB);
+  Builder->CreateCondBr(CondV, ThenBB, ElseBB);
 
   // Emit then value.
-  Builder.SetInsertPoint(ThenBB);
+  Builder->SetInsertPoint(ThenBB);
 
   Value *ThenV = Then->codegen();
   if (!ThenV)
     return nullptr;
 
-  Builder.CreateBr(MergeBB);
+  Builder->CreateBr(MergeBB);
   // Codegen of 'Then' can change the current block, update ThenBB for the PHI.
-  ThenBB = Builder.GetInsertBlock();
+  ThenBB = Builder->GetInsertBlock();
 
   // Emit else block.
   TheFunction->getBasicBlockList().push_back(ElseBB);
-  Builder.SetInsertPoint(ElseBB);
+  Builder->SetInsertPoint(ElseBB);
 
   Value *ElseV = Else->codegen();
   if (!ElseV)
     return nullptr;
 
-  Builder.CreateBr(MergeBB);
+  Builder->CreateBr(MergeBB);
   // Codegen of 'Else' can change the current block, update ElseBB for the PHI.
-  ElseBB = Builder.GetInsertBlock();
+  ElseBB = Builder->GetInsertBlock();
 
   // Emit merge block.
   TheFunction->getBasicBlockList().push_back(MergeBB);
-  Builder.SetInsertPoint(MergeBB);
-  PHINode *PN = Builder.CreatePHI(Type::getDoubleTy(TheContext), 2, "iftmp");
+  Builder->SetInsertPoint(MergeBB);
+  PHINode *PN = Builder->CreatePHI(Type::getDoubleTy(*TheContext), 2, "iftmp");
 
   PN->addIncoming(ThenV, ThenBB);
   PN->addIncoming(ElseV, ElseBB);
@@ -903,7 +905,7 @@ Value *IfExprAST::codegen() {
 // br endcond, loop, endloop
 // outloop:
 Value *ForExprAST::codegen() {
-  Function *TheFunction = Builder.GetInsertBlock()->getParent();
+  Function *TheFunction = Builder->GetInsertBlock()->getParent();
 
   // Create an alloca for the variable in the entry block.
   AllocaInst *Alloca = CreateEntryBlockAlloca(TheFunction, VarName);
@@ -914,17 +916,17 @@ Value *ForExprAST::codegen() {
     return nullptr;
 
   // Store the value into the alloca.
-  Builder.CreateStore(StartVal, Alloca);
+  Builder->CreateStore(StartVal, Alloca);
 
   // Make the new basic block for the loop header, inserting after current
   // block.
-  BasicBlock *LoopBB = BasicBlock::Create(TheContext, "loop", TheFunction);
+  BasicBlock *LoopBB = BasicBlock::Create(*TheContext, "loop", TheFunction);
 
   // Insert an explicit fall through from the current block to the LoopBB.
-  Builder.CreateBr(LoopBB);
+  Builder->CreateBr(LoopBB);
 
   // Start insertion in LoopBB.
-  Builder.SetInsertPoint(LoopBB);
+  Builder->SetInsertPoint(LoopBB);
 
   // Within the loop, the variable is defined equal to the PHI node. If it
   // shadows an existing variable, we have to restore it, so save it now.
@@ -945,7 +947,7 @@ Value *ForExprAST::codegen() {
       return nullptr;
   } else {
     // If not specified, use 1.0.
-    StepVal = ConstantFP::get(TheContext, APFloat(1.0));
+    StepVal = ConstantFP::get(*TheContext, APFloat(1.0));
   }
 
   // Compute the end condition.
@@ -955,23 +957,23 @@ Value *ForExprAST::codegen() {
 
   // Reload, increment, and restore the alloca. This handles the case where
   // the body of the loop mutates the variable.
-  Value *CurVar = Builder.CreateLoad(Alloca, VarName.c_str());
-  Value *NextVar = Builder.CreateFAdd(CurVar, StepVal, "nextvar");
-  Builder.CreateStore(NextVar, Alloca);
+  Value *CurVar = Builder->CreateLoad(Alloca, VarName.c_str());
+  Value *NextVar = Builder->CreateFAdd(CurVar, StepVal, "nextvar");
+  Builder->CreateStore(NextVar, Alloca);
 
   // Convert condition to a bool by comparing equal to 0.0.
-  EndCond = Builder.CreateFCmpONE(
-      EndCond, ConstantFP::get(TheContext, APFloat(0.0)), "loopcond");
+  EndCond = Builder->CreateFCmpONE(
+      EndCond, ConstantFP::get(*TheContext, APFloat(0.0)), "loopcond");
 
   // Create the "after loop" block and insert it.
   BasicBlock *AfterBB =
-      BasicBlock::Create(TheContext, "afterloop", TheFunction);
+      BasicBlock::Create(*TheContext, "afterloop", TheFunction);
 
   // Insert the conditional branch into the end of LoopEndBB.
-  Builder.CreateCondBr(EndCond, LoopBB, AfterBB);
+  Builder->CreateCondBr(EndCond, LoopBB, AfterBB);
 
   // Any new code will be inserted in AfterBB.
-  Builder.SetInsertPoint(AfterBB);
+  Builder->SetInsertPoint(AfterBB);
 
   // Restore the unshadowed variable.
   if (OldVal)
@@ -980,13 +982,13 @@ Value *ForExprAST::codegen() {
     NamedValues.erase(VarName);
 
   // for expr always returns 0.0.
-  return Constant::getNullValue(Type::getDoubleTy(TheContext));
+  return Constant::getNullValue(Type::getDoubleTy(*TheContext));
 }
 
 Value *VarExprAST::codegen() {
   std::vector<AllocaInst *> OldBindings;
 
-  Function *TheFunction = Builder.GetInsertBlock()->getParent();
+  Function *TheFunction = Builder->GetInsertBlock()->getParent();
 
   // Register all variables and emit their initializer.
   for (unsigned i = 0, e = VarNames.size(); i != e; ++i) {
@@ -1004,11 +1006,11 @@ Value *VarExprAST::codegen() {
       if (!InitVal)
         return nullptr;
     } else { // If not specified, use 0.0.
-      InitVal = ConstantFP::get(TheContext, APFloat(0.0));
+      InitVal = ConstantFP::get(*TheContext, APFloat(0.0));
     }
 
     AllocaInst *Alloca = CreateEntryBlockAlloca(TheFunction, VarName);
-    Builder.CreateStore(InitVal, Alloca);
+    Builder->CreateStore(InitVal, Alloca);
 
     // Remember the old variable binding so that we can restore the binding when
     // we unrecurse.
@@ -1033,9 +1035,9 @@ Value *VarExprAST::codegen() {
 
 Function *PrototypeAST::codegen() {
   // Make the function type: double(double,double) etc.
-  std::vector<Type *> Doubles(Args.size(), Type::getDoubleTy(TheContext));
+  std::vector<Type *> Doubles(Args.size(), Type::getDoubleTy(*TheContext));
   FunctionType *FT =
-      FunctionType::get(Type::getDoubleTy(TheContext), Doubles, false);
+      FunctionType::get(Type::getDoubleTy(*TheContext), Doubles, false);
 
   Function *F =
       Function::Create(FT, Function::ExternalLinkage, Name, TheModule.get());
@@ -1062,8 +1064,8 @@ Function *FunctionAST::codegen() {
     BinopPrecedence[P.getOperatorName()] = P.getBinaryPrecedence();
 
   // Create a new basic block to start insertion into.
-  BasicBlock *BB = BasicBlock::Create(TheContext, "entry", TheFunction);
-  Builder.SetInsertPoint(BB);
+  BasicBlock *BB = BasicBlock::Create(*TheContext, "entry", TheFunction);
+  Builder->SetInsertPoint(BB);
 
   // Record the function arguments in the NamedValues map.
   NamedValues.clear();
@@ -1072,7 +1074,7 @@ Function *FunctionAST::codegen() {
     AllocaInst *Alloca = CreateEntryBlockAlloca(TheFunction, Arg.getName());
 
     // Store the initial value into the alloca.
-    Builder.CreateStore(&Arg, Alloca);
+    Builder->CreateStore(&Arg, Alloca);
 
     // Add arguments to variable symbol table.
     NamedValues[Arg.getName()] = Alloca;
@@ -1080,7 +1082,7 @@ Function *FunctionAST::codegen() {
 
   if (Value *RetVal = Body->codegen()) {
     // Finish off the function.
-    Builder.CreateRet(RetVal);
+    Builder->CreateRet(RetVal);
 
     // Validate the generated code, checking for consistency.
     verifyFunction(*TheFunction);
@@ -1102,8 +1104,11 @@ Function *FunctionAST::codegen() {
 
 static void InitializeModule() {
   // Open a new module.
-  TheModule = llvm::make_unique<Module>("my cool jit", TheContext);
-  TheModule->setDataLayout(TheJIT->getTargetMachine().createDataLayout());
+  TheModule = llvm::make_unique<Module>("my cool jit", *TheContext);
+  TheModule->setDataLayout(TheJIT->getDataLayout());
+
+  // Create a new builder for the module.
+  Builder = llvm::make_unique<IRBuilder<>>(*TheContext);
 }
 
 static void HandleDefinition() {
@@ -1112,7 +1117,7 @@ static void HandleDefinition() {
       fprintf(stderr, "Read function definition:");
       FnIR->print(errs());
       fprintf(stderr, "\n");
-      TheJIT->addModule(std::move(TheModule));
+      ExitOnErr(TheJIT->addModule(std::move(TheModule)));
       InitializeModule();
     }
   } else {
@@ -1136,25 +1141,27 @@ static void HandleExtern() {
 }
 
 static void HandleTopLevelExpression() {
+  static unsigned ExprCount = 0;
+
+  // Update ExprCount. This number will be added to anonymous expressions to
+  // prevent them from clashing.
+  ++ExprCount;
+
   // Evaluate a top-level expression into an anonymous function.
-  if (auto FnAST = ParseTopLevelExpr()) {
+  if (auto FnAST = ParseTopLevelExpr(ExprCount)) {
     if (FnAST->codegen()) {
       // JIT the module containing the anonymous expression, keeping a handle so
       // we can free it later.
-      auto H = TheJIT->addModule(std::move(TheModule));
+      ExitOnErr(TheJIT->addModule(std::move(TheModule)));
       InitializeModule();
 
-      // Search the JIT for the __anon_expr symbol.
-      auto ExprSymbol = TheJIT->findSymbol("__anon_expr");
-      assert(ExprSymbol && "Function not found");
+      // Get the anonymous expression's JITSymbol.
+      auto Sym =
+          ExitOnErr(TheJIT->lookup(("__anon_expr" + Twine(ExprCount)).str()));
 
-      // Get the symbol's address and cast it to the right type (takes no
-      // arguments, returns a double) so we can call it as a native function.
-      double (*FP)() = (double (*)())(intptr_t)cantFail(ExprSymbol.getAddress());
+      auto *FP = (double (*)())(intptr_t)Sym.getAddress();
+      assert(FP && "Failed to codegen function");
       fprintf(stderr, "Evaluated to %f\n", FP());
-
-      // Delete the anonymous expression module from the JIT.
-      TheJIT->removeModule(H);
     }
   } else {
     // Skip token for error recovery.
@@ -1222,7 +1229,8 @@ int main() {
   fprintf(stderr, "ready> ");
   getNextToken();
 
-  TheJIT = llvm::make_unique<KaleidoscopeJIT>();
+  TheJIT = ExitOnErr(KaleidoscopeJIT::Create());
+  TheContext = &TheJIT->getContext();
 
   InitializeModule();
 
|
|||||||
SymbolStringPtr Name) {
|
SymbolStringPtr Name) {
|
||||||
SymbolNameSet Names({Name});
|
SymbolNameSet Names({Name});
|
||||||
|
|
||||||
JITDylibSearchList FullSearchOrder(SearchOrder.size());
|
JITDylibSearchList FullSearchOrder;
|
||||||
|
FullSearchOrder.reserve(SearchOrder.size());
|
||||||
for (auto *JD : SearchOrder)
|
for (auto *JD : SearchOrder)
|
||||||
FullSearchOrder.push_back({JD, false});
|
FullSearchOrder.push_back({JD, false});
|
||||||
|
|
||||||
|
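A note on the final hunk (illustration only, not LLVM code): constructing the search list with a size argument creates that many value-initialized placeholder entries, and the later push_back calls append after them; switching to an empty vector plus reserve() keeps the list correct while still avoiding reallocation. The difference in miniature:

.. code-block:: c++

  // Standalone illustration of the container bug fixed above.
  #include <cassert>
  #include <vector>

  int main() {
    std::vector<int> Sized(3);    // size 3: {0, 0, 0}
    Sized.push_back(1);           // {0, 0, 0, 1} -- placeholders remain
    assert(Sized.size() == 4);

    std::vector<int> Reserved;
    Reserved.reserve(3);          // capacity 3, size 0
    Reserved.push_back(1);        // {1}
    assert(Reserved.size() == 1);
    return 0;
  }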