llvm-mirror/lib/Support/ThreadPool.cpp
Tim Northover (commit 7c89253a7a): Recommit: Support: add llvm::thread class that supports specifying stack size.
This adds a new llvm::thread class with the same interface as std::thread
except there is an extra constructor that allows us to set the new thread's
stack size. On Darwin even the default size is boosted to 8MB to match the main
thread.

It also switches all users of the older C-style `llvm_execute_on_thread` API
family over to `llvm::thread` followed by either a `detach` or `join` call and
removes the old API.

Moved definition of DefaultStackSize into the .cpp file to hopefully
fix the build on some (GCC-6?) machines.
2021-07-08 16:22:26 +01:00
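
For illustration, a minimal sketch of the interface this commit describes, assuming (per the message above) that the extra constructor takes an optional stack size in bytes ahead of the callable; the authoritative signature lives in llvm/Support/thread.h:

#include "llvm/ADT/Optional.h"
#include "llvm/Support/thread.h"

void spawnWithBigStack() {
  // Assumed form of the extra constructor described above: an optional stack
  // size (in bytes) ahead of the callable; passing llvm::None (or using the
  // plain constructor) would give the platform default.
  llvm::thread Worker(llvm::Optional<unsigned>(8 * 1024 * 1024), [] {
    // ... work that may need a deeper stack ...
  });
  Worker.join(); // or Worker.detach(), as the migrated callers now do
}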


//==-- llvm/Support/ThreadPool.cpp - A ThreadPool implementation -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a crude C++11 based thread pool.
//
//===----------------------------------------------------------------------===//
#include "llvm/Support/ThreadPool.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Threading.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#if LLVM_ENABLE_THREADS

ThreadPool::ThreadPool(ThreadPoolStrategy S)
    : ThreadCount(S.compute_thread_count()) {
  // Create ThreadCount threads that loop forever, waiting on QueueCondition
  // for tasks to be queued or for the pool to be destroyed.
  Threads.reserve(ThreadCount);
  for (unsigned ThreadID = 0; ThreadID < ThreadCount; ++ThreadID) {
    Threads.emplace_back([S, ThreadID, this] {
      S.apply_thread_strategy(ThreadID);
      while (true) {
        PackagedTaskTy Task;
        {
          std::unique_lock<std::mutex> LockGuard(QueueLock);
          // Wait for tasks to be pushed in the queue.
          QueueCondition.wait(LockGuard,
                              [&] { return !EnableFlag || !Tasks.empty(); });
          // Exit condition.
          if (!EnableFlag && Tasks.empty())
            return;
          // We have a task; grab it and release the lock on the queue.
          // We first need to signal that we are active before popping the
          // queue, in order for wait() to properly detect that, even if the
          // queue is empty, there is still a task in flight.
          ++ActiveThreads;
          Task = std::move(Tasks.front());
          Tasks.pop();
        }
        // Run the task we just grabbed.
        Task();

        bool Notify;
        {
          // Adjust `ActiveThreads`, in case someone waits on ThreadPool::wait().
          std::lock_guard<std::mutex> LockGuard(QueueLock);
          --ActiveThreads;
          Notify = workCompletedUnlocked();
        }
        // Notify task completion if this is the last active thread, in case
        // someone waits on ThreadPool::wait().
        if (Notify)
          CompletionCondition.notify_all();
      }
    });
  }
}

void ThreadPool::wait() {
  // Wait for all threads to complete and the queue to be empty.
  std::unique_lock<std::mutex> LockGuard(QueueLock);
  CompletionCondition.wait(LockGuard, [&] { return workCompletedUnlocked(); });
}

// Returns true if the calling thread is one of this pool's worker threads.
bool ThreadPool::isWorkerThread() const {
  llvm::thread::id CurrentThreadId = llvm::this_thread::get_id();
  for (const llvm::thread &Thread : Threads)
    if (CurrentThreadId == Thread.get_id())
      return true;
  return false;
}

std::shared_future<void> ThreadPool::asyncImpl(TaskTy Task) {
  // Wrap the Task in a packaged_task so we can return a future object.
  PackagedTaskTy PackagedTask(std::move(Task));
  auto Future = PackagedTask.get_future();
  {
    // Lock the queue and push the new task.
    std::unique_lock<std::mutex> LockGuard(QueueLock);

    // Don't allow enqueueing after disabling the pool.
    assert(EnableFlag && "Queuing a task during ThreadPool destruction");

    Tasks.push(std::move(PackagedTask));
  }
  QueueCondition.notify_one();
  return Future.share();
}

// The destructor joins all threads, waiting for completion.
ThreadPool::~ThreadPool() {
  {
    std::unique_lock<std::mutex> LockGuard(QueueLock);
    EnableFlag = false;
  }
  QueueCondition.notify_all();
  for (auto &Worker : Threads)
    Worker.join();
}

#else // LLVM_ENABLE_THREADS Disabled

// No threads are launched; issue a warning if ThreadCount is not 1.
ThreadPool::ThreadPool(ThreadPoolStrategy S)
    : ThreadCount(S.compute_thread_count()) {
  if (ThreadCount != 1) {
    errs() << "Warning: requested a ThreadPool with " << ThreadCount
           << " threads, but LLVM_ENABLE_THREADS has been turned off\n";
  }
}

void ThreadPool::wait() {
  // Sequential implementation running the tasks.
  while (!Tasks.empty()) {
    auto Task = std::move(Tasks.front());
    Tasks.pop();
    Task();
  }
}

std::shared_future<void> ThreadPool::asyncImpl(TaskTy Task) {
  // Get a future with launch::deferred execution using std::async.
  auto Future = std::async(std::launch::deferred, std::move(Task)).share();
  // Wrap the future so that both ThreadPool::wait() can operate and the
  // returned future can be synced on.
  PackagedTaskTy PackagedTask([Future]() { Future.get(); });
  Tasks.push(std::move(PackagedTask));
  return Future;
}

// In the sequential build, the destructor simply drains any remaining tasks.
ThreadPool::~ThreadPool() { wait(); }

#endif
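
For context, a minimal usage sketch of the pool implemented above, assuming the async()/wait() wrappers declared in llvm/Support/ThreadPool.h and the hardware_concurrency() strategy helper from llvm/Support/Threading.h:

#include "llvm/Support/ThreadPool.h"
#include "llvm/Support/Threading.h"
#include <atomic>

int main() {
  // Request a pool with up to four workers (the strategy caps the count).
  llvm::ThreadPool Pool(llvm::hardware_concurrency(4));

  std::atomic<unsigned> Sum{0};
  for (unsigned I = 0; I <= 15; ++I)
    Pool.async([&Sum, I] { Sum += I; }); // each call enqueues one task

  Pool.wait(); // blocks until the queue is empty and no task is in flight
  return Sum == 120 ? 0 : 1;
}

With LLVM_ENABLE_THREADS turned off, the same code still works: async() defers the work and wait() runs the queued tasks sequentially, as in the second half of the file.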