//==-- llvm/Support/ThreadPool.cpp - A ThreadPool implementation -*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a crude C++11 based thread pool.
//
//===----------------------------------------------------------------------===//
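
// A minimal usage sketch (illustrative only; the public entry points,
// including the templated async() wrapper, are declared in
// llvm/Support/ThreadPool.h):
//
//   ThreadPool Pool(4);                      // four worker threads
//   std::shared_future<void> F =
//       Pool.async([] { /* some work */ });  // enqueue a task
//   F.wait();                                // block on this one task
//   Pool.wait();                             // or block until all tasks finish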

#include "llvm/Support/ThreadPool.h"

#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Threading.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#if LLVM_ENABLE_THREADS

// Default to hardware_concurrency
ThreadPool::ThreadPool() : ThreadPool(hardware_concurrency()) {}

ThreadPool::ThreadPool(unsigned ThreadCount)
    : ActiveThreads(0), EnableFlag(true) {
  // Create ThreadCount threads that will loop forever, waiting on
  // QueueCondition for tasks to be queued or for the pool to be destroyed.
  Threads.reserve(ThreadCount);
  for (unsigned ThreadID = 0; ThreadID < ThreadCount; ++ThreadID) {
    Threads.emplace_back([&] {
      while (true) {
        PackagedTaskTy Task;
        {
          std::unique_lock<std::mutex> LockGuard(QueueLock);
          // Wait for tasks to be pushed in the queue
          QueueCondition.wait(LockGuard,
                              [&] { return !EnableFlag || !Tasks.empty(); });
          // Exit condition
          if (!EnableFlag && Tasks.empty())
            return;
          // We have a task; grab it and release the lock on the queue.

          // We first need to signal that we are active before popping the
          // queue in order for wait() to properly detect that even if the
          // queue is empty, there is still a task in flight.
          {
            std::unique_lock<std::mutex> LockGuard(CompletionLock);
            ++ActiveThreads;
          }
          Task = std::move(Tasks.front());
          Tasks.pop();
        }
        // Run the task we just grabbed
        Task();

        {
          // Adjust `ActiveThreads`, in case someone waits on ThreadPool::wait()
          std::unique_lock<std::mutex> LockGuard(CompletionLock);
          --ActiveThreads;
        }

        // Notify task completion, in case someone waits on ThreadPool::wait()
        CompletionCondition.notify_all();
      }
    });
  }
}

void ThreadPool::wait() {
  // Wait for all threads to complete and the queue to be empty
  std::unique_lock<std::mutex> LockGuard(CompletionLock);
  // The order of the checks for ActiveThreads and Tasks.empty() matters because
  // any active threads might be modifying the Tasks queue, and this would be a
  // race.
  CompletionCondition.wait(LockGuard,
                           [&] { return !ActiveThreads && Tasks.empty(); });
}

std::shared_future<void> ThreadPool::asyncImpl(TaskTy Task) {
  // Wrap the Task in a packaged_task to return a future object.
  PackagedTaskTy PackagedTask(std::move(Task));
  auto Future = PackagedTask.get_future();
  {
    // Lock the queue and push the new task
    std::unique_lock<std::mutex> LockGuard(QueueLock);

    // Don't allow enqueueing after disabling the pool
    assert(EnableFlag && "Queuing a task during ThreadPool destruction");

    Tasks.push(std::move(PackagedTask));
  }
  QueueCondition.notify_one();
  return Future.share();
}
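
// Note: callers do not invoke asyncImpl() directly. The templated
// ThreadPool::async() declared in llvm/Support/ThreadPool.h binds the callable
// with its arguments and forwards the result here; roughly (a sketch, not the
// verbatim header):
//
//   template <typename Function, typename... Args>
//   std::shared_future<void> async(Function &&F, Args &&... ArgList) {
//     auto Task =
//         std::bind(std::forward<Function>(F), std::forward<Args>(ArgList)...);
//     return asyncImpl(std::move(Task));
//   }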

// The destructor joins all threads, waiting for completion.
ThreadPool::~ThreadPool() {
  {
    std::unique_lock<std::mutex> LockGuard(QueueLock);
    EnableFlag = false;
  }
  QueueCondition.notify_all();
  for (auto &Worker : Threads)
    Worker.join();
}

#else // LLVM_ENABLE_THREADS Disabled

ThreadPool::ThreadPool() : ThreadPool(0) {}

// No threads are launched; issue a warning if ThreadCount is not 0
ThreadPool::ThreadPool(unsigned ThreadCount)
    : ActiveThreads(0) {
  if (ThreadCount) {
    errs() << "Warning: requested a ThreadPool with " << ThreadCount
           << " threads, but LLVM_ENABLE_THREADS has been turned off\n";
  }
}

void ThreadPool::wait() {
  // Sequential implementation running the tasks
  while (!Tasks.empty()) {
    auto Task = std::move(Tasks.front());
    Tasks.pop();
    Task();
  }
}

std::shared_future<void> ThreadPool::asyncImpl(TaskTy Task) {
  // Get a Future with launch::deferred execution using std::async
  auto Future = std::async(std::launch::deferred, std::move(Task)).share();
  // Wrap the future so that both ThreadPool::wait() can operate and the
  // returned future can be sync'ed on.
  PackagedTaskTy PackagedTask([Future]() { Future.get(); });
  Tasks.push(std::move(PackagedTask));
  return Future;
}

ThreadPool::~ThreadPool() {
  wait();
}

#endif