//==-- llvm/Support/ThreadPool.cpp - A ThreadPool implementation -*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a crude C++11 based thread pool.
//
//===----------------------------------------------------------------------===//

#include "llvm/Support/ThreadPool.h"

#include "llvm/Config/llvm-config.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#if LLVM_ENABLE_THREADS

// Default to std::thread::hardware_concurrency
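// Note: std::thread::hardware_concurrency() may return 0 when the value is not
// computable. With this implementation that would create a pool with no worker
// threads, so queued tasks would never execute; callers that need a guaranteed
// minimum should pass an explicit ThreadCount.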
ThreadPool::ThreadPool() : ThreadPool(std::thread::hardware_concurrency()) {}

ThreadPool::ThreadPool(unsigned ThreadCount)
    : ActiveThreads(0), EnableFlag(true) {
  // Create ThreadCount threads that loop forever, waiting on QueueCondition
  // for tasks to be queued or for the pool to be destroyed.
  Threads.reserve(ThreadCount);
  for (unsigned ThreadID = 0; ThreadID < ThreadCount; ++ThreadID) {
    Threads.emplace_back([&] {
      while (true) {
        PackagedTaskTy Task;
        {
          std::unique_lock<std::mutex> LockGuard(QueueLock);
          // Wait for tasks to be pushed in the queue
          QueueCondition.wait(LockGuard,
                              [&] { return !EnableFlag || !Tasks.empty(); });
          // Exit condition
          if (!EnableFlag && Tasks.empty())
            return;
          // We have a task: grab it, then release the lock on the queue.

          // We first need to signal that we are active before popping the queue
          // in order for wait() to properly detect that even if the queue is
          // empty, there is still a task in flight.
          {
            ++ActiveThreads;
            std::unique_lock<std::mutex> LockGuard(CompletionLock);
          }
          Task = std::move(Tasks.front());
          Tasks.pop();
        }
        // Run the task we just grabbed
        Task();

        {
          // Adjust `ActiveThreads`, in case someone waits on ThreadPool::wait()
          std::unique_lock<std::mutex> LockGuard(CompletionLock);
          --ActiveThreads;
        }

        // Notify task completion, in case someone waits on ThreadPool::wait()
        CompletionCondition.notify_all();
      }
    });
  }
}
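// Summary of the worker loop above: each worker sleeps on QueueCondition until
// a task is queued or the pool is being torn down, marks itself active (so
// wait() sees in-flight work even while the queue is momentarily empty) before
// popping a task, runs the task outside QueueLock, and finally decrements
// ActiveThreads and notifies CompletionCondition for any waiter in wait().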

void ThreadPool::wait() {
  // Wait for all threads to complete and the queue to be empty
  std::unique_lock<std::mutex> LockGuard(CompletionLock);
  // The order of the checks for ActiveThreads and Tasks.empty() matters because
  // any active threads might be modifying the Tasks queue, and this would be a
  // race.
  CompletionCondition.wait(LockGuard,
                           [&] { return !ActiveThreads && Tasks.empty(); });
}
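// Typical use of wait() as a barrier (a minimal sketch; async() is the
// template wrapper declared in ThreadPool.h that forwards to asyncImpl, and
// 'Items'/'process' are hypothetical placeholders):
//
//   ThreadPool Pool;
//   for (auto &Item : Items)
//     Pool.async([&Item] { process(Item); });
//   Pool.wait(); // returns once every queued task has finished running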

std::shared_future<void> ThreadPool::asyncImpl(TaskTy Task) {
  // Wrap the Task in a packaged_task to return a future object.
  PackagedTaskTy PackagedTask(std::move(Task));
  auto Future = PackagedTask.get_future();
  {
    // Lock the queue and push the new task
    std::unique_lock<std::mutex> LockGuard(QueueLock);

    // Don't allow enqueueing after disabling the pool
    assert(EnableFlag && "Queuing a task during ThreadPool destruction");

    Tasks.push(std::move(PackagedTask));
  }
  QueueCondition.notify_one();
  return Future.share();
}
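// The returned std::shared_future<void> lets a caller block on one specific
// task rather than on the whole pool (a minimal sketch, again assuming the
// async() wrapper from ThreadPool.h):
//
//   ThreadPool Pool;
//   auto F = Pool.async([] { /* some work */ });
//   F.wait(); // blocks until this particular task has run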

// The destructor joins all threads, waiting for completion.
ThreadPool::~ThreadPool() {
  {
    std::unique_lock<std::mutex> LockGuard(QueueLock);
    EnableFlag = false;
  }
  QueueCondition.notify_all();
  for (auto &Worker : Threads)
    Worker.join();
}
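// Note on shutdown: clearing EnableFlag under QueueLock and then notifying all
// workers wakes every thread; each one keeps draining the remaining tasks and
// only returns once the queue is empty, so the joins above complete only after
// all queued work has run.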

#else // LLVM_ENABLE_THREADS Disabled

ThreadPool::ThreadPool() : ThreadPool(0) {}

// No threads are launched; issue a warning if ThreadCount is not 0.
ThreadPool::ThreadPool(unsigned ThreadCount)
    : ActiveThreads(0) {
  if (ThreadCount) {
    errs() << "Warning: requested a ThreadPool with " << ThreadCount
           << " threads, but LLVM_ENABLE_THREADS has been turned off\n";
  }
}

void ThreadPool::wait() {
  // Sequential implementation running the tasks
  while (!Tasks.empty()) {
    auto Task = std::move(Tasks.front());
    Tasks.pop();
    Task();
  }
}

std::shared_future<void> ThreadPool::asyncImpl(TaskTy Task) {
  // Get a Future with launch::deferred execution using std::async
  auto Future = std::async(std::launch::deferred, std::move(Task)).share();
  // Wrap the future so that both ThreadPool::wait() can operate and the
  // returned future can be sync'ed on.
  PackagedTaskTy PackagedTask([Future]() { Future.get(); });
  Tasks.push(std::move(PackagedTask));
  return Future;
}
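// In this configuration nothing runs eagerly: the task is held as a deferred
// std::async result, so it executes either when the caller calls get()/wait()
// on the returned shared_future, or when ThreadPool::wait() drains the queue
// and the wrapper above calls Future.get().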

ThreadPool::~ThreadPool() {
  wait();
}

#endif