/* Standard C headers */
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

/* POSIX headers */
#include <pthread.h>
#include <unistd.h>

#if defined(__ANDROID__)
	/* Declares memalign, used below as a posix_memalign fallback on Android */
	#include <malloc.h>
#endif

/* Dependencies */
#include <fxdiv.h>

/* Library header */
#include <pthreadpool.h>

#define PTHREADPOOL_CACHELINE_SIZE 64
#define PTHREADPOOL_CACHELINE_ALIGNED __attribute__((__aligned__(PTHREADPOOL_CACHELINE_SIZE)))

#if defined(__clang__)
	#if __has_extension(c_static_assert) || __has_feature(c_static_assert)
		#define PTHREADPOOL_STATIC_ASSERT(predicate, message) _Static_assert((predicate), message)
	#else
		#define PTHREADPOOL_STATIC_ASSERT(predicate, message)
	#endif
#elif defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)))
	/* Static assert is supported by gcc >= 4.6 */
	#define PTHREADPOOL_STATIC_ASSERT(predicate, message) _Static_assert((predicate), message)
#else
	#define PTHREADPOOL_STATIC_ASSERT(predicate, message)
#endif

/*
 * Computes a * b / d without intermediate overflow by doing the multiplication
 * and division in a type twice as wide as size_t. The division must happen
 * before the narrowing cast: casting the wide product to size_t first would
 * truncate it.
 */
static inline size_t multiply_divide(size_t a, size_t b, size_t d) {
	#if defined(__SIZEOF_SIZE_T__) && (__SIZEOF_SIZE_T__ == 4)
		return (size_t) ((((uint64_t) a) * ((uint64_t) b)) / ((uint64_t) d));
	#elif defined(__SIZEOF_SIZE_T__) && (__SIZEOF_SIZE_T__ == 8)
		return (size_t) ((((__uint128_t) a) * ((__uint128_t) b)) / ((__uint128_t) d));
	#else
		#error "Unsupported platform"
	#endif
}

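/*
 * Worked example (illustrative only): splitting range = 10 items across 3
 * threads uses multiply_divide(10, tid, 3) for the start and
 * multiply_divide(10, tid + 1, 3) for the end, giving boundaries 0, 3, 6, 10,
 * i.e. per-thread chunks of 3, 3, and 4 items that cover [0, 10) exactly once.
 */
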
static inline size_t divide_round_up(size_t dividend, size_t divisor) {
	if (dividend % divisor == 0) {
		return dividend / divisor;
	} else {
		return dividend / divisor + 1;
	}
}

static inline size_t min(size_t a, size_t b) {
	return a < b ? a : b;
}

enum thread_state {
	thread_state_idle,
	thread_state_compute_1d,
	thread_state_shutdown,
};

struct PTHREADPOOL_CACHELINE_ALIGNED thread_info {
	/**
	 * Index of the first element in the work range.
	 * Before processing a new element the owning worker thread increments this value.
	 */
	volatile size_t range_start;
	/**
	 * Index of the element after the last element of the work range.
	 * Before processing a new element the stealing worker thread decrements this value.
	 */
	volatile size_t range_end;
	/**
	 * The number of elements in the work range.
	 * Due to race conditions range_length <= range_end - range_start.
	 * The owning worker thread must decrement this value before incrementing @a range_start.
	 * The stealing worker thread must decrement this value before decrementing @a range_end.
	 */
	volatile size_t range_length;
	/**
	 * The active state of the thread.
	 */
	volatile enum thread_state state;
	/**
	 * Thread number in the 0..threads_count-1 range.
	 */
	size_t thread_number;
	/**
	 * The pthread object corresponding to the thread.
	 */
	pthread_t thread_object;
	/**
	 * Condition variable used to wake up the thread.
	 * When the thread is idle, it waits on this condition variable.
	 */
	pthread_cond_t wakeup_condvar;
};

PTHREADPOOL_STATIC_ASSERT(sizeof(struct thread_info) % PTHREADPOOL_CACHELINE_SIZE == 0, "thread_info structure must occupy an integer number of cache lines (64 bytes)");

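/*
 * Work-stealing illustration (not part of the code): with range_start = 2,
 * range_end = 8, and range_length = 6, the owning thread consumes items
 * 2, 3, ... from the front while a stealing thread consumes items 7, 6, ...
 * from the back. Both sides decrement range_length before touching their end,
 * so at most 6 items are ever claimed and the two ends cannot cross.
 */
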
101struct PTHREADPOOL_CACHELINE_ALIGNED pthreadpool {
102 /**
Marat Dukhan2cff4bb2017-03-05 18:16:48 -0500103 * The number of threads that are processing an operation.
Marat Dukhan0a312192015-08-22 17:46:29 -0400104 */
Marat Dukhan2cff4bb2017-03-05 18:16:48 -0500105 volatile size_t active_threads;
Marat Dukhan0a312192015-08-22 17:46:29 -0400106 /**
107 * The function to call for each item.
108 */
Marat Dukhanad0ca6a2015-10-16 03:15:19 -0400109 volatile void* function;
Marat Dukhan0a312192015-08-22 17:46:29 -0400110 /**
111 * The first argument to the item processing function.
112 */
113 void *volatile argument;
114 /**
115 * Serializes concurrent calls to @a pthreadpool_compute_* from different threads.
116 */
117 pthread_mutex_t execution_mutex;
118 /**
Marat Dukhan2cff4bb2017-03-05 18:16:48 -0500119 * Guards access to the @a active_threads variable.
Marat Dukhan0a312192015-08-22 17:46:29 -0400120 */
Marat Dukhan2cff4bb2017-03-05 18:16:48 -0500121 pthread_mutex_t completion_mutex;
Marat Dukhan0a312192015-08-22 17:46:29 -0400122 /**
Marat Dukhan2cff4bb2017-03-05 18:16:48 -0500123 * Condition variable to wait until all threads complete an operation.
Marat Dukhan0a312192015-08-22 17:46:29 -0400124 */
Marat Dukhan2cff4bb2017-03-05 18:16:48 -0500125 pthread_cond_t completion_condvar;
Marat Dukhan0a312192015-08-22 17:46:29 -0400126 /**
127 * Guards access to the @a state variables.
128 */
129 pthread_mutex_t state_mutex;
130 /**
131 * Condition variable to wait for change of @a state variable.
132 */
133 pthread_cond_t state_condvar;
134 /**
135 * The number of threads in the thread pool. Never changes after initialization.
136 */
137 size_t threads_count;
138 /**
139 * Thread information structures that immediately follow this structure.
140 */
141 struct thread_info threads[];
142};
143
144PTHREADPOOL_STATIC_ASSERT(sizeof(struct pthreadpool) % PTHREADPOOL_CACHELINE_SIZE == 0, "pthreadpool structure must occupy an integer number of cache lines (64 bytes)");
145
static void checkin_worker_thread(struct pthreadpool* threadpool) {
	pthread_mutex_lock(&threadpool->completion_mutex);
	if (--threadpool->active_threads == 0) {
		pthread_cond_signal(&threadpool->completion_condvar);
	}
	pthread_mutex_unlock(&threadpool->completion_mutex);
}

static void wait_worker_threads(struct pthreadpool* threadpool) {
	/* Fast path: skip the mutex if all threads have already checked in */
	if (threadpool->active_threads != 0) {
		pthread_mutex_lock(&threadpool->completion_mutex);
		while (threadpool->active_threads != 0) {
			pthread_cond_wait(&threadpool->completion_condvar, &threadpool->completion_mutex);
		}
		pthread_mutex_unlock(&threadpool->completion_mutex);
	}
}

/*
 * Atomically decrements *value if it is non-zero.
 * Returns true if the decrement happened, false if *value was already zero.
 */
inline static bool atomic_decrement(volatile size_t* value) {
	size_t actual_value = *value;
	if (actual_value != 0) {
		size_t expected_value;
		do {
			expected_value = actual_value;
			const size_t new_value = actual_value - 1;
			actual_value = __sync_val_compare_and_swap(value, expected_value, new_value);
		} while ((actual_value != expected_value) && (actual_value != 0));
	}
	return actual_value != 0;
}

static void thread_compute_1d(struct pthreadpool* threadpool, struct thread_info* thread) {
	const pthreadpool_function_1d_t function = (pthreadpool_function_1d_t) threadpool->function;
	void *const argument = threadpool->argument;
	/* Process the thread's own range of items */
	size_t range_start = thread->range_start;
	while (atomic_decrement(&thread->range_length)) {
		function(argument, range_start++);
	}
	/* Done, now look for other threads' items to steal */
	if (threadpool->active_threads > 1) {
		/* There are still other threads with work */
		const size_t thread_number = thread->thread_number;
		const size_t threads_count = threadpool->threads_count;
		for (size_t tid = (thread_number + 1) % threads_count; tid != thread_number; tid = (tid + 1) % threads_count) {
			struct thread_info* other_thread = &threadpool->threads[tid];
			if (other_thread->state != thread_state_idle) {
				while (atomic_decrement(&other_thread->range_length)) {
					const size_t item_id = __sync_sub_and_fetch(&other_thread->range_end, 1);
					function(argument, item_id);
				}
			}
		}
	}
}

static void* thread_main(void* arg) {
	struct thread_info* thread = (struct thread_info*) arg;
	/*
	 * Recover the pool pointer: the thread_info structures immediately follow
	 * struct pthreadpool (flexible array member), so step back thread_number
	 * elements and then past the pthreadpool header.
	 */
	struct pthreadpool* threadpool = ((struct pthreadpool*) (thread - thread->thread_number)) - 1;

	/* Check in */
	checkin_worker_thread(threadpool);

	/* Monitor the state changes and act accordingly */
	for (;;) {
		/* Lock the state mutex */
		pthread_mutex_lock(&threadpool->state_mutex);
		/* Wait until the state changes from idle */
		enum thread_state state;
		while ((state = thread->state) == thread_state_idle) {
			pthread_cond_wait(&threadpool->state_condvar, &threadpool->state_mutex);
		}
		/* A non-idle state was observed: release the mutex and act on it */
		pthread_mutex_unlock(&threadpool->state_mutex);
		switch (state) {
			case thread_state_compute_1d:
				thread_compute_1d(threadpool, thread);
				break;
			case thread_state_shutdown:
				return NULL;
			case thread_state_idle:
				/* To inhibit compiler warning */
				break;
		}
		/* Notify the master thread that we finished processing */
		thread->state = thread_state_idle;
		checkin_worker_thread(threadpool);
	}
}

struct pthreadpool* pthreadpool_create(size_t threads_count) {
	if (threads_count == 0) {
		threads_count = (size_t) sysconf(_SC_NPROCESSORS_ONLN);
	}
#if !defined(__ANDROID__)
	struct pthreadpool* threadpool = NULL;
	if (posix_memalign((void**) &threadpool, PTHREADPOOL_CACHELINE_SIZE, sizeof(struct pthreadpool) + threads_count * sizeof(struct thread_info)) != 0) {
#else
	/*
	 * Android did not get posix_memalign until API level 17 (Android 4.2).
	 * Use the (otherwise obsolete) memalign function on the Android platform.
	 */
	struct pthreadpool* threadpool = memalign(PTHREADPOOL_CACHELINE_SIZE, sizeof(struct pthreadpool) + threads_count * sizeof(struct thread_info));
	if (threadpool == NULL) {
#endif
		return NULL;
	}
	memset(threadpool, 0, sizeof(struct pthreadpool) + threads_count * sizeof(struct thread_info));
	threadpool->threads_count = threads_count;
	pthread_mutex_init(&threadpool->execution_mutex, NULL);
	pthread_mutex_init(&threadpool->completion_mutex, NULL);
	pthread_cond_init(&threadpool->completion_condvar, NULL);
	pthread_mutex_init(&threadpool->state_mutex, NULL);
	pthread_cond_init(&threadpool->state_condvar, NULL);

	threadpool->active_threads = threadpool->threads_count;

	for (size_t tid = 0; tid < threads_count; tid++) {
		threadpool->threads[tid].thread_number = tid;
		pthread_create(&threadpool->threads[tid].thread_object, NULL, &thread_main, &threadpool->threads[tid]);
	}

	/* Wait until all threads initialize */
	wait_worker_threads(threadpool);
	return threadpool;
}

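/*
 * Memory layout sketch (illustrative): for threads_count = 4, the single
 * cacheline-aligned allocation above holds
 *
 *   [struct pthreadpool][thread_info 0][thread_info 1][thread_info 2][thread_info 3]
 *
 * which is what allows thread_main to recover the pool pointer from a
 * thread_info pointer with plain pointer arithmetic.
 */
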
size_t pthreadpool_get_threads_count(struct pthreadpool* threadpool) {
	return threadpool->threads_count;
}

void pthreadpool_compute_1d(
	struct pthreadpool* threadpool,
	pthreadpool_function_1d_t function,
	void* argument,
	size_t range)
{
	if (threadpool == NULL) {
		/* No thread pool provided: execute the function sequentially on the calling thread */
		for (size_t i = 0; i < range; i++) {
			function(argument, i);
		}
	} else {
		/* Protect the global threadpool structures */
		pthread_mutex_lock(&threadpool->execution_mutex);

		/* Lock the state variables to ensure that threads don't start processing before they observe complete state */
		pthread_mutex_lock(&threadpool->state_mutex);

		/* Setup global arguments */
		threadpool->function = function;
		threadpool->argument = argument;

		/* Locking of completion_mutex not needed: readers are sleeping on state_condvar */
		threadpool->active_threads = threadpool->threads_count;

		/* Spread the work between threads */
		for (size_t tid = 0; tid < threadpool->threads_count; tid++) {
			struct thread_info* thread = &threadpool->threads[tid];
			thread->range_start = multiply_divide(range, tid, threadpool->threads_count);
			thread->range_end = multiply_divide(range, tid + 1, threadpool->threads_count);
			thread->range_length = thread->range_end - thread->range_start;
			thread->state = thread_state_compute_1d;
		}

		/* Unlock the state variables before waking up the threads for better performance */
		pthread_mutex_unlock(&threadpool->state_mutex);

		/* Wake up the threads */
		pthread_cond_broadcast(&threadpool->state_condvar);

		/* Wait until the threads finish computation */
		wait_worker_threads(threadpool);

		/* Unprotect the global threadpool structures */
		pthread_mutex_unlock(&threadpool->execution_mutex);
	}
}

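/*
 * Usage sketch (illustrative only; the callback and buffer names are
 * hypothetical, not part of the library). Any function matching
 * pthreadpool_function_1d_t is called once per index in [0, range):
 *
 *   static void add_one(void* data, size_t i) {
 *       ((float*) data)[i] += 1.0f;
 *   }
 *
 *   pthreadpool_t pool = pthreadpool_create(0);            // 0 = one thread per online core
 *   pthreadpool_compute_1d(pool, add_one, buffer, length); // add_one runs for i = 0..length-1
 *   pthreadpool_destroy(pool);
 */
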
struct compute_1d_tiled_context {
	pthreadpool_function_1d_tiled_t function;
	void* argument;
	size_t range;
	size_t tile;
};

static void compute_1d_tiled(const struct compute_1d_tiled_context* context, size_t linear_index) {
	const size_t tile_index = linear_index;
	const size_t index = tile_index * context->tile;
	const size_t tile = min(context->tile, context->range - index);
	context->function(context->argument, index, tile);
}

void pthreadpool_compute_1d_tiled(
	pthreadpool_t threadpool,
	pthreadpool_function_1d_tiled_t function,
	void* argument,
	size_t range,
	size_t tile)
{
	if (threadpool == NULL) {
		/* No thread pool provided: execute the function sequentially on the calling thread */
		for (size_t i = 0; i < range; i += tile) {
			function(argument, i, min(range - i, tile));
		}
	} else {
		/* Execute in parallel on the thread pool using a linearized index */
		const size_t tile_range = divide_round_up(range, tile);
		struct compute_1d_tiled_context context = {
			.function = function,
			.argument = argument,
			.range = range,
			.tile = tile
		};
		pthreadpool_compute_1d(threadpool, (pthreadpool_function_1d_t) compute_1d_tiled, &context, tile_range);
	}
}

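/*
 * Tiling example (illustrative): range = 10, tile = 4 produces
 * tile_range = divide_round_up(10, 4) = 3 tiles, and the callback is invoked
 * as function(argument, 0, 4), function(argument, 4, 4), function(argument, 8, 2);
 * only the last tile is truncated to the remaining 2 items.
 */
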
struct compute_2d_context {
	pthreadpool_function_2d_t function;
	void* argument;
	struct fxdiv_divisor_size_t range_j;
};

static void compute_2d(const struct compute_2d_context* context, size_t linear_index) {
	const struct fxdiv_divisor_size_t range_j = context->range_j;
	const struct fxdiv_result_size_t index = fxdiv_divide_size_t(linear_index, range_j);
	context->function(context->argument, index.quotient, index.remainder);
}

void pthreadpool_compute_2d(
	struct pthreadpool* threadpool,
	pthreadpool_function_2d_t function,
	void* argument,
	size_t range_i,
	size_t range_j)
{
	if (threadpool == NULL) {
		/* No thread pool provided: execute the function sequentially on the calling thread */
		for (size_t i = 0; i < range_i; i++) {
			for (size_t j = 0; j < range_j; j++) {
				function(argument, i, j);
			}
		}
	} else {
		/* Execute in parallel on the thread pool using a linearized index */
		struct compute_2d_context context = {
			.function = function,
			.argument = argument,
			.range_j = fxdiv_init_size_t(range_j)
		};
		pthreadpool_compute_1d(threadpool, (pthreadpool_function_1d_t) compute_2d, &context, range_i * range_j);
	}
}

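/*
 * Linearization example (illustrative): for range_i = 3, range_j = 5 the 2D
 * iteration space maps to linear indices 0..14 via linear_index = i * range_j + j;
 * compute_2d recovers (i, j) from linear_index 7 as quotient 1, remainder 2.
 * fxdiv precomputes the divisor so the per-item split avoids a hardware divide.
 */
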
struct compute_2d_tiled_context {
	pthreadpool_function_2d_tiled_t function;
	void* argument;
	struct fxdiv_divisor_size_t tile_range_j;
	size_t range_i;
	size_t range_j;
	size_t tile_i;
	size_t tile_j;
};

static void compute_2d_tiled(const struct compute_2d_tiled_context* context, size_t linear_index) {
	const struct fxdiv_divisor_size_t tile_range_j = context->tile_range_j;
	const struct fxdiv_result_size_t tile_index = fxdiv_divide_size_t(linear_index, tile_range_j);
	const size_t max_tile_i = context->tile_i;
	const size_t max_tile_j = context->tile_j;
	const size_t index_i = tile_index.quotient * max_tile_i;
	const size_t index_j = tile_index.remainder * max_tile_j;
	const size_t tile_i = min(max_tile_i, context->range_i - index_i);
	const size_t tile_j = min(max_tile_j, context->range_j - index_j);
	context->function(context->argument, index_i, index_j, tile_i, tile_j);
}

void pthreadpool_compute_2d_tiled(
	pthreadpool_t threadpool,
	pthreadpool_function_2d_tiled_t function,
	void* argument,
	size_t range_i,
	size_t range_j,
	size_t tile_i,
	size_t tile_j)
{
	if (threadpool == NULL) {
		/* No thread pool provided: execute the function sequentially on the calling thread */
		for (size_t i = 0; i < range_i; i += tile_i) {
			for (size_t j = 0; j < range_j; j += tile_j) {
				function(argument, i, j, min(range_i - i, tile_i), min(range_j - j, tile_j));
			}
		}
	} else {
		/* Execute in parallel on the thread pool using a linearized tile index */
		const size_t tile_range_i = divide_round_up(range_i, tile_i);
		const size_t tile_range_j = divide_round_up(range_j, tile_j);
		struct compute_2d_tiled_context context = {
			.function = function,
			.argument = argument,
			.tile_range_j = fxdiv_init_size_t(tile_range_j),
			.range_i = range_i,
			.range_j = range_j,
			.tile_i = tile_i,
			.tile_j = tile_j
		};
		pthreadpool_compute_1d(threadpool, (pthreadpool_function_1d_t) compute_2d_tiled, &context, tile_range_i * tile_range_j);
	}
}

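/*
 * 2D tiling example (illustrative): range_i = 5, range_j = 7 with tile_i = 2,
 * tile_j = 3 gives tile_range_i = 3, tile_range_j = 3, i.e. 9 tiles. Linear
 * tile index 4 decomposes to tile (1, 1), so the callback receives
 * index_i = 2, index_j = 3, tile_i = 2, tile_j = 3; the corner tile at linear
 * index 8 is truncated to tile_i = 1, tile_j = 1.
 */
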
void pthreadpool_destroy(struct pthreadpool* threadpool) {
	if (threadpool != NULL) {
		/* Lock the state variables to ensure that threads don't start processing before they observe complete state */
		pthread_mutex_lock(&threadpool->state_mutex);

		/* Locking of completion_mutex not needed: readers are sleeping on state_condvar */
		threadpool->active_threads = threadpool->threads_count;

		/* Update threads' states */
		for (size_t tid = 0; tid < threadpool->threads_count; tid++) {
			threadpool->threads[tid].state = thread_state_shutdown;
		}

		/* Wake up worker threads */
		pthread_cond_broadcast(&threadpool->state_condvar);

		/* Commit the state changes and let workers start processing */
		pthread_mutex_unlock(&threadpool->state_mutex);

		/* Wait until all threads return */
		for (size_t tid = 0; tid < threadpool->threads_count; tid++) {
			pthread_join(threadpool->threads[tid].thread_object, NULL);
		}

		/* Release resources */
		pthread_mutex_destroy(&threadpool->execution_mutex);
		pthread_mutex_destroy(&threadpool->completion_mutex);
		pthread_cond_destroy(&threadpool->completion_condvar);
		pthread_mutex_destroy(&threadpool->state_mutex);
		pthread_cond_destroy(&threadpool->state_condvar);
		free(threadpool);
	}
}