/* Standard C headers */
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

/* POSIX headers */
#include <pthread.h>
#include <unistd.h>

#if defined(__ANDROID__)
	/* On Android, memalign (used in pthreadpool_create below) is declared in <malloc.h> */
	#include <malloc.h>
#endif

/* Dependencies */
#include <fxdiv.h>

/* Library header */
#include <pthreadpool.h>

#define PTHREADPOOL_CACHELINE_SIZE 64
#define PTHREADPOOL_CACHELINE_ALIGNED __attribute__((__aligned__(PTHREADPOOL_CACHELINE_SIZE)))

#if defined(__clang__)
	#if __has_extension(c_static_assert) || __has_feature(c_static_assert)
		#define PTHREADPOOL_STATIC_ASSERT(predicate, message) _Static_assert((predicate), message)
	#else
		#define PTHREADPOOL_STATIC_ASSERT(predicate, message)
	#endif
#elif defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)))
	/* _Static_assert is supported by gcc >= 4.6 */
	#define PTHREADPOOL_STATIC_ASSERT(predicate, message) _Static_assert((predicate), message)
#else
	#define PTHREADPOOL_STATIC_ASSERT(predicate, message)
#endif
/*
 * Compute (a * b) / d using a double-width intermediate type to avoid overflow in the product.
 * The division must happen in the wide type; only the final quotient is narrowed back to size_t.
 */
static inline size_t multiply_divide(size_t a, size_t b, size_t d) {
#if defined(__SIZEOF_SIZE_T__) && (__SIZEOF_SIZE_T__ == 4)
	return (size_t) ((((uint64_t) a) * ((uint64_t) b)) / ((uint64_t) d));
#elif defined(__SIZEOF_SIZE_T__) && (__SIZEOF_SIZE_T__ == 8)
	return (size_t) ((((__uint128_t) a) * ((__uint128_t) b)) / ((__uint128_t) d));
#else
	#error "Unsupported platform"
#endif
}
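
/*
 * Illustrative example (not part of the library): multiply_divide is used below to split
 * a range of `range` items evenly across `threads_count` threads. With range = 10 and
 * threads_count = 3, the boundaries are
 *   multiply_divide(10, 0, 3) = 0, multiply_divide(10, 1, 3) = 3,
 *   multiply_divide(10, 2, 3) = 6, multiply_divide(10, 3, 3) = 10,
 * producing sub-ranges [0,3), [3,6), [6,10) whose sizes differ by at most one.
 */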

static inline size_t divide_round_up(size_t dividend, size_t divisor) {
	if (dividend % divisor == 0) {
		return dividend / divisor;
	} else {
		return dividend / divisor + 1;
	}
}

static inline size_t min(size_t a, size_t b) {
	return a < b ? a : b;
}
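
/*
 * Illustrative example (not part of the library): divide_round_up computes the number of
 * tiles needed to cover a range, e.g. divide_round_up(10, 4) = 3 tiles of sizes 4, 4, and 2;
 * min() then clamps the last tile to the remaining 2 items.
 */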

enum thread_state {
	thread_state_idle,
	thread_state_compute_1d,
	thread_state_shutdown,
};

struct PTHREADPOOL_CACHELINE_ALIGNED thread_info {
	/**
	 * Index of the first element in the work range.
	 * Before processing a new element, the owning worker thread increments this value.
	 */
	volatile size_t range_start;
	/**
	 * Index of the element after the last element of the work range.
	 * Before processing a new element, the stealing worker thread decrements this value.
	 */
	volatile size_t range_end;
	/**
	 * The number of elements in the work range.
	 * Because range_start and range_end are updated concurrently, range_length <= range_end - range_start.
	 * The owning worker thread must decrement this value before incrementing @a range_start.
	 * The stealing worker thread must decrement this value before decrementing @a range_end.
	 */
	volatile size_t range_length;
	/**
	 * The active state of the thread.
	 */
	volatile enum thread_state state;
	/**
	 * Thread number in the 0..threads_count-1 range.
	 */
	size_t thread_number;
	/**
	 * The pthread object corresponding to the thread.
	 */
	pthread_t thread_object;
	/**
	 * Condition variable used to wake up the thread.
	 * When the thread is idle, it waits on this condition variable.
	 */
	pthread_cond_t wakeup_condvar;
};
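
/*
 * Illustrative sketch (not part of the library) of the work-stealing protocol the fields
 * above implement: the owner consumes items from the front of its range, a thief consumes
 * items from the back, and range_length is the shared counter that arbitrates between them:
 *
 *   owner:  if (atomic_decrement(&range_length)) process(range_start++);
 *   thief:  if (atomic_decrement(&range_length)) process(--range_end);
 *
 * Decrementing range_length first guarantees that the two sides together never process
 * more than the original number of items, even when they race on the same last item.
 */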

PTHREADPOOL_STATIC_ASSERT(sizeof(struct thread_info) % PTHREADPOOL_CACHELINE_SIZE == 0, "thread_info structure must occupy an integer number of cache lines (64 bytes)");

struct PTHREADPOOL_CACHELINE_ALIGNED pthreadpool {
	/**
	 * The number of threads that signalled completion of an operation.
	 */
	volatile size_t checkedin_threads;
	/**
	 * The function to call for each item.
	 * The pointer itself is volatile: it is written by the master thread and read by the workers.
	 */
	void *volatile function;
	/**
	 * The first argument to the item processing function.
	 */
	void *volatile argument;
	/**
	 * Serializes concurrent calls to @a pthreadpool_compute_* from different threads.
	 */
	pthread_mutex_t execution_mutex;
	/**
	 * Guards access to the @a checkedin_threads variable.
	 */
	pthread_mutex_t barrier_mutex;
	/**
	 * Condition variable to wait until all threads check in.
	 */
	pthread_cond_t barrier_condvar;
	/**
	 * Guards access to the per-thread @a state variables.
	 */
	pthread_mutex_t state_mutex;
	/**
	 * Condition variable to wait for changes of the per-thread @a state variables.
	 */
	pthread_cond_t state_condvar;
	/**
	 * The number of threads in the thread pool. Never changes after initialization.
	 */
	size_t threads_count;
	/**
	 * Thread information structures that immediately follow this structure.
	 */
	struct thread_info threads[];
};

PTHREADPOOL_STATIC_ASSERT(sizeof(struct pthreadpool) % PTHREADPOOL_CACHELINE_SIZE == 0, "pthreadpool structure must occupy an integer number of cache lines (64 bytes)");

static void checkin_worker_thread(struct pthreadpool* threadpool) {
	pthread_mutex_lock(&threadpool->barrier_mutex);
	const size_t checkedin_threads = threadpool->checkedin_threads + 1;
	threadpool->checkedin_threads = checkedin_threads;
	if (checkedin_threads == threadpool->threads_count) {
		pthread_cond_signal(&threadpool->barrier_condvar);
	}
	pthread_mutex_unlock(&threadpool->barrier_mutex);
}

static void wait_worker_threads(struct pthreadpool* threadpool) {
	if (threadpool->checkedin_threads != threadpool->threads_count) {
		pthread_mutex_lock(&threadpool->barrier_mutex);
		while (threadpool->checkedin_threads != threadpool->threads_count) {
			pthread_cond_wait(&threadpool->barrier_condvar, &threadpool->barrier_mutex);
		}
		pthread_mutex_unlock(&threadpool->barrier_mutex);
	}
}

static void wakeup_worker_threads(struct pthreadpool* threadpool) {
	pthread_mutex_lock(&threadpool->state_mutex);
	threadpool->checkedin_threads = 0; /* Locking of barrier_mutex not needed: readers are sleeping */
	pthread_cond_broadcast(&threadpool->state_condvar);
	pthread_mutex_unlock(&threadpool->state_mutex); /* Releasing the mutex lets the woken threads proceed */
}
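
/*
 * Illustrative summary (not part of the library): the three functions above form one
 * barrier round-trip. The master resets checkedin_threads to zero and broadcasts on
 * state_condvar (wakeup_worker_threads); each worker does its work, then increments
 * checkedin_threads and signals barrier_condvar when it is the last one to finish
 * (checkin_worker_thread); the master sleeps on barrier_condvar until all
 * threads_count workers have checked in (wait_worker_threads).
 */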

/*
 * Atomically decrement the value if it is non-zero.
 * Returns true if the value was decremented, false if it was already zero.
 */
inline static bool atomic_decrement(volatile size_t* value) {
	size_t actual_value = *value;
	if (actual_value != 0) {
		size_t expected_value;
		do {
			expected_value = actual_value;
			const size_t new_value = actual_value - 1;
			actual_value = __sync_val_compare_and_swap(value, expected_value, new_value);
		} while ((actual_value != expected_value) && (actual_value != 0));
	}
	return actual_value != 0;
}
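
/*
 * A rough C11 equivalent of atomic_decrement (a sketch, assuming <stdatomic.h> is
 * available; the library itself targets the older GCC __sync builtins):
 *
 *   static bool atomic_decrement_c11(atomic_size_t* value) {
 *       size_t expected = atomic_load(value);
 *       while (expected != 0) {
 *           if (atomic_compare_exchange_weak(value, &expected, expected - 1))
 *               return true;
 *       }
 *       return false;
 *   }
 *
 * atomic_compare_exchange_weak updates `expected` with the observed value on failure,
 * so the loop re-checks for zero after every failed exchange, exactly as the
 * __sync_val_compare_and_swap loop above does.
 */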

static void thread_compute_1d(struct pthreadpool* threadpool, struct thread_info* thread) {
	const pthreadpool_function_1d_t function = (pthreadpool_function_1d_t) threadpool->function;
	void *const argument = threadpool->argument;
	/* Process thread's own range of items */
	size_t range_start = thread->range_start;
	while (atomic_decrement(&thread->range_length)) {
		function(argument, range_start++);
	}
	/* Done, now look for other threads' items to steal */
	const size_t thread_number = thread->thread_number;
	const size_t threads_count = threadpool->threads_count;
	for (size_t tid = (thread_number + 1) % threads_count; tid != thread_number; tid = (tid + 1) % threads_count) {
		struct thread_info* other_thread = &threadpool->threads[tid];
		if (other_thread->state != thread_state_idle) {
			while (atomic_decrement(&other_thread->range_length)) {
				const size_t item_id = __sync_sub_and_fetch(&other_thread->range_end, 1);
				function(argument, item_id);
			}
		}
	}
}
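
/*
 * Illustrative note (not part of the library): each thread scans its victims in a
 * round-robin order starting just after itself, so with 4 threads, thread 1 tries to
 * steal from threads 2, 3, and 0 in that order. Starting at a different offset per
 * thread spreads the thieves across victims instead of piling them all onto thread 0.
 */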

static void* thread_main(void* arg) {
	struct thread_info* thread = (struct thread_info*) arg;
	/*
	 * Recover the pool from the thread_info pointer: threads[] immediately follows
	 * struct pthreadpool, so back up thread_number elements to reach threads[0],
	 * then one whole struct pthreadpool to reach the pool itself.
	 */
	struct pthreadpool* threadpool = ((struct pthreadpool*) (thread - thread->thread_number)) - 1;

	/* Check in */
	checkin_worker_thread(threadpool);

	/* Monitor the state changes and act accordingly */
	for (;;) {
		/* Lock the state mutex */
		pthread_mutex_lock(&threadpool->state_mutex);
		/* Read the state */
		enum thread_state state;
		while ((state = thread->state) == thread_state_idle) {
			/* Wait for a state change */
			pthread_cond_wait(&threadpool->state_condvar, &threadpool->state_mutex);
		}
		/* Process the non-idle state */
		pthread_mutex_unlock(&threadpool->state_mutex);
		switch (state) {
			case thread_state_compute_1d:
				thread_compute_1d(threadpool, thread);
				break;
			case thread_state_shutdown:
				return NULL;
			case thread_state_idle:
				/* Unreachable; listed to inhibit a compiler warning */
				break;
		}
		/* Notify the master thread that we finished processing */
		thread->state = thread_state_idle;
		checkin_worker_thread(threadpool);
	}
}

struct pthreadpool* pthreadpool_create(size_t threads_count) {
	if (threads_count == 0) {
		threads_count = (size_t) sysconf(_SC_NPROCESSORS_ONLN);
	}
#if !defined(__ANDROID__)
	struct pthreadpool* threadpool = NULL;
	if (posix_memalign((void**) &threadpool, PTHREADPOOL_CACHELINE_SIZE, sizeof(struct pthreadpool) + threads_count * sizeof(struct thread_info)) != 0) {
#else
	/*
	 * Android didn't get posix_memalign until API level 17 (Android 4.2).
	 * Use the (otherwise obsolete) memalign function on the Android platform.
	 */
	struct pthreadpool* threadpool = memalign(PTHREADPOOL_CACHELINE_SIZE, sizeof(struct pthreadpool) + threads_count * sizeof(struct thread_info));
	if (threadpool == NULL) {
#endif
		return NULL;
	}
	memset(threadpool, 0, sizeof(struct pthreadpool) + threads_count * sizeof(struct thread_info));
	threadpool->threads_count = threads_count;
	pthread_mutex_init(&threadpool->execution_mutex, NULL);
	pthread_mutex_init(&threadpool->barrier_mutex, NULL);
	pthread_cond_init(&threadpool->barrier_condvar, NULL);
	pthread_mutex_init(&threadpool->state_mutex, NULL);
	pthread_cond_init(&threadpool->state_condvar, NULL);

	for (size_t tid = 0; tid < threads_count; tid++) {
		threadpool->threads[tid].thread_number = tid;
		pthread_create(&threadpool->threads[tid].thread_object, NULL, &thread_main, &threadpool->threads[tid]);
	}

	/* Wait until all threads initialize */
	wait_worker_threads(threadpool);
	return threadpool;
}

size_t pthreadpool_get_threads_count(struct pthreadpool* threadpool) {
	return threadpool->threads_count;
}

void pthreadpool_compute_1d(
	struct pthreadpool* threadpool,
	pthreadpool_function_1d_t function,
	void* argument,
	size_t range)
{
	if (threadpool == NULL) {
		/* No thread pool provided: execute the function sequentially on the calling thread */
		for (size_t i = 0; i < range; i++) {
			function(argument, i);
		}
	} else {
		/* Protect the global threadpool structures */
		pthread_mutex_lock(&threadpool->execution_mutex);

		/* Lock the state variables to ensure that threads don't start processing before they observe complete state */
		pthread_mutex_lock(&threadpool->state_mutex);

		/* Set up the global arguments */
		threadpool->function = (void*) function;
		threadpool->argument = argument;

		/* Spread the work between threads */
		for (size_t tid = 0; tid < threadpool->threads_count; tid++) {
			struct thread_info* thread = &threadpool->threads[tid];
			thread->range_start = multiply_divide(range, tid, threadpool->threads_count);
			thread->range_end = multiply_divide(range, tid + 1, threadpool->threads_count);
			thread->range_length = thread->range_end - thread->range_start;
			thread->state = thread_state_compute_1d;
		}

		/* Unlock the state variables before waking up the threads for better performance */
		pthread_mutex_unlock(&threadpool->state_mutex);

		/* Wake up the threads */
		wakeup_worker_threads(threadpool);

		/* Wait until the threads finish computation */
		wait_worker_threads(threadpool);

		/* Unprotect the global threadpool structures */
		pthread_mutex_unlock(&threadpool->execution_mutex);
	}
}
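
/*
 * Usage sketch (illustrative, not part of the library). The callback and array names
 * are hypothetical:
 *
 *   static void square_item(void* argument, size_t i) {
 *       double* array = (double*) argument;
 *       array[i] *= array[i];
 *   }
 *
 *   pthreadpool_t threadpool = pthreadpool_create(0);  // one thread per online core
 *   pthreadpool_compute_1d(threadpool, square_item, array, array_size);
 *   pthreadpool_destroy(threadpool);
 *
 * Passing a NULL threadpool is also valid and runs the loop on the calling thread.
 */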

struct compute_1d_tiled_context {
	pthreadpool_function_1d_tiled_t function;
	void* argument;
	size_t range;
	size_t tile;
};

static void compute_1d_tiled(const struct compute_1d_tiled_context* context, size_t linear_index) {
	const size_t tile_index = linear_index;
	const size_t index = tile_index * context->tile;
	const size_t tile = min(context->tile, context->range - index);
	context->function(context->argument, index, tile);
}

void pthreadpool_compute_1d_tiled(
	pthreadpool_t threadpool,
	pthreadpool_function_1d_tiled_t function,
	void* argument,
	size_t range,
	size_t tile)
{
	if (threadpool == NULL) {
		/* No thread pool provided: execute the function sequentially on the calling thread */
		for (size_t i = 0; i < range; i += tile) {
			function(argument, i, min(range - i, tile));
		}
	} else {
		/* Execute in parallel on the thread pool using a linearized index */
		const size_t tile_range = divide_round_up(range, tile);
		struct compute_1d_tiled_context context = {
			.function = function,
			.argument = argument,
			.range = range,
			.tile = tile
		};
		pthreadpool_compute_1d(threadpool, (pthreadpool_function_1d_t) compute_1d_tiled, &context, tile_range);
	}
}
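
/*
 * Worked example (illustrative, not part of the library): with range = 10 and tile = 4,
 * tile_range = divide_round_up(10, 4) = 3, and the three linearized indices map to
 *   linear_index 0 -> function(argument, 0, 4)
 *   linear_index 1 -> function(argument, 4, 4)
 *   linear_index 2 -> function(argument, 8, 2)
 * so the callback always receives the tile start and the (possibly clamped) tile size.
 */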

struct compute_2d_context {
	pthreadpool_function_2d_t function;
	void* argument;
	struct fxdiv_divisor_size_t range_j;
};

static void compute_2d(const struct compute_2d_context* context, size_t linear_index) {
	const struct fxdiv_divisor_size_t range_j = context->range_j;
	const struct fxdiv_result_size_t index = fxdiv_divide_size_t(linear_index, range_j);
	context->function(context->argument, index.quotient, index.remainder);
}
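
/*
 * Illustrative note (not part of the library): compute_2d recovers the 2D coordinates
 * from the linearized index as i = linear_index / range_j and j = linear_index % range_j.
 * fxdiv precomputes a "magic" multiplier for the fixed divisor range_j once (in
 * fxdiv_init_size_t below), so each per-item division becomes a cheap multiply and
 * shift. For example, with range_j = 5, linear_index 13 maps to (i, j) = (2, 3).
 */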

void pthreadpool_compute_2d(
	struct pthreadpool* threadpool,
	pthreadpool_function_2d_t function,
	void* argument,
	size_t range_i,
	size_t range_j)
{
	if (threadpool == NULL) {
		/* No thread pool provided: execute the function sequentially on the calling thread */
		for (size_t i = 0; i < range_i; i++) {
			for (size_t j = 0; j < range_j; j++) {
				function(argument, i, j);
			}
		}
	} else {
		/* Execute in parallel on the thread pool using a linearized index */
		struct compute_2d_context context = {
			.function = function,
			.argument = argument,
			.range_j = fxdiv_init_size_t(range_j)
		};
		pthreadpool_compute_1d(threadpool, (pthreadpool_function_1d_t) compute_2d, &context, range_i * range_j);
	}
}

struct compute_2d_tiled_context {
	pthreadpool_function_2d_tiled_t function;
	void* argument;
	struct fxdiv_divisor_size_t tile_range_j;
	size_t range_i;
	size_t range_j;
	size_t tile_i;
	size_t tile_j;
};

static void compute_2d_tiled(const struct compute_2d_tiled_context* context, size_t linear_index) {
	const struct fxdiv_divisor_size_t tile_range_j = context->tile_range_j;
	const struct fxdiv_result_size_t tile_index = fxdiv_divide_size_t(linear_index, tile_range_j);
	const size_t max_tile_i = context->tile_i;
	const size_t max_tile_j = context->tile_j;
	const size_t index_i = tile_index.quotient * max_tile_i;
	const size_t index_j = tile_index.remainder * max_tile_j;
	const size_t tile_i = min(max_tile_i, context->range_i - index_i);
	const size_t tile_j = min(max_tile_j, context->range_j - index_j);
	context->function(context->argument, index_i, index_j, tile_i, tile_j);
}
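
/*
 * Worked example (illustrative, not part of the library): with range_i = 5, range_j = 10,
 * tile_i = 4, tile_j = 4, the tile grid is 2 x 3 (tile_range_i = 2, tile_range_j = 3).
 * Linear tile index 5 decomposes as quotient 1, remainder 2, giving index_i = 4,
 * index_j = 8, and clamped tile sizes tile_i = min(4, 5 - 4) = 1, tile_j = min(4, 10 - 8) = 2.
 */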

void pthreadpool_compute_2d_tiled(
	pthreadpool_t threadpool,
	pthreadpool_function_2d_tiled_t function,
	void* argument,
	size_t range_i,
	size_t range_j,
	size_t tile_i,
	size_t tile_j)
{
	if (threadpool == NULL) {
		/* No thread pool provided: execute the function sequentially on the calling thread */
		for (size_t i = 0; i < range_i; i += tile_i) {
			for (size_t j = 0; j < range_j; j += tile_j) {
				function(argument, i, j, min(range_i - i, tile_i), min(range_j - j, tile_j));
			}
		}
	} else {
		/* Execute in parallel on the thread pool using a linearized index */
		const size_t tile_range_i = divide_round_up(range_i, tile_i);
		const size_t tile_range_j = divide_round_up(range_j, tile_j);
		struct compute_2d_tiled_context context = {
			.function = function,
			.argument = argument,
			.tile_range_j = fxdiv_init_size_t(tile_range_j),
			.range_i = range_i,
			.range_j = range_j,
			.tile_i = tile_i,
			.tile_j = tile_j
		};
		pthreadpool_compute_1d(threadpool, (pthreadpool_function_1d_t) compute_2d_tiled, &context, tile_range_i * tile_range_j);
	}
}

void pthreadpool_destroy(struct pthreadpool* threadpool) {
	/* Update the threads' states */
	for (size_t tid = 0; tid < threadpool->threads_count; tid++) {
		threadpool->threads[tid].state = thread_state_shutdown;
	}

	/* Wake up the threads */
	wakeup_worker_threads(threadpool);

	/* Wait until all threads return */
	for (size_t tid = 0; tid < threadpool->threads_count; tid++) {
		pthread_join(threadpool->threads[tid].thread_object, NULL);
	}

	/* Release resources */
	pthread_mutex_destroy(&threadpool->execution_mutex);
	pthread_mutex_destroy(&threadpool->barrier_mutex);
	pthread_cond_destroy(&threadpool->barrier_condvar);
	pthread_mutex_destroy(&threadpool->state_mutex);
	pthread_cond_destroy(&threadpool->state_condvar);
	free(threadpool);
}