#include "pthread_impl.h"
| 2 | |
/* Slow-path wait for process-shared barriers. All synchronization must go
 * through the shared barrier object itself, since other processes map it
 * at different addresses and no process-local state can be used.
 * For pshared barriers _b_limit stores (count-1) with the sign bit set
 * (that is how pthread_barrier_wait routes here), so masking with INT_MAX
 * and adding 1 recovers the thread count. */
static int pshared_barrier_wait(pthread_barrier_t *b)
{
	int limit = (b->_b_limit & INT_MAX) + 1;
	int ret = 0;
	int v, w;

	/* A one-thread barrier is trivially satisfied by its sole caller. */
	if (limit==1) return PTHREAD_BARRIER_SERIAL_THREAD;

	/* Acquire the barrier entry lock. The winner stores <limit> rather
	 * than 1 so that the recursive unlock at the end can count every
	 * participant back out, one decrement per thread. */
	while ((v=a_cas(&b->_b_lock, 0, limit)))
		__wait(&b->_b_lock, &b->_b_waiters, v, 0);

	/* Wait for <limit> threads to get to the barrier */
	if (++b->_b_count == limit) {
		/* Last arriver: reset the count (releasing the others) and
		 * claim the serial-thread return value. Note the entry lock
		 * is deliberately NOT released here; it stays at <limit>
		 * until the recursive unlock below. */
		a_store(&b->_b_count, 0);
		ret = PTHREAD_BARRIER_SERIAL_THREAD;
		if (b->_b_waiters2) __wake(&b->_b_count, -1, 0);
	} else {
		/* Hand the entry lock to the next arriver, then sleep until
		 * the last arriver resets _b_count to zero. */
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 0);
		while ((v=b->_b_count)>0)
			__wait(&b->_b_count, &b->_b_waiters2, v, 0);
	}

	/* Take the vm lock so the mapping holding the barrier cannot be
	 * torn down while threads are still touching the object (needed for
	 * the self-sync'd destruction noted below). */
	__vm_lock();

	/* Ensure all threads have a vm lock before proceeding */
	/* _b_count now counts DOWN from 0 toward -limit; the thread whose
	 * decrement reaches -limit (fetch returned 1-limit) is last, resets
	 * the count and releases the rest. */
	if (a_fetch_add(&b->_b_count, -1)==1-limit) {
		a_store(&b->_b_count, 0);
		if (b->_b_waiters2) __wake(&b->_b_count, -1, 0);
	} else {
		while ((v=b->_b_count))
			__wait(&b->_b_count, &b->_b_waiters2, v, 0);
	}

	/* Perform a recursive unlock suitable for self-sync'd destruction */
	/* Each thread decrements the lock from <limit> toward 0. The
	 * v==INT_MIN+1 case handles a pending destroy: presumably
	 * pthread_barrier_destroy sets the sign bit on _b_lock, so the last
	 * thread out stores 0 and wakes the destroyer — TODO(review):
	 * confirm against pthread_barrier_destroy. */
	do {
		v = b->_b_lock;
		w = b->_b_waiters;
	} while (a_cas(&b->_b_lock, v, v==INT_MIN+1 ? 0 : v-1) != v);

	/* Wake a thread waiting to reuse or destroy the barrier */
	if (v==INT_MIN+1 || (v==1 && w))
		__wake(&b->_b_lock, 1, 0);

	__vm_unlock();

	return ret;
}
| 51 | |
/* Per-round state for process-private barriers. It is allocated on the
 * stack of the first thread to arrive (the "instance owner") in
 * pthread_barrier_wait, so it must not be used after that thread is
 * allowed to return. */
struct instance
{
	volatile int count;    /* non-owner threads currently in this round */
	volatile int last;     /* set to 1 when the last thread arrives */
	volatile int waiters;  /* waiter count for the futex wait on 'last' */
	volatile int finished; /* exit handshake between last exiter and owner */
};
| 59 | |
/* Wait on a barrier. Returns PTHREAD_BARRIER_SERIAL_THREAD for exactly one
 * thread per round (the instance owner, or the sole caller of a count-1
 * barrier) and 0 for all others. For process-private barriers _b_limit
 * holds count-1, so zero means the trivial count-1 case; a negative value
 * marks a process-shared barrier (see pshared_barrier_wait). */
int pthread_barrier_wait(pthread_barrier_t *b)
{
	int limit = b->_b_limit;
	struct instance *inst;

	/* Trivial case: count was set at 1 */
	if (!limit) return PTHREAD_BARRIER_SERIAL_THREAD;

	/* Process-shared barriers require a separate, inefficient wait */
	if (limit < 0) return pshared_barrier_wait(b);

	/* Otherwise we need a lock on the barrier object */
	while (a_swap(&b->_b_lock, 1))
		__wait(&b->_b_lock, &b->_b_waiters, 1, 1);
	inst = b->_b_inst;

	/* First thread to enter the barrier becomes the "instance owner" */
	if (!inst) {
		/* The round's state lives on this thread's stack, so this
		 * thread must not return until the last exiter is done with
		 * it — hence the 'finished' handshake below. */
		struct instance new_inst = { 0 };
		int spins = 200;
		b->_b_inst = inst = &new_inst;
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		/* Spin briefly in case the round completes without sleeping. */
		while (spins-- && !inst->finished)
			a_spin();
		/* 0->1 means no exiter has finished yet: sleep until the last
		 * exiter bumps 'finished' past 1. 1->2 means it already did. */
		a_inc(&inst->finished);
		while (inst->finished == 1)
			/* FUTEX_WAIT|128 is the process-private variant (128 is
			 * the private flag); fall back to the plain op on
			 * kernels that reject it with -ENOSYS. */
			__syscall(SYS_futex,&inst->finished,FUTEX_WAIT|128,1,0) != -ENOSYS
			|| __syscall(SYS_futex,&inst->finished,FUTEX_WAIT,1,0);
		return PTHREAD_BARRIER_SERIAL_THREAD;
	}

	/* Last thread to enter the barrier wakes all non-instance-owners */
	if (++inst->count == limit) {
		/* Detach the instance so the next waiter starts a fresh
		 * round, release the object lock, then release this round. */
		b->_b_inst = 0;
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		a_store(&inst->last, 1);
		if (inst->waiters)
			__wake(&inst->last, -1, 1);
	} else {
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		__wait(&inst->last, &inst->waiters, 0, 1);
	}

	/* Last thread to exit the barrier wakes the instance owner */
	/* Wake only if 'finished' was already nonzero, i.e. the owner gave
	 * up spinning and may be asleep in futex wait. */
	if (a_fetch_add(&inst->count,-1)==1 && a_fetch_add(&inst->finished,1))
		__wake(&inst->finished, 1, 1);

	return 0;
}