blob: 06b83db9262a56e5b19ba1116f02c389398ea546 [file] [log] [blame]
Rich Felker0b44a032011-02-12 00:22:29 -05001#include "pthread_impl.h"
2
/* Slow-path wait for process-shared barriers. Uses non-private futex
 * operations (last __wait/__wake argument 0) so waiters in other
 * processes mapping the same barrier can be woken. Returns
 * PTHREAD_BARRIER_SERIAL_THREAD for exactly one thread per round,
 * 0 for the others. */
static int pshared_barrier_wait(pthread_barrier_t *b)
{
	/* For pshared barriers _b_limit holds the count with the sign bit
	 * set as a flag; mask with INT_MAX and add 1 to recover the number
	 * of threads that must arrive. */
	int limit = (b->_b_limit & INT_MAX) + 1;
	int ret = 0;
	int v, w;

	/* A single-thread barrier is trivially satisfied with no sync. */
	if (limit==1) return PTHREAD_BARRIER_SERIAL_THREAD;

	/* Take the barrier lock. The lock word is set to <limit> (not 1)
	 * because each of the <limit> threads in this round will later
	 * decrement it once in the recursive unlock below. */
	while ((v=a_cas(&b->_b_lock, 0, limit)))
		__wait(&b->_b_lock, &b->_b_waiters, v, 0);

	/* Wait for <limit> threads to get to the barrier */
	if (++b->_b_count == limit) {
		a_store(&b->_b_count, 0);
		ret = PTHREAD_BARRIER_SERIAL_THREAD;
		if (b->_b_waiters2) __wake(&b->_b_count, -1, 0);
	} else {
		/* Not the last arrival: release the lock so later threads in
		 * this round can enter, then wait for the count to be reset. */
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 0);
		while ((v=b->_b_count)>0)
			__wait(&b->_b_count, &b->_b_waiters2, v, 0);
	}

	__vm_lock();

	/* Ensure all threads have a vm lock before proceeding */
	/* _b_count is driven negative here, one step per thread; the thread
	 * that reaches 1-limit is the last one in and resets it to 0. */
	if (a_fetch_add(&b->_b_count, -1)==1-limit) {
		a_store(&b->_b_count, 0);
		if (b->_b_waiters2) __wake(&b->_b_count, -1, 0);
	} else {
		while ((v=b->_b_count))
			__wait(&b->_b_count, &b->_b_waiters2, v, 0);
	}

	/* Perform a recursive unlock suitable for self-sync'd destruction */
	/* Each thread decrements the lock word once; the last decrement
	 * releases it (stores 0). NOTE(review): the v==INT_MIN+1 case
	 * appears to handle a concurrent destroyer having set the sign
	 * bit on _b_lock — confirm against pthread_barrier_destroy. */
	do {
		v = b->_b_lock;
		w = b->_b_waiters;
	} while (a_cas(&b->_b_lock, v, v==INT_MIN+1 ? 0 : v-1) != v);

	/* Wake a thread waiting to reuse or destroy the barrier */
	if (v==INT_MIN+1 || (v==1 && w))
		__wake(&b->_b_lock, 1, 0);

	__vm_unlock();

	return ret;
}
51
/* Per-round barrier state for the non-pshared fast path. The first
 * thread to arrive allocates this on its own stack and publishes it
 * via b->_b_inst; that "instance owner" must therefore be the last
 * thread to return (see pthread_barrier_wait below). Fields are
 * volatile because they are read in wait loops and modified by other
 * threads via atomics. */
struct instance
{
	volatile int count;	/* threads currently inside this round */
	volatile int last;	/* set to 1 when the final arrival releases the round */
	volatile int waiters;	/* waiter count for the 'last' futex */
	volatile int finished;	/* exit handshake between leavers and the owner */
};
59
/* Wait on a barrier. Returns PTHREAD_BARRIER_SERIAL_THREAD in exactly
 * one thread per round and 0 in the rest. Non-pshared barriers use a
 * stack-allocated per-round instance and private futexes (last
 * __wait/__wake argument 1). */
int pthread_barrier_wait(pthread_barrier_t *b)
{
	int limit = b->_b_limit;
	struct instance *inst;

	/* Trivial case: count was set at 1 */
	/* (_b_limit stores count-1, so 0 here means count==1.) */
	if (!limit) return PTHREAD_BARRIER_SERIAL_THREAD;

	/* Process-shared barriers require a separate, inefficient wait */
	/* (negative _b_limit marks PTHREAD_PROCESS_SHARED). */
	if (limit < 0) return pshared_barrier_wait(b);

	/* Otherwise we need a lock on the barrier object */
	while (a_swap(&b->_b_lock, 1))
		__wait(&b->_b_lock, &b->_b_waiters, 1, 1);
	inst = b->_b_inst;

	/* First thread to enter the barrier becomes the "instance owner" */
	if (!inst) {
		/* The instance lives on this thread's stack, so this thread
		 * must not return until every other thread has left it. */
		struct instance new_inst = { 0 };
		int spins = 200;
		b->_b_inst = inst = &new_inst;
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		/* Spin briefly in the hope the round finishes without a
		 * sleep, then bump 'finished' and futex-wait until the last
		 * leaver bumps it past 1 (see exit handshake below). */
		while (spins-- && !inst->finished)
			a_spin();
		a_inc(&inst->finished);
		/* FUTEX_WAIT|128 is the private-futex form; fall back to the
		 * plain op if the kernel reports it unsupported (-ENOSYS). */
		while (inst->finished == 1)
			__syscall(SYS_futex,&inst->finished,FUTEX_WAIT|128,1,0) != -ENOSYS
			|| __syscall(SYS_futex,&inst->finished,FUTEX_WAIT,1,0);
		return PTHREAD_BARRIER_SERIAL_THREAD;
	}

	/* Last thread to enter the barrier wakes all non-instance-owners */
	if (++inst->count == limit) {
		/* Detach the instance so the next round starts fresh, then
		 * release the object lock before waking this round's waiters. */
		b->_b_inst = 0;
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		a_store(&inst->last, 1);
		if (inst->waiters)
			__wake(&inst->last, -1, 1);
	} else {
		/* Not last: drop the object lock and sleep until released. */
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		__wait(&inst->last, &inst->waiters, 0, 1);
	}

	/* Last thread to exit the barrier wakes the instance owner */
	/* The second a_fetch_add is nonzero only if the owner already gave
	 * up spinning and incremented 'finished', i.e. it may be asleep. */
	if (a_fetch_add(&inst->count,-1)==1 && a_fetch_add(&inst->finished,1))
		__wake(&inst->finished, 1, 1);

	return 0;
}