#include "pthread_impl.h"

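/* Unwind to the next cancellation cleanup handler, or, if none remain,
 * finish terminating the calling thread: report cancellation, run TSD
 * destructors, and exit, unmapping the thread's own stack if detached. */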
void __pthread_unwind_next(struct __ptcb *cb)
{
	int i, j, not_finished;
	pthread_t self;

	if (cb->__next) longjmp((void *)cb->__next->__jb, 1);

	self = pthread_self();
	if (self->cancel) self->result = PTHREAD_CANCELED;

	LOCK(&self->exitlock);

	not_finished = self->tsd_used;
	for (j=0; not_finished && j<PTHREAD_DESTRUCTOR_ITERATIONS; j++) {
		not_finished = 0;
		for (i=0; i<PTHREAD_KEYS_MAX; i++) {
			if (self->tsd[i] && libc.tsd_keys[i]) {
				void *tmp = self->tsd[i];
				self->tsd[i] = 0;
				libc.tsd_keys[i](tmp);
				not_finished = 1;
			}
		}
	}

	syscall4(__NR_sigprocmask, SIG_BLOCK, (long)(uint64_t[1]){-1}, 0, 8);

	if (!a_fetch_add(&libc.threads_minus_1, -1))
		exit(0);

	if (self->detached && self->map_base)
		__unmapself(self->map_base, self->map_size);

	__syscall_exit(0);
}

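/* Begin acting on a cancellation or exit request: enter the unwind
 * chain at the innermost registered cleanup handler (self->cancelbuf). */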
static void docancel(struct pthread *self)
{
	struct __ptcb cb = { .__next = self->cancelbuf };
	__pthread_unwind_next(&cb);
}

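/* SIGCANCEL handler: mark the thread cancelled, then act immediately
 * only if cancellation is enabled and the thread is either in
 * asynchronous-cancel mode or blocked at a cancellation point. */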
static void cancel_handler(int sig, siginfo_t *si, void *ctx)
{
	struct pthread *self = __pthread_self();
	self->cancel = 1;
	if (self->canceldisable || (!self->cancelasync && !self->cancelpoint))
		return;
	docancel(self);
}

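/* Cancellation-point hook installed as libc.cancelpt: x records whether
 * the thread is entering or leaving a cancellation point; unless
 * cancellation is disabled, a pending cancel request is acted on here. */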
static void cancelpt(int x)
{
	struct pthread *self = __pthread_self();
	if (self->canceldisable) return;
	self->cancelpoint = x;
	if (self->cancel) docancel(self);
}

/* "rsyscall" is a mechanism by which a thread can synchronously force all
 * other threads to perform an arbitrary syscall. It is necessary to work
 * around the non-conformant implementation of setuid() et al on Linux,
 * which affect only the calling thread and not the whole process. This
 * implementation performs some tricks with signal delivery to work around
 * the fact that it does not keep any list of threads in userspace. */

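/* A sketch of the handshake implemented below: the initiating thread
 * takes rs.lock, records the syscall number and arguments in rs, sets
 * rs.hold, and signals every other thread with SIGSYSCALL. Each handler
 * performs the recorded syscall, bumps rs.cnt, and parks on rs.hold.
 * Once rs.cnt reaches libc.threads_minus_1, the initiator clears
 * rs.hold, waits for rs.cnt to drop back to zero, performs the syscall
 * itself, and releases the lock. The shared state lives in rs: */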
static struct {
	volatile int lock, hold, blocks, cnt;
	unsigned long arg[6];
	int nr;
	int err;
} rs;

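/* SIGSYSCALL handler run in every thread other than the initiator:
 * perform the broadcast syscall, record the first error seen, report
 * completion via rs.cnt, then wait on rs.hold so all threads are
 * released together. */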
static void rsyscall_handler(int sig, siginfo_t *si, void *ctx)
{
	if (rs.cnt == libc.threads_minus_1) return;

	if (syscall6(rs.nr, rs.arg[0], rs.arg[1], rs.arg[2],
	    rs.arg[3], rs.arg[4], rs.arg[5]) < 0 && !rs.err) rs.err=errno;

	a_inc(&rs.cnt);
	__wake(&rs.cnt, 1, 1);
	while(rs.hold)
		__wait(&rs.hold, 0, 1, 1);
	a_dec(&rs.cnt);
	if (!rs.cnt) __wake(&rs.cnt, 1, 1);
}

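/* Initiator side: force every other thread to perform the given syscall,
 * then perform it locally and return its result; if any other thread's
 * syscall failed, return -1 with errno set to the first error observed. */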
static int rsyscall(int nr, long a, long b, long c, long d, long e, long f)
{
	int i, ret;
	sigset_t set = { 0 };
	struct pthread *self = __pthread_self();
	sigaddset(&set, SIGSYSCALL);

	LOCK(&rs.lock);
	while ((i=rs.blocks))
		__wait(&rs.blocks, 0, i, 1);

	__libc_sigprocmask(SIG_BLOCK, &set, 0);

	rs.nr = nr;
	rs.arg[0] = a; rs.arg[1] = b;
	rs.arg[2] = c; rs.arg[3] = d;
	rs.arg[4] = e; rs.arg[5] = f;
	rs.hold = 1;
	rs.err = 0;
	rs.cnt = 0;

	/* Dispatch signals until all threads respond */
	for (i=libc.threads_minus_1; i; i--)
		sigqueue(self->pid, SIGSYSCALL, (union sigval){0});
	while ((i=rs.cnt) < libc.threads_minus_1) {
		sigqueue(self->pid, SIGSYSCALL, (union sigval){0});
		__wait(&rs.cnt, 0, i, 1);
	}

	/* Handle any lingering signals with no-op */
	__libc_sigprocmask(SIG_UNBLOCK, &set, 0);

	/* Resume other threads' signal handlers and wait for them */
	rs.hold = 0;
	__wake(&rs.hold, -1, 0);
	while((i=rs.cnt)) __wait(&rs.cnt, 0, i, 1);

	if (rs.err) errno = rs.err, ret = -1;
	else ret = syscall6(nr, a, b, c, d, e, f);

	UNLOCK(&rs.lock);
	return ret;
}

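/* One-time initialization when the first thread is created: install the
 * lock, cancellation-point, and rsyscall hooks, plus the SIGCANCEL and
 * SIGSYSCALL handlers. */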
static void init_threads()
{
	struct sigaction sa = { .sa_flags = SA_SIGINFO | SA_RESTART };
	libc.lock = __lock;
	libc.cancelpt = cancelpt;
	libc.rsyscall = rsyscall;
	sa.sa_sigaction = cancel_handler;
	__libc_sigaction(SIGCANCEL, &sa, 0);
	sigaddset(&sa.sa_mask, SIGSYSCALL);
	sigaddset(&sa.sa_mask, SIGCANCEL);
	sa.sa_sigaction = rsyscall_handler;
	__libc_sigaction(SIGSYSCALL, &sa, 0);
	sigprocmask(SIG_UNBLOCK, &sa.sa_mask, 0);
}

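/* Entry point handed to clone: run the new thread's start function and
 * turn its return value into a pthread_exit. */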
static int start(void *p)
{
	struct pthread *self = p;
	pthread_exit(self->start(self->start_arg));
	return 0;
}

int __uniclone(void *, int (*)(), void *);

#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static const size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);

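/* Create a new thread. A single anonymous mapping holds, from lowest to
 * highest address, the guard page(s), the new thread's stack, its thread
 * descriptor, and its TSD array; the stack grows down from just below
 * the descriptor. */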
int pthread_create(pthread_t *res, const pthread_attr_t *attr, void *(*entry)(void *), void *arg)
{
	static int init;
	int ret;
	size_t size, guard;
	struct pthread *self = pthread_self(), *new;
	unsigned char *map, *stack, *tsd;
	static const pthread_attr_t default_attr;

	if (!self) return errno = ENOSYS;
	if (!init && ++init) init_threads();

	if (!attr) attr = &default_attr;
	guard = ROUND(attr->_a_guardsize + DEFAULT_GUARD_SIZE);
	size = guard + ROUND(attr->_a_stacksize + DEFAULT_STACK_SIZE);
	size += __pthread_tsd_size;
	map = mmap(0, size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANON, -1, 0);
	if (map == MAP_FAILED) return EAGAIN;
	mprotect(map, guard, PROT_NONE);

	tsd = map + size - __pthread_tsd_size;
	new = (void *)(tsd - sizeof *new - PAGE_SIZE%sizeof *new);
	new->map_base = map;
	new->map_size = size;
	new->pid = self->pid;
	new->errno_ptr = &new->errno_val;
	new->start = entry;
	new->start_arg = arg;
	new->self = new;
	new->tsd = (void *)tsd;
	new->detached = attr->_a_detach;
	new->attr = *attr;
	memcpy(new->tlsdesc, self->tlsdesc, sizeof new->tlsdesc);
	new->tlsdesc[1] = (uintptr_t)new;
	stack = (void *)((uintptr_t)new-1 & ~(uintptr_t)15);

	/* We must synchronize new thread creation with rsyscall
	 * delivery. This looks to be the least expensive way: */
	a_inc(&rs.blocks);
	while (rs.lock) __wait(&rs.lock, 0, 1, 1);

	a_inc(&libc.threads_minus_1);
	ret = __uniclone(stack, start, new);

	a_dec(&rs.blocks);
	if (rs.lock) __wake(&rs.blocks, 1, 1);

	if (ret < 0) {
		a_dec(&libc.threads_minus_1);
		munmap(map, size);
		return EAGAIN;
	}
	*res = new;
	return 0;
}

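/* Record the thread's result and enter the unwind path, which runs any
 * registered cleanup handlers and TSD destructors before the thread
 * terminates. */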
void pthread_exit(void *result)
{
	struct pthread *self = pthread_self();
	self->result = result;
	docancel(self);
}