blob: d01be551c1b54f21935b5970d6215cddb830465d [file] [log] [blame]
Rich Felker0b44a032011-02-12 00:22:29 -05001#include "pthread_impl.h"
2
/* Unwind one level of the cancellation-cleanup chain; when the chain is
 * exhausted, perform the final thread-exit sequence: run TSD destructors,
 * unmap a detached thread's own stack, and exit the kernel task. */
void __pthread_unwind_next(struct __ptcb *cb)
{
	int i, j, not_finished;
	pthread_t self;

	/* More cleanup contexts pending: jump into the next-outer one. */
	if (cb->__next) longjmp((void *)cb->__next->__jb, 1);

	self = pthread_self();
	if (self->cancel) self->result = PTHREAD_CANCELLED;

	/* Last thread alive exits the whole process. */
	if (!a_fetch_add(&libc.threads_minus_1, -1))
		exit(0);

	/* NOTE(review): presumably serializes exit against join/detach
	 * operations on this thread — confirm against pthread_join.c. */
	LOCK(&self->exitlock);

	/* Run thread-specific-data destructors. Destructors may store new
	 * values, so repeat until a pass makes no calls, capped at
	 * PTHREAD_DESTRUCTOR_ITERATIONS passes per POSIX. */
	not_finished = self->tsd_used;
	for (j=0; not_finished && j<PTHREAD_DESTRUCTOR_ITERATIONS; j++) {
		not_finished = 0;
		for (i=0; i<PTHREAD_KEYS_MAX; i++) {
			if (self->tsd[i] && libc.tsd_keys[i]) {
				void *tmp = self->tsd[i];
				self->tsd[i] = 0;	/* clear slot before calling destructor */
				libc.tsd_keys[i](tmp);
				not_finished = 1;
			}
		}
	}

	/* A detached thread owns its mapping and must unmap it itself;
	 * __unmapself must not return through the unmapped stack. */
	if (self->detached && self->map_base)
		__unmapself(self->map_base, self->map_size);

	__syscall_exit(0);
}
Rich Felker0b44a032011-02-12 00:22:29 -050036
37static void docancel(struct pthread *self)
38{
39 struct __ptcb cb = { .__next = self->cancelbuf };
40 __pthread_unwind_next(&cb);
41}
42
43static void cancel_handler(int sig, siginfo_t *si, void *ctx)
44{
Rich Felker1a9a2ff2011-02-13 19:58:30 -050045 struct pthread *self = __pthread_self();
Rich Felker0b44a032011-02-12 00:22:29 -050046 self->cancel = 1;
47 if (self->canceldisable || (!self->cancelasync && !self->cancelpoint))
48 return;
49 docancel(self);
50}
51
Rich Felker1a9a2ff2011-02-13 19:58:30 -050052static void cancelpt(int x)
53{
54 struct pthread *self = __pthread_self();
55 if (self->canceldisable) return;
56 self->cancelpoint = x;
57 if (self->cancel) docancel(self);
58}
59
Rich Felker0b44a032011-02-12 00:22:29 -050060/* "rsyscall" is a mechanism by which a thread can synchronously force all
61 * other threads to perform an arbitrary syscall. It is necessary to work
62 * around the non-conformant implementation of setuid() et al on Linux,
63 * which affect only the calling thread and not the whole process. This
64 * implementation performs some tricks with signal delivery to work around
65 * the fact that it does not keep any list of threads in userspace. */
66
/* Shared state for the rsyscall broadcast mechanism. */
static struct {
	volatile int lock, hold, blocks, cnt;	/* lock: serializes initiators; hold: parks handlers; blocks: threads in regions that must not take SIGSYSCALL; cnt: handlers that responded */
	unsigned long arg[6];	/* syscall arguments broadcast to every thread */
	int nr;	/* syscall number to broadcast */
	int err;	/* first errno reported by any thread, 0 if none */
} rs;
73
/* SIGSYSCALL handler: performs the broadcast syscall in this thread's
 * context, then synchronizes with the initiating thread via rs.cnt/rs.hold. */
static void rsyscall_handler(int sig, siginfo_t *si, void *ctx)
{
	/* Lingering queued signal after all threads already responded: no-op. */
	if (rs.cnt == libc.threads_minus_1) return;

	/* Perform the syscall; keep only the first failure's errno. */
	if (syscall6(rs.nr, rs.arg[0], rs.arg[1], rs.arg[2],
	    rs.arg[3], rs.arg[4], rs.arg[5]) < 0 && !rs.err) rs.err=errno;

	/* Report completion and park until the initiator drops rs.hold. */
	a_inc(&rs.cnt);
	__wake(&rs.cnt, 1, 1);
	while(rs.hold)
		__wait(&rs.hold, 0, 1, 1);
	a_dec(&rs.cnt);
	if (!rs.cnt) __wake(&rs.cnt, 1, 1);	/* last one out wakes the initiator */
}
88
89static int rsyscall(int nr, long a, long b, long c, long d, long e, long f)
90{
91 int i, ret;
92 sigset_t set = { 0 };
Rich Felker1a9a2ff2011-02-13 19:58:30 -050093 struct pthread *self = __pthread_self();
Rich Felker0b44a032011-02-12 00:22:29 -050094 sigaddset(&set, SIGSYSCALL);
95
96 LOCK(&rs.lock);
97 while ((i=rs.blocks))
98 __wait(&rs.blocks, 0, i, 1);
99
100 __libc_sigprocmask(SIG_BLOCK, &set, 0);
101
102 rs.nr = nr;
103 rs.arg[0] = a; rs.arg[1] = b;
104 rs.arg[2] = c; rs.arg[3] = d;
105 rs.arg[4] = d; rs.arg[5] = f;
106 rs.hold = 1;
107 rs.err = 0;
108 rs.cnt = 0;
109
110 /* Dispatch signals until all threads respond */
111 for (i=libc.threads_minus_1; i; i--)
112 sigqueue(self->pid, SIGSYSCALL, (union sigval){0});
113 while ((i=rs.cnt) < libc.threads_minus_1) {
114 sigqueue(self->pid, SIGSYSCALL, (union sigval){0});
115 __wait(&rs.cnt, 0, i, 1);
116 }
117
118 /* Handle any lingering signals with no-op */
119 __libc_sigprocmask(SIG_UNBLOCK, &set, 0);
120
121 /* Resume other threads' signal handlers and wait for them */
122 rs.hold = 0;
123 __wake(&rs.hold, -1, 0);
124 while((i=rs.cnt)) __wait(&rs.cnt, 0, i, 1);
125
126 if (rs.err) errno = rs.err, ret = -1;
127 else ret = syscall6(nr, a, b, c, d, e, f);
128
129 UNLOCK(&rs.lock);
130 return ret;
131}
132
/* One-time setup of the threading runtime: registers libc hooks and
 * installs the cancellation and rsyscall signal handlers. */
static void init_threads()
{
	struct sigaction sa = { .sa_flags = SA_SIGINFO | SA_RESTART };
	libc.lock = __lock;
	libc.cancelpt = cancelpt;
	libc.rsyscall = rsyscall;
	/* Cancel handler installed with an empty mask (sa_mask is still
	 * zero-initialized here). */
	sa.sa_sigaction = cancel_handler;
	__libc_sigaction(SIGCANCEL, &sa, 0);
	/* Rsyscall handler runs with both control signals blocked. */
	sigaddset(&sa.sa_mask, SIGSYSCALL);
	sigaddset(&sa.sa_mask, SIGCANCEL);
	sa.sa_sigaction = rsyscall_handler;
	__libc_sigaction(SIGSYSCALL, &sa, 0);
	/* Ensure both signals are deliverable in this (initial) thread. */
	sigprocmask(SIG_UNBLOCK, &sa.sa_mask, 0);
}
147
148static int start(void *p)
149{
150 struct pthread *self = p;
151 pthread_exit(self->start(self->start_arg));
152 return 0;
153}
154
/* Flag word passed to __clone. NOTE(review): looks like an encoded set
 * of CLONE_* flags — confirm against the arch-specific __clone stub. */
#define CLONE_MAGIC 0x7d0f00
int __clone(int (*)(void *), void *, int, void *, pid_t *, void *, pid_t *);

/* Round x up to a whole number of pages. */
#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static const size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);
163
164int pthread_create(pthread_t *res, const pthread_attr_t *attr, void *(*entry)(void *), void *arg)
165{
166 static int init;
167 int ret;
168 size_t size, guard;
169 struct pthread *self = pthread_self(), *new;
170 unsigned char *map, *stack, *tsd;
171 static const pthread_attr_t default_attr;
172
173 if (!self) return errno = ENOSYS;
174 if (!init && ++init) init_threads();
175
176 if (!attr) attr = &default_attr;
177 guard = ROUND(attr->__guardsize + DEFAULT_GUARD_SIZE);
178 size = guard + ROUND(attr->__stacksize + DEFAULT_STACK_SIZE);
179 size += __pthread_tsd_size;
180 map = mmap(0, size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANON, -1, 0);
181 if (!map) return EAGAIN;
182 mprotect(map, guard, PROT_NONE);
183
184 tsd = map + size - __pthread_tsd_size;
185 new = (void *)(tsd - sizeof *new - PAGE_SIZE%sizeof *new);
186 new->map_base = map;
187 new->map_size = size;
188 new->pid = self->pid;
189 new->errno_ptr = &new->errno_val;
190 new->start = entry;
191 new->start_arg = arg;
192 new->self = new;
193 new->tsd = (void *)tsd;
194 new->detached = attr->__detach;
195 new->attr = *attr;
196 memcpy(new->tlsdesc, self->tlsdesc, sizeof new->tlsdesc);
197 new->tlsdesc[1] = (uintptr_t)new;
198 stack = (void *)((uintptr_t)new-1 & ~(uintptr_t)15);
199
200 /* We must synchronize new thread creation with rsyscall
201 * delivery. This looks to be the least expensive way: */
202 a_inc(&rs.blocks);
203 while (rs.lock) __wait(&rs.lock, 0, 1, 1);
204
205 a_inc(&libc.threads_minus_1);
206 ret = __clone(start, stack, CLONE_MAGIC, new,
207 &new->tid, &new->tlsdesc, &new->tid);
208
209 a_dec(&rs.blocks);
210 if (rs.lock) __wake(&rs.blocks, 1, 1);
211
212 if (ret < 0) {
213 a_dec(&libc.threads_minus_1);
214 munmap(map, size);
Rich Felker59666802011-02-15 02:20:21 -0500215 return EAGAIN;
Rich Felker0b44a032011-02-12 00:22:29 -0500216 }
217 *res = new;
218 return 0;
219}
Rich Felker1a9a2ff2011-02-13 19:58:30 -0500220
221void pthread_exit(void *result)
222{
223 struct pthread *self = pthread_self();
224 self->result = result;
225 docancel(self);
226}