#include "pthread_impl.h"
#include "stdio_impl.h"
#include <sys/mman.h>

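/* The following no-op stand-ins are weak; strong definitions elsewhere in
 * libc replace them when the corresponding facilities are linked: the ptc
 * ("pthread create") hooks synchronize thread creation with dlopen's TLS
 * installation, and __pthread_tsd_run_dtors comes from pthread_key_create.c. */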
static void dummy_0()
{
}
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);

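/* pthread_exit: run pending cancellation cleanup handlers and TSD
 * destructors, then tear the thread down. The last live thread to call
 * this converts it into a process exit(0). */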
_Noreturn void pthread_exit(void *result)
{
	pthread_t self = pthread_self();
	sigset_t set;

	self->result = result;

	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

	__pthread_tsd_run_dtors();

	__lock(self->exitlock);

	/* Mark this thread dead before decrementing count */
	__lock(self->killlock);
	self->dead = 1;
	__unlock(self->killlock);

	/* Block all signals before decrementing the live thread count.
	 * This is important to ensure that dynamically allocated TLS
	 * is not under-allocated/over-committed, and possibly for other
	 * reasons as well. */
	__syscall(SYS_rt_sigprocmask, SIG_BLOCK, SIGALL_SET, &set, _NSIG/8);

	/* It's impossible to determine whether this is "the last thread"
	 * until performing the atomic decrement, since multiple threads
	 * could exit at the same time. For the last thread, revert the
	 * decrement and unblock signals to give the atexit handlers and
	 * stdio cleanup code a consistent state. */
	if (a_fetch_add(&libc.threads_minus_1, -1)==0) {
		libc.threads_minus_1 = 0;
		__syscall(SYS_rt_sigprocmask, SIG_SETMASK, &set, 0, _NSIG/8);
		exit(0);
	}

	if (self->detached && self->map_base) {
		/* Detached threads must avoid the kernel clear_child_tid
		 * feature, since the virtual address will have been
		 * unmapped and possibly already reused by a new mapping
		 * at the time the kernel would perform the write. In
		 * the case of threads that started out detached, the
		 * initial clone flags are correct, but if the thread was
		 * detached later (== 2), we need to clear it here. */
		if (self->detached == 2) __syscall(SYS_set_tid_address, 0);

		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}

	for (;;) __syscall(SYS_exit, 0);
}

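/* Back ends for the pthread_cleanup_push/pthread_cleanup_pop macros;
 * they maintain the per-thread list of cancellation cleanup handlers
 * that pthread_exit runs above. */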
void __do_cleanup_push(struct __ptcb *cb)
{
	struct pthread *self = pthread_self();
	cb->__next = self->cancelbuf;
	self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb *cb)
{
	__pthread_self()->cancelbuf = cb->__next;
}

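/* Entry point for the new thread. If the creator requested explicit
 * scheduling, wait on the startlock until the attributes have been
 * applied; a nonzero value left in the lock means setup failed, so the
 * thread marks itself for self-reaping (detached == 2) and exits.
 * Otherwise restore the saved signal mask and run the application's
 * start function. */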
static int start(void *p)
{
	pthread_t self = p;
	if (self->startlock[0]) {
		__wait(self->startlock, 0, 1, 1);
		if (self->startlock[0]) {
			self->detached = 2;
			pthread_exit(0);
		}
		__syscall(SYS_rt_sigprocmask, SIG_SETMASK,
			self->sigmask, 0, _NSIG/8);
	}
	if (self->unblock_cancel)
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK,
			SIGPT_SET, 0, _NSIG/8);
	pthread_exit(self->start(self->start_arg));
	return 0;
}

#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static const size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);

static FILE *const dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);

static void init_file_lock(FILE *f)
{
	if (f && f->lock<0) f->lock = 0;
}

void *__copy_tls(unsigned char *);

int pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
{
	int ret;
	size_t size, guard;
	struct pthread *self = pthread_self(), *new;
	unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit;
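	/* Magic clone flags: share the caller's address space, files,
	 * filesystem state, signal handlers and SysV semaphore adjustments,
	 * with TLS setup and parent/child tid bookkeeping. The
	 * CLONE_CHILD_CLEARTID bit (0x200000) is dropped below for threads
	 * created detached. */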
	unsigned flags = 0x7d8f00;
	int do_sched = 0;
	pthread_attr_t attr = {0};

	if (!self) return ENOSYS;
	if (!libc.threaded) {
		for (FILE *f=libc.ofl_head; f; f=f->next)
			init_file_lock(f);
		init_file_lock(__stdin_used);
		init_file_lock(__stdout_used);
		init_file_lock(__stderr_used);
		libc.threaded = 1;
	}
	if (attrp) attr = *attrp;

	__acquire_ptc();

	if (attr._a_stackaddr) {
		size_t need = libc.tls_size + __pthread_tsd_size;
		size = attr._a_stacksize + DEFAULT_STACK_SIZE;
		stack = (void *)(attr._a_stackaddr & -16);
		stack_limit = (void *)(attr._a_stackaddr - size);
		/* Use application-provided stack for TLS only when
		 * it does not take more than ~12% or 2k of the
		 * application's stack space. */
		if (need < size/8 && need < 2048) {
			tsd = stack - __pthread_tsd_size;
			stack = tsd - libc.tls_size;
		} else {
			size = ROUND(need);
			guard = 0;
		}
	} else {
		guard = ROUND(DEFAULT_GUARD_SIZE + attr._a_guardsize);
		size = guard + ROUND(DEFAULT_STACK_SIZE + attr._a_stacksize
			+ libc.tls_size + __pthread_tsd_size);
	}

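	/* If no usable space was carved out of a caller-provided stack, map
	 * a fresh region laid out bottom-to-top as: guard pages (PROT_NONE),
	 * the downward-growing stack, the TLS image, and the TSD pointer
	 * array at the very top. */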
	if (!tsd) {
		if (guard) {
			map = mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
			if (mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)) {
				munmap(map, size);
				goto fail;
			}
		} else {
			map = mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
		}
		tsd = map + size - __pthread_tsd_size;
		if (!stack) {
			stack = tsd - libc.tls_size;
			stack_limit = map + guard;
		}
	}

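	/* __copy_tls installs the initial TLS image just below the TSD area
	 * and returns the new thread's descriptor; the creator fills in the
	 * remaining fields before cloning. */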
	new = __copy_tls(tsd - libc.tls_size);
	new->map_base = map;
	new->map_size = size;
	new->stack = stack;
	new->stack_size = stack - stack_limit;
	new->pid = self->pid;
	new->errno_ptr = &new->errno_val;
	new->start = entry;
	new->start_arg = arg;
	new->self = new;
	new->tsd = (void *)tsd;
	if (attr._a_detach) {
		new->detached = 1;
		flags -= 0x200000;
	}
	if (attr._a_sched) {
		do_sched = new->startlock[0] = 1;
		__syscall(SYS_rt_sigprocmask, SIG_BLOCK,
			SIGALL_SET, self->sigmask, _NSIG/8);
	}
	new->unblock_cancel = self->cancel;
	new->canary = self->canary;

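	/* The live-thread count is incremented before the clone so that code
	 * keyed off libc.threads_minus_1 (e.g. locking fast paths) already
	 * accounts for the new thread; it is rolled back below if the clone
	 * fails. */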
	a_inc(&libc.threads_minus_1);
	ret = __clone(start, stack, flags, new, &new->tid, TP_ADJ(new), &new->tid);

	__release_ptc();

	if (do_sched) {
		__syscall(SYS_rt_sigprocmask, SIG_SETMASK,
			new->sigmask, 0, _NSIG/8);
	}

	if (ret < 0) {
		a_dec(&libc.threads_minus_1);
		if (map) munmap(map, size);
		return EAGAIN;
	}

	if (do_sched) {
		ret = __syscall(SYS_sched_setscheduler, new->tid,
			attr._a_policy, &attr._a_prio);
		a_store(new->startlock, ret<0 ? 2 : 0);
		__wake(new->startlock, 1, 1);
		if (ret < 0) return -ret;
	}

	*res = new;
	return 0;
fail:
	__release_ptc();
	return EAGAIN;
}