#include "pthread_impl.h"
#include "stdio_impl.h"
#include <sys/mman.h>

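/* The hooks below default to harmless no-ops via dummy_0; stronger
 * definitions elsewhere in libc replace these weak aliases when the
 * corresponding facilities are linked in. __pthread_tsd_run_dtors
 * presumably comes with the TSD support in pthread_key_create.c, and
 * __acquire_ptc/__release_ptc presumably serialize thread creation
 * against operations that must exclude it. */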
static void dummy_0()
{
}
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);

_Noreturn void pthread_exit(void *result)
{
	pthread_t self = pthread_self();
	sigset_t set;

	self->result = result;

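	/* Run any cancellation cleanup handlers still pushed at exit time,
	 * newest first; cancelbuf is the head of the list maintained by
	 * __do_cleanup_push/__do_cleanup_pop below. */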
	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

	__pthread_tsd_run_dtors();

	__lock(self->exitlock);

	/* Mark this thread dead before decrementing count */
	__lock(self->killlock);
	self->dead = 1;

	/* Block all signals before decrementing the live thread count.
	 * This is important to ensure that dynamically allocated TLS
	 * is not under-allocated/over-committed, and possibly for other
	 * reasons as well. */
	__block_all_sigs(&set);

	/* Wait to unlock the kill lock, which governs functions like
	 * pthread_kill which target a thread id, until signals have
	 * been blocked. This precludes observation of the thread id
	 * as a live thread (with application code running in it) after
	 * the thread was reported dead by ESRCH being returned. */
	__unlock(self->killlock);

	/* It's impossible to determine whether this is "the last thread"
	 * until performing the atomic decrement, since multiple threads
	 * could exit at the same time. For the last thread, revert the
	 * decrement and unblock signals to give the atexit handlers and
	 * stdio cleanup code a consistent state. */
	if (a_fetch_add(&libc.threads_minus_1, -1)==0) {
		libc.threads_minus_1 = 0;
		__restore_sigs(&set);
		exit(0);
	}

	if (self->detached && self->map_base) {
		/* Detached threads must avoid the kernel clear_child_tid
		 * feature, since the virtual address will have been
		 * unmapped and possibly already reused by a new mapping
		 * at the time the kernel would perform the write. In
		 * the case of threads that started out detached, the
		 * initial clone flags are correct, but if the thread was
		 * detached later (== 2), we need to clear it here. */
		if (self->detached == 2) __syscall(SYS_set_tid_address, 0);

		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}

	for (;;) __syscall(SYS_exit, 0);
}

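/* Backends for the pthread_cleanup_push/pop macros in pthread.h. As a
 * rough sketch (not the exact macro text), application code like
 *
 *	pthread_cleanup_push(free, p);
 *	...
 *	pthread_cleanup_pop(1);
 *
 * expands to a block that places a struct __ptcb on the caller's stack,
 * links it in via the push function on entry, and unlinks it via the
 * pop function (running the handler if requested) on exit. pthread_exit
 * above walks the same list so handlers still pushed when a thread
 * exits or is cancelled are run. */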
void __do_cleanup_push(struct __ptcb *cb)
{
	struct pthread *self = pthread_self();
	cb->__next = self->cancelbuf;
	self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb *cb)
{
	__pthread_self()->cancelbuf = cb->__next;
}

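/* Entry point handed to __clone. If the creating thread requested an
 * explicit scheduling policy (see pthread_create below), startlock[0]
 * is 1 and application signals are blocked in self->sigmask: wait for
 * the creator to apply the policy. A stored value of 2 means setting
 * the policy failed and pthread_create is returning an error, so this
 * thread marks itself detached and exits without running the
 * application's start function; otherwise the saved signal mask is
 * restored and startup proceeds. */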
static int start(void *p)
{
	pthread_t self = p;
	if (self->startlock[0]) {
		__wait(self->startlock, 0, 1, 1);
		if (self->startlock[0]) {
			self->detached = 2;
			pthread_exit(0);
		}
		__restore_sigs(self->sigmask);
	}
	if (self->unblock_cancel)
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK,
			SIGPT_SET, 0, _NSIG/8);
	pthread_exit(self->start(self->start_arg));
	return 0;
}

#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static const size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);

static FILE *const dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);

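/* The weak null __std*_used definitions above let this file reference
 * the std streams without forcing stdio to be linked. Before the first
 * new thread can run, any FILE whose lock is negative (presumably the
 * single-threaded "no locking needed" state) is reset to 0 so that
 * stdio locking takes effect once the process is multithreaded. */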
static void init_file_lock(FILE *f)
{
	if (f && f->lock<0) f->lock = 0;
}

void *__copy_tls(unsigned char *);

int pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
{
	int ret;
	size_t size, guard;
	struct pthread *self = pthread_self(), *new;
	unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit;
	unsigned flags = 0x7d8f00;
	int do_sched = 0;
	pthread_attr_t attr = {0};

	if (!self) return ENOSYS;
	if (!libc.threaded) {
		for (FILE *f=libc.ofl_head; f; f=f->next)
			init_file_lock(f);
		init_file_lock(__stdin_used);
		init_file_lock(__stdout_used);
		init_file_lock(__stderr_used);
		libc.threaded = 1;
	}
	if (attrp) attr = *attrp;

	__acquire_ptc();

	if (attr._a_stackaddr) {
		size_t need = libc.tls_size + __pthread_tsd_size;
		size = attr._a_stacksize + DEFAULT_STACK_SIZE;
		stack = (void *)(attr._a_stackaddr & -16);
		stack_limit = (void *)(attr._a_stackaddr - size);
		/* Use application-provided stack for TLS only when
		 * it does not take more than ~12% or 2k of the
		 * application's stack space. */
		if (need < size/8 && need < 2048) {
			tsd = stack - __pthread_tsd_size;
			stack = tsd - libc.tls_size;
		} else {
			size = ROUND(need);
			guard = 0;
		}
	} else {
		guard = ROUND(DEFAULT_GUARD_SIZE + attr._a_guardsize);
		size = guard + ROUND(DEFAULT_STACK_SIZE + attr._a_stacksize
			+ libc.tls_size + __pthread_tsd_size);
	}

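	/* When no usable caller-provided region was set up above, map a
	 * fresh one: an optional PROT_NONE guard at the bottom, the stack
	 * growing down from just below the TLS area, and the TLS block and
	 * TSD pointer array at the top of the mapping. */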
	if (!tsd) {
		if (guard) {
			map = mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
			if (mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)) {
				munmap(map, size);
				goto fail;
			}
		} else {
			map = mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
		}
		tsd = map + size - __pthread_tsd_size;
		if (!stack) {
			stack = tsd - libc.tls_size;
			stack_limit = map + guard;
		}
	}

	new = __copy_tls(tsd - libc.tls_size);
	new->map_base = map;
	new->map_size = size;
	new->stack = stack;
	new->stack_size = stack - stack_limit;
	new->pid = self->pid;
	new->errno_ptr = &new->errno_val;
	new->start = entry;
	new->start_arg = arg;
	new->self = new;
	new->tsd = (void *)tsd;
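	/* Threads created detached unmap their own stacks, so the kernel
	 * must not write to the child tid address after exit (see the
	 * clear_child_tid comment in pthread_exit above); dropping 0x200000
	 * from the clone flags (presumably CLONE_CHILD_CLEARTID) arranges
	 * that at creation time. */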
	if (attr._a_detach) {
		new->detached = 1;
		flags -= 0x200000;
	}
	if (attr._a_sched) {
		do_sched = new->startlock[0] = 1;
		__block_app_sigs(new->sigmask);
	}
	new->unblock_cancel = self->cancel;
	new->canary = self->canary;

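	/* The thread count is bumped before the clone so the new thread
	 * always observes a consistent nonzero count; it is rolled back
	 * below if the clone fails. &new->tid is passed as both the parent-
	 * and child-tid pointer so the kernel publishes the new tid there
	 * and, for joinable threads, clears and futex-wakes it on exit,
	 * which is presumably what pthread_join waits on. */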
	a_inc(&libc.threads_minus_1);
	ret = __clone(start, stack, flags, new, &new->tid, TP_ADJ(new), &new->tid);

	__release_ptc();

	if (do_sched) {
		__restore_sigs(new->sigmask);
	}

	if (ret < 0) {
		a_dec(&libc.threads_minus_1);
		if (map) munmap(map, size);
		return EAGAIN;
	}

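	/* Complete the scheduling handshake with the new thread: apply the
	 * requested policy and priority, then publish the result through
	 * the startlock futex. Storing 2 tells the new thread (waiting in
	 * start above) to abandon itself; storing 0 lets it proceed. */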
	if (do_sched) {
		ret = __syscall(SYS_sched_setscheduler, new->tid,
			attr._a_policy, &attr._a_prio);
		a_store(new->startlock, ret<0 ? 2 : 0);
		__wake(new->startlock, 1, 1);
		if (ret < 0) return -ret;
	}

	*res = new;
	return 0;
fail:
	__release_ptc();
	return EAGAIN;
}