#include "pthread_impl.h"
#include "stdio_impl.h"
#include "libc.h"
#include <sys/mman.h>

static void dummy_0()
{
}
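/* These weak aliases resolve to the no-op stub above unless a strong
 * definition is linked in from elsewhere in libc (for example, the TSD
 * destructor runner that comes with pthread_key_create), keeping those
 * subsystems optional when they are not otherwise referenced. */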
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);

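/* pthread_exit: run any pending cancellation cleanup handlers and TSD
 * destructors, mark the thread dead, then either terminate the whole
 * process (if this was the last thread) or exit just this thread,
 * unmapping its own stack first if it is detached. */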
_Noreturn void pthread_exit(void *result)
{
	pthread_t self = pthread_self();
	sigset_t set;

	self->result = result;

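	/* Pop and run the pending cleanup handlers (pushed via
	 * pthread_cleanup_push) in LIFO order before anything else. */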
	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

	__pthread_tsd_run_dtors();

	__lock(self->exitlock);

	/* Mark this thread dead before decrementing count */
	__lock(self->killlock);
	self->dead = 1;

	/* Block all signals before decrementing the live thread count.
	 * This is important to ensure that dynamically allocated TLS
	 * is not under-allocated/over-committed, and possibly for other
	 * reasons as well. */
	__block_all_sigs(&set);

	/* Wait to unlock the kill lock, which governs functions like
	 * pthread_kill which target a thread id, until signals have
	 * been blocked. This precludes observation of the thread id
	 * as a live thread (with application code running in it) after
	 * the thread was reported dead by ESRCH being returned. */
	__unlock(self->killlock);

	/* It's impossible to determine whether this is "the last thread"
	 * until performing the atomic decrement, since multiple threads
	 * could exit at the same time. For the last thread, revert the
	 * decrement and unblock signals to give the atexit handlers and
	 * stdio cleanup code a consistent state. */
	if (a_fetch_add(&libc.threads_minus_1, -1)==0) {
		libc.threads_minus_1 = 0;
		__restore_sigs(&set);
		exit(0);
	}

	if (self->detached && self->map_base) {
		/* Detached threads must avoid the kernel clear_child_tid
		 * feature, since the virtual address will have been
		 * unmapped and possibly already reused by a new mapping
		 * at the time the kernel would perform the write. In
		 * the case of threads that started out detached, the
		 * initial clone flags are correct, but if the thread was
		 * detached later (== 2), we need to clear it here. */
		if (self->detached == 2) __syscall(SYS_set_tid_address, 0);

		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}

	for (;;) __syscall(SYS_exit, 0);
}

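/* Back ends used by the pthread_cleanup_push/pthread_cleanup_pop macros:
 * cleanup contexts are kept as a LIFO singly linked list hanging off the
 * thread's cancelbuf pointer. */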
void __do_cleanup_push(struct __ptcb *cb)
{
	struct pthread *self = pthread_self();
	cb->__next = self->cancelbuf;
	self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb *cb)
{
	__pthread_self()->cancelbuf = cb->__next;
}

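/* Entry point passed to __clone. If the creator requested explicit
 * scheduling, wait on the startlock until the parent has applied the
 * scheduling parameters; a value of 2 left in the lock means that
 * failed, and the new thread exits as if detached instead of running
 * the application's start function. */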
static int start(void *p)
{
	pthread_t self = p;
	if (self->startlock[0]) {
		__wait(self->startlock, 0, 1, 1);
		if (self->startlock[0]) {
			self->detached = 2;
			pthread_exit(0);
		}
		__restore_sigs(self->sigmask);
	}
	if (self->unblock_cancel)
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK,
			SIGPT_SET, 0, _NSIG/8);
	pthread_exit(self->start(self->start_arg));
	return 0;
}

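/* Round up to a whole number of pages. */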
#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static const size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);

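/* Weak dummies for the stdio usage markers; the real definitions come
 * from the stdio implementation when it is linked, letting the lock
 * initialization below skip streams that do not exist. */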
static FILE *const dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);

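/* Before the process becomes multithreaded, stdio locks are left in a
 * disabled (negative) state; flip them to 0 so locking takes effect
 * once the first thread is created. */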
static void init_file_lock(FILE *f)
{
	if (f && f->lock<0) f->lock = 0;
}

void *__copy_tls(unsigned char *);

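/* pthread_create: size and obtain the stack/TLS/TSD block (either carved
 * from an application-provided stack or freshly mmapped with an optional
 * guard), populate the new thread descriptor, and clone. */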
int pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
{
	int ret;
	size_t size, guard;
	struct pthread *self = pthread_self(), *new;
	unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit;
	unsigned flags = 0x7d8f00;
	int do_sched = 0;
	pthread_attr_t attr = {0};

	if (!self) return ENOSYS;
	if (!libc.threaded) {
		for (FILE *f=libc.ofl_head; f; f=f->next)
			init_file_lock(f);
		init_file_lock(__stdin_used);
		init_file_lock(__stdout_used);
		init_file_lock(__stderr_used);
		libc.threaded = 1;
	}
	if (attrp) attr = *attrp;

	__acquire_ptc();

	if (attr._a_stackaddr) {
		size_t need = libc.tls_size + __pthread_tsd_size;
		size = attr._a_stacksize + DEFAULT_STACK_SIZE;
		stack = (void *)(attr._a_stackaddr & -16);
		stack_limit = (void *)(attr._a_stackaddr - size);
		/* Use application-provided stack for TLS only when
		 * it does not take more than ~12% or 2k of the
		 * application's stack space. */
		if (need < size/8 && need < 2048) {
			tsd = stack - __pthread_tsd_size;
			stack = tsd - libc.tls_size;
		} else {
			size = ROUND(need);
			guard = 0;
		}
	} else {
		guard = ROUND(DEFAULT_GUARD_SIZE + attr._a_guardsize);
		size = guard + ROUND(DEFAULT_STACK_SIZE + attr._a_stacksize
			+ libc.tls_size + __pthread_tsd_size);
	}

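	/* No usable application-provided region: create a fresh mapping.
	 * With a guard, the whole block is mapped PROT_NONE first and the
	 * usable part above the guard is then made readable/writable, so
	 * the low pages stay inaccessible and catch stack overflows. */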
	if (!tsd) {
		if (guard) {
			map = mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
			if (mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)) {
				munmap(map, size);
				goto fail;
			}
		} else {
			map = mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
		}
		tsd = map + size - __pthread_tsd_size;
		if (!stack) {
			stack = tsd - libc.tls_size;
			stack_limit = map + guard;
		}
	}

	new = __copy_tls(tsd - libc.tls_size);
	new->map_base = map;
	new->map_size = size;
	new->stack = stack;
	new->stack_size = stack - stack_limit;
	new->pid = self->pid;
	new->errno_ptr = &new->errno_val;
	new->start = entry;
	new->start_arg = arg;
	new->self = new;
	new->tsd = (void *)tsd;
	if (attr._a_detach) {
		new->detached = 1;
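		/* Presumably this clears CLONE_CHILD_CLEARTID (0x200000) so
		 * the kernel never writes to the stack of a thread that
		 * starts out detached; see the matching comment in
		 * pthread_exit above. */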
		flags -= 0x200000;
	}
	if (attr._a_sched) {
		do_sched = new->startlock[0] = 1;
		__block_app_sigs(new->sigmask);
	}
	new->unblock_cancel = self->cancel;
	new->canary = self->canary;

	a_inc(&libc.threads_minus_1);
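	/* flags is the hard-coded set of CLONE_* bits for a new thread
	 * sharing the address space, files and signal handlers; __clone
	 * starts the child at start() on the prepared stack, passes
	 * TP_ADJ(new) as the new thread pointer, and the kernel reports
	 * the new thread id through new->tid. */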
	ret = __clone(start, stack, flags, new, &new->tid, TP_ADJ(new), &new->tid);

	__release_ptc();

	if (do_sched) {
		__restore_sigs(new->sigmask);
	}

	if (ret < 0) {
		a_dec(&libc.threads_minus_1);
		if (map) munmap(map, size);
		return EAGAIN;
	}

	if (do_sched) {
		ret = __syscall(SYS_sched_setscheduler, new->tid,
			attr._a_policy, &attr._a_prio);
		a_store(new->startlock, ret<0 ? 2 : 0);
		__wake(new->startlock, 1, 1);
		if (ret < 0) return -ret;
	}

	*res = new;
	return 0;
fail:
	__release_ptc();
	return EAGAIN;
}