Rich Felker | f68a346 | 2013-09-16 10:54:31 -0400 | [diff] [blame] | 1 | #define _GNU_SOURCE |
Rich Felker | 0b44a03 | 2011-02-12 00:22:29 -0500 | [diff] [blame] | 2 | #include "pthread_impl.h" |
Rich Felker | dba68bf | 2011-07-30 08:02:14 -0400 | [diff] [blame] | 3 | #include "stdio_impl.h" |
Szabolcs Nagy | b20760c | 2013-09-15 02:00:32 +0000 | [diff] [blame] | 4 | #include "libc.h" |
Rich Felker | efd4d87 | 2012-11-08 17:04:20 -0500 | [diff] [blame] | 5 | #include <sys/mman.h> |
Rich Felker | a629328 | 2014-08-22 14:05:10 -0400 | [diff] [blame] | 6 | #include <string.h> |
Rich Felker | 12e1e32 | 2015-04-10 00:26:34 -0400 | [diff] [blame] | 7 | #include <stddef.h> |
Rich Felker | 0b44a03 | 2011-02-12 00:22:29 -0500 | [diff] [blame] | 8 | |
Jens Gustedt | df7d0df | 2014-09-01 00:46:23 +0200 | [diff] [blame] | 9 | void *__mmap(void *, size_t, int, int, int, off_t); |
| 10 | int __munmap(void *, size_t); |
| 11 | int __mprotect(void *, size_t, int); |
| 12 | |
/* No-op default used as the weak definition for several hooks below.
 * Each hook is overridden by a strong definition in another translation
 * unit when the corresponding feature is linked in (TSD destructors,
 * stdio orphaned-lock handling, dynamic-linker thread cleanup, and the
 * pthread_atfork-related ptc lock). */
static void dummy_0()
{
}
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);
weak_alias(dummy_0, __do_orphaned_stdio_locks);
weak_alias(dummy_0, __dl_thread_cleanup);
Rich Felker | fd80cfa | 2011-04-03 02:33:50 -0400 | [diff] [blame] | 21 | |
/* Terminate the calling thread with the given result. Runs cancellation
 * cleanup handlers and TSD destructors, processes the userspace robust
 * mutex list, and either exits the whole process (last thread) or exits
 * just this thread via SYS_exit. Never returns. */
_Noreturn void __pthread_exit(void *result)
{
	pthread_t self = __pthread_self();
	sigset_t set;

	/* Disable cancellation permanently; exiting code must not be
	 * re-entered by an asynchronous cancellation signal. */
	self->canceldisable = 1;
	self->cancelasync = 0;
	self->result = result;

	/* Pop and run all pending cleanup handlers (pthread_cleanup_push
	 * frames linked through cancelbuf). */
	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

	__pthread_tsd_run_dtors();

	/* Taken permanently: pthread_join-style synchronization is governed
	 * by exitlock; it is never released since the thread is exiting. */
	__lock(self->exitlock);

	/* Mark this thread dead before decrementing count */
	__lock(self->killlock);
	self->dead = 1;

	/* Block all signals before decrementing the live thread count.
	 * This is important to ensure that dynamically allocated TLS
	 * is not under-allocated/over-committed, and possibly for other
	 * reasons as well. */
	__block_all_sigs(&set);

	/* Wait to unlock the kill lock, which governs functions like
	 * pthread_kill which target a thread id, until signals have
	 * been blocked. This precludes observation of the thread id
	 * as a live thread (with application code running in it) after
	 * the thread was reported dead by ESRCH being returned. */
	__unlock(self->killlock);

	/* It's impossible to determine whether this is "the last thread"
	 * until performing the atomic decrement, since multiple threads
	 * could exit at the same time. For the last thread, revert the
	 * decrement and unblock signals to give the atexit handlers and
	 * stdio cleanup code a consistent state. */
	if (a_fetch_add(&libc.threads_minus_1, -1)==0) {
		libc.threads_minus_1 = 0;
		__restore_sigs(&set);
		exit(0);
	}

	/* Process robust list in userspace to handle non-pshared mutexes
	 * and the detached thread case where the robust list head will
	 * be invalid when the kernel would process it. */
	__vm_lock();
	volatile void *volatile *rp;
	while ((rp=self->robust_list.head) && rp != &self->robust_list.head) {
		pthread_mutex_t *m = (void *)((char *)rp
			- offsetof(pthread_mutex_t, _m_next));
		int waiters = m->_m_waiters;
		int priv = (m->_m_type & 128) ^ 128;
		self->robust_list.pending = rp;
		self->robust_list.head = *rp;
		/* 0x40000000 is FUTEX_OWNER_DIED-style "owner dead" marking;
		 * a negative previous value means contention existed. */
		int cont = a_swap(&m->_m_lock, self->tid|0x40000000);
		self->robust_list.pending = 0;
		if (cont < 0 || waiters)
			__wake(&m->_m_lock, 1, priv);
	}
	__vm_unlock();

	__do_orphaned_stdio_locks();
	__dl_thread_cleanup();

	if (self->detached && self->map_base) {
		/* Detached threads must avoid the kernel clear_child_tid
		 * feature, since the virtual address will have been
		 * unmapped and possibly already reused by a new mapping
		 * at the time the kernel would perform the write. In
		 * the case of threads that started out detached, the
		 * initial clone flags are correct, but if the thread was
		 * detached later (== 2), we need to clear it here. */
		if (self->detached == 2) __syscall(SYS_set_tid_address, 0);

		/* Robust list will no longer be valid, and was already
		 * processed above, so unregister it with the kernel. */
		if (self->robust_list.off)
			__syscall(SYS_set_robust_list, 0, 3*sizeof(long));

		/* Since __unmapself bypasses the normal munmap code path,
		 * explicitly wait for vmlock holders first. */
		__vm_wait();

		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}

	/* Non-detached (or no-mapping) case: exit this thread only. The
	 * loop guards against spurious syscall return. */
	for (;;) __syscall(SYS_exit, 0);
}
Rich Felker | 0b44a03 | 2011-02-12 00:22:29 -0500 | [diff] [blame] | 118 | |
Rich Felker | cfd892f | 2012-05-23 14:13:54 -0400 | [diff] [blame] | 119 | void __do_cleanup_push(struct __ptcb *cb) |
Rich Felker | 5f37fc1 | 2011-08-03 19:57:46 -0400 | [diff] [blame] | 120 | { |
Rich Felker | df15168 | 2014-06-10 04:02:40 -0400 | [diff] [blame] | 121 | struct pthread *self = __pthread_self(); |
Rich Felker | 5f37fc1 | 2011-08-03 19:57:46 -0400 | [diff] [blame] | 122 | cb->__next = self->cancelbuf; |
| 123 | self->cancelbuf = cb; |
| 124 | } |
| 125 | |
Rich Felker | cfd892f | 2012-05-23 14:13:54 -0400 | [diff] [blame] | 126 | void __do_cleanup_pop(struct __ptcb *cb) |
Rich Felker | 5f37fc1 | 2011-08-03 19:57:46 -0400 | [diff] [blame] | 127 | { |
Rich Felker | afc35d5 | 2012-02-09 02:33:08 -0500 | [diff] [blame] | 128 | __pthread_self()->cancelbuf = cb->__next; |
Rich Felker | 5f37fc1 | 2011-08-03 19:57:46 -0400 | [diff] [blame] | 129 | } |
| 130 | |
/* Entry point for new threads created via pthread_create. Completes the
 * startup handshake with the parent, then runs the user start function
 * and exits with its return value. */
static int start(void *p)
{
	pthread_t self = p;
	/* startlock[0] is nonzero when the parent requested explicit
	 * scheduling: wait until the parent has called sched_setscheduler.
	 * The parent stores 2 on failure (see __pthread_create), in which
	 * case this thread detaches itself and exits without ever running
	 * application code; 0 means success and we proceed. */
	if (self->startlock[0]) {
		__wait(self->startlock, 0, 1, 1);
		if (self->startlock[0]) {
			self->detached = 2;
			pthread_exit(0);
		}
		/* Parent blocked app signals around clone; restore the
		 * mask it saved into our sigmask field. */
		__restore_sigs(self->sigmask);
	}
	/* Re-enable the internal cancellation signal if the creating
	 * thread had it unblocked. */
	if (self->unblock_cancel)
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK,
			SIGPT_SET, 0, _NSIG/8);
	__pthread_exit(self->start(self->start_arg));
	return 0;
}
| 148 | |
| 149 | static int start_c11(void *p) |
| 150 | { |
| 151 | pthread_t self = p; |
| 152 | int (*start)(void*) = (int(*)(void*)) self->start; |
| 153 | __pthread_exit((void *)(uintptr_t)start(self->start_arg)); |
Rich Felker | 3f72cda | 2011-09-18 10:14:37 -0400 | [diff] [blame] | 154 | return 0; |
Rich Felker | 0b44a03 | 2011-02-12 00:22:29 -0500 | [diff] [blame] | 155 | } |
| 156 | |
/* Round up to a whole number of pages. */
#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static volatile size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);
static void *dummy_tsd[1] = { 0 };
weak_alias(dummy_tsd, __pthread_tsd_main);

/* When nonzero, __pthread_create waits before making new threads
 * (set elsewhere, e.g. around operations that must see a stable
 * thread set). */
volatile int __block_new_threads = 0;

/* Weak null defaults; real definitions live in stdio when linked. */
static FILE *volatile dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);
| 171 | |
| 172 | static void init_file_lock(FILE *f) |
| 173 | { |
| 174 | if (f && f->lock<0) f->lock = 0; |
| 175 | } |
| 176 | |
Rich Felker | dcd6037 | 2012-10-05 11:51:50 -0400 | [diff] [blame] | 177 | void *__copy_tls(unsigned char *); |
Rich Felker | 8431d79 | 2012-10-04 16:35:46 -0400 | [diff] [blame] | 178 | |
/* Create a new thread running entry(arg). attrp may be a normal
 * attribute object, NULL, or the sentinel __ATTRP_C11_THREAD (in which
 * case entry is actually an int-returning C11 start function and the
 * start_c11 trampoline is used). Returns 0 on success or an errno
 * value (ENOSYS, EAGAIN, or a sched_setscheduler error). */
int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
{
	int ret, c11 = (attrp == __ATTRP_C11_THREAD);
	size_t size, guard;
	struct pthread *self, *new;
	unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit;
	unsigned flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND
		| CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS
		| CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID | CLONE_DETACHED;
	int do_sched = 0;
	pthread_attr_t attr = {0};

	if (!libc.can_do_threads) return ENOSYS;
	self = __pthread_self();
	/* One-time transition to threaded mode: initialize stdio locks
	 * (walking the open-file list under its lock), unblock the
	 * internal signals, and install the main thread's TSD array. */
	if (!libc.threaded) {
		for (FILE *f=*__ofl_lock(); f; f=f->next)
			init_file_lock(f);
		__ofl_unlock();
		init_file_lock(__stdin_used);
		init_file_lock(__stdout_used);
		init_file_lock(__stderr_used);
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, _NSIG/8);
		self->tsd = (void **)__pthread_tsd_main;
		libc.threaded = 1;
	}
	if (attrp && !c11) attr = *attrp;

	__acquire_ptc();
	if (__block_new_threads) __wait(&__block_new_threads, 0, 1, 1);

	if (attr._a_stackaddr) {
		size_t need = libc.tls_size + __pthread_tsd_size;
		size = attr._a_stacksize + DEFAULT_STACK_SIZE;
		/* Stack grows down from the (16-byte-aligned) address the
		 * application provided. */
		stack = (void *)(attr._a_stackaddr & -16);
		stack_limit = (void *)(attr._a_stackaddr - size);
		/* Use application-provided stack for TLS only when
		 * it does not take more than ~12% or 2k of the
		 * application's stack space. */
		if (need < size/8 && need < 2048) {
			tsd = stack - __pthread_tsd_size;
			stack = tsd - libc.tls_size;
			memset(stack, 0, need);
		} else {
			/* TLS/TSD get their own mapping below; only
			 * `need` bytes, and no guard page. */
			size = ROUND(need);
			guard = 0;
		}
	} else {
		guard = ROUND(DEFAULT_GUARD_SIZE + attr._a_guardsize);
		size = guard + ROUND(DEFAULT_STACK_SIZE + attr._a_stacksize
			+ libc.tls_size +  __pthread_tsd_size);
	}

	/* tsd is still null unless the application stack was large enough
	 * to carve TLS/TSD out of it; otherwise map fresh memory. */
	if (!tsd) {
		if (guard) {
			/* Map everything PROT_NONE first, then open up all
			 * but the guard region, so the guard stays
			 * inaccessible. */
			map = __mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
			if (__mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)) {
				__munmap(map, size);
				goto fail;
			}
		} else {
			map = __mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
		}
		/* Layout, top-down: TSD at the very top, TLS below it,
		 * stack growing down from the TLS base. */
		tsd = map + size - __pthread_tsd_size;
		if (!stack) {
			stack = tsd - libc.tls_size;
			stack_limit = map + guard;
		}
	}

	/* The new thread descriptor lives inside the TLS image. */
	new = __copy_tls(tsd - libc.tls_size);
	new->map_base = map;
	new->map_size = size;
	new->stack = stack;
	new->stack_size = stack - stack_limit;
	new->start = entry;
	new->start_arg = arg;
	new->self = new;
	new->tsd = (void *)tsd;
	new->locale = &libc.global_locale;
	if (attr._a_detach) {
		new->detached = 1;
		/* Kernel tid-clearing is only wanted for joinable threads;
		 * see the detached-exit comment in __pthread_exit. */
		flags -= CLONE_CHILD_CLEARTID;
	}
	if (attr._a_sched) {
		/* Handshake with start(): the child waits on startlock until
		 * we have applied the requested scheduling below. Block app
		 * signals so the child starts with a clean mask it restores
		 * from new->sigmask. */
		do_sched = new->startlock[0] = 1;
		__block_app_sigs(new->sigmask);
	}
	/* Empty robust list is represented by a self-pointing head. */
	new->robust_list.head = &new->robust_list.head;
	new->unblock_cancel = self->cancel;
	new->CANARY = self->CANARY;

	a_inc(&libc.threads_minus_1);
	ret = __clone((c11 ? start_c11 : start), stack, flags, new, &new->tid, TP_ADJ(new), &new->tid);

	__release_ptc();

	if (do_sched) {
		__restore_sigs(new->sigmask);
	}

	if (ret < 0) {
		a_dec(&libc.threads_minus_1);
		if (map) __munmap(map, size);
		return EAGAIN;
	}

	if (do_sched) {
		ret = __syscall(SYS_sched_setscheduler, new->tid,
			attr._a_policy, &attr._a_prio);
		/* 2 tells the waiting child to abort (it detaches and
		 * exits); 0 lets it proceed. Either way, wake it. */
		a_store(new->startlock, ret<0 ? 2 : 0);
		__wake(new->startlock, 1, 1);
		if (ret < 0) return -ret;
	}

	*res = new;
	return 0;
fail:
	__release_ptc();
	return EAGAIN;
}
Jens Gustedt | df7d0df | 2014-09-01 00:46:23 +0200 | [diff] [blame] | 301 | |
/* Public names are weak aliases so other libc internals can call the
 * __-prefixed versions without interposition by application symbols. */
weak_alias(__pthread_exit, pthread_exit);
weak_alias(__pthread_create, pthread_create);