#define	JEMALLOC_TSD_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

static unsigned ncleanups;
static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];

malloc_tsd_data(, , tsd_t, TSD_INITIALIZER)

/******************************************************************************/

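/*
 * TSD wrapper storage is allocated through the bootstrap arena (a0) so that it
 * is usable before the allocator is fully initialized.  Sizes are rounded up
 * to a cacheline boundary, presumably to avoid false sharing between threads'
 * wrappers.
 */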
void *
malloc_tsd_malloc(size_t size)
{

	return (a0malloc(CACHELINE_CEILING(size)));
}

void
malloc_tsd_dalloc(void *wrapper)
{

	a0dalloc(wrapper);
}

void
malloc_tsd_no_cleanup(void *arg)
{

	not_reached();
}

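/*
 * On platforms with a dedicated malloc thread cleanup hook (and on Windows,
 * via the TLS callback below), run all registered cleanup functions.  Each
 * function returns true to request another pass, so iterate until no cleanup
 * asks to be called again.
 */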
#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
#ifndef _WIN32
JEMALLOC_EXPORT
#endif
void
_malloc_thread_cleanup(void)
{
	bool pending[MALLOC_TSD_CLEANUPS_MAX], again;
	unsigned i;

	for (i = 0; i < ncleanups; i++)
		pending[i] = true;

	do {
		again = false;
		for (i = 0; i < ncleanups; i++) {
			if (pending[i]) {
				pending[i] = cleanups[i]();
				if (pending[i])
					again = true;
			}
		}
	} while (again);
}
#endif

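/*
 * Register a cleanup function to be run by _malloc_thread_cleanup().  The
 * registry is a fixed-size array, so registering more than
 * MALLOC_TSD_CLEANUPS_MAX functions is a programming error.
 */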
void
malloc_tsd_cleanup_register(bool (*f)(void))
{

	assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
	cleanups[ncleanups] = f;
	ncleanups++;
}

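/*
 * Destructor for the pthreads-style TSD implementations.  The state machine
 * below guards against other destructors touching jemalloc TSD after it has
 * been torn down: nominal data is cleaned up and parked in purgatory, and any
 * later reincarnation is sent back to purgatory on the next callback.
 */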
void
tsd_cleanup(void *arg)
{
	tsd_t *tsd = (tsd_t *)arg;

	switch (tsd->state) {
	case tsd_state_uninitialized:
		/* Do nothing. */
		break;
	case tsd_state_nominal:
#define	O(n, t)								\
		n##_cleanup(tsd);
MALLOC_TSD
#undef O
		tsd->state = tsd_state_purgatory;
		tsd_set(tsd);
		break;
	case tsd_state_purgatory:
		/*
		 * The previous time this destructor was called, we set the
		 * state to tsd_state_purgatory so that other destructors
		 * wouldn't cause re-creation of the tsd.  This time, do
		 * nothing, and do not request another callback.
		 */
		break;
	case tsd_state_reincarnated:
		/*
		 * Another destructor deallocated memory after this destructor
		 * was called.  Reset state to tsd_state_purgatory and request
		 * another callback.
		 */
		tsd->state = tsd_state_purgatory;
		tsd_set(tsd);
		break;
	default:
		not_reached();
	}
}

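/*
 * TSD bootstrapping is split into two phases: boot0 runs early enough that
 * arena tdata lookups must be bypassed, and boot1 completes initialization and
 * re-enables them.
 */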
bool
malloc_tsd_boot0(void)
{

	ncleanups = 0;
	if (tsd_boot0())
		return (true);
	*tsd_arenas_tdata_bypassp_get(tsd_fetch()) = true;
	return (false);
}

void
malloc_tsd_boot1(void)
{

	tsd_boot1();
	*tsd_arenas_tdata_bypassp_get(tsd_fetch()) = false;
}

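/*
 * Windows has no pthread key destructors, so thread cleanup is driven by a TLS
 * callback.  Placing a pointer to the callback in the .CRT$XLY section (and
 * forcing the linker to keep it via the pragmas below) makes the CRT invoke it
 * on DLL_THREAD_ATTACH/DLL_THREAD_DETACH.
 */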
#ifdef _WIN32
static BOOL WINAPI
_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
{

	switch (fdwReason) {
#ifdef JEMALLOC_LAZY_LOCK
	case DLL_THREAD_ATTACH:
		isthreaded = true;
		break;
#endif
	case DLL_THREAD_DETACH:
		_malloc_thread_cleanup();
		break;
	default:
		break;
	}
	return (true);
}

#ifdef _MSC_VER
#  ifdef _M_IX86
#    pragma comment(linker, "/INCLUDE:__tls_used")
#    pragma comment(linker, "/INCLUDE:_tls_callback")
#  else
#    pragma comment(linker, "/INCLUDE:_tls_used")
#    pragma comment(linker, "/INCLUDE:tls_callback")
#  endif
#  pragma section(".CRT$XLY",long,read)
#endif
JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
BOOL	(WINAPI *const tls_callback)(HINSTANCE hinstDLL,
    DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
#endif

#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
    !defined(_WIN32))
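/*
 * For the fallback TSD backend (no compiler TLS, no thread cleanup hook, not
 * Windows), initialization can recurse into the allocator.  Track in-progress
 * initializations per thread so a recursive call can observe the partially
 * initialized block instead of re-entering initialization.
 */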
void *
tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
{
	pthread_t self = pthread_self();
	tsd_init_block_t *iter;

	/* Check whether this thread has already inserted into the list. */
	malloc_mutex_lock(&head->lock);
	ql_foreach(iter, &head->blocks, link) {
		if (iter->thread == self) {
			malloc_mutex_unlock(&head->lock);
			return (iter->data);
		}
	}
	/* Insert block into list. */
	ql_elm_new(block, link);
	block->thread = self;
	ql_tail_insert(&head->blocks, block, link);
	malloc_mutex_unlock(&head->lock);
	return (NULL);
}

void
tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
{

	malloc_mutex_lock(&head->lock);
	ql_remove(&head->blocks, block, link);
	malloc_mutex_unlock(&head->lock);
}
#endif
195#endif