#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */
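/*
 * base_mtx protects all of the state below.  base_avail_szad is a
 * size/address-ordered tree of unused space trimmed from base chunks,
 * base_nodes is a LIFO free list of extent nodes, and the remaining counters
 * back base_stats_get().
 */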

static malloc_mutex_t	base_mtx;
static extent_tree_t	base_avail_szad;
static extent_node_t	*base_nodes;
static size_t		base_allocated;
static size_t		base_resident;
static size_t		base_mapped;

/******************************************************************************/

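/*
 * Pop an extent node off the base_nodes free list, or return NULL if the list
 * is empty.  The caller must hold base_mtx.
 */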
static extent_node_t *
base_node_try_alloc(tsd_t *tsd)
{
	extent_node_t *node;

	malloc_mutex_assert_owner(tsd, &base_mtx);

	if (base_nodes == NULL)
		return (NULL);
	node = base_nodes;
	base_nodes = *(extent_node_t **)node;
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	return (node);
}

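/*
 * Push an extent node back onto the base_nodes free list, marking its memory
 * undefined for Valgrind.  The caller must hold base_mtx.
 */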
static void
base_node_dalloc(tsd_t *tsd, extent_node_t *node)
{

	malloc_mutex_assert_owner(tsd, &base_mtx);

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
}

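/*
 * Map a new base chunk of at least minsize bytes via chunk_alloc_base(),
 * carving an extent node out of the chunk's head if none could be recycled
 * from base_nodes.  Returns NULL on failure.  The caller must hold base_mtx.
 */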
static extent_node_t *
base_chunk_alloc(tsd_t *tsd, size_t minsize)
{
	extent_node_t *node;
	size_t csize, nsize;
	void *addr;

	malloc_mutex_assert_owner(tsd, &base_mtx);
	assert(minsize != 0);
	node = base_node_try_alloc(tsd);
	/* Allocate enough space to also carve a node out if necessary. */
	nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
	csize = CHUNK_CEILING(minsize + nsize);
	addr = chunk_alloc_base(csize);
	if (addr == NULL) {
		if (node != NULL)
			base_node_dalloc(tsd, node);
		return (NULL);
	}
	base_mapped += csize;
	if (node == NULL) {
		node = (extent_node_t *)addr;
		addr = (void *)((uintptr_t)addr + nsize);
		csize -= nsize;
		if (config_stats) {
			base_allocated += nsize;
			base_resident += PAGE_CEILING(nsize);
		}
	}
	extent_node_init(node, NULL, addr, csize, true, true);
	return (node);
}

/*
 * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
 * sparse data structures such as radix tree nodes efficient with respect to
 * physical memory usage.
 */
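/*
 * There is no corresponding deallocation function; any unused trailing space
 * in a base chunk is kept in base_avail_szad for reuse by subsequent calls.
 * base_mtx is acquired internally, so callers need not hold it.
 */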
void *
base_alloc(tsd_t *tsd, size_t size)
{
	void *ret;
	size_t csize, usize;
	extent_node_t *node;
	extent_node_t key;

	/*
	 * Round size up to the nearest multiple of the cacheline size, so that
	 * there is no chance of false cache line sharing.
	 */
	csize = CACHELINE_CEILING(size);

	usize = s2u(csize);
	extent_node_init(&key, NULL, NULL, usize, false, false);
	malloc_mutex_lock(tsd, &base_mtx);
	node = extent_tree_szad_nsearch(&base_avail_szad, &key);
	if (node != NULL) {
		/* Use existing space. */
		extent_tree_szad_remove(&base_avail_szad, node);
	} else {
		/* Try to allocate more space. */
		node = base_chunk_alloc(tsd, csize);
	}
	if (node == NULL) {
		ret = NULL;
		goto label_return;
	}

	ret = extent_node_addr_get(node);
	if (extent_node_size_get(node) > csize) {
		extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
		extent_node_size_set(node, extent_node_size_get(node) - csize);
		extent_tree_szad_insert(&base_avail_szad, node);
	} else
		base_node_dalloc(tsd, node);
	if (config_stats) {
		base_allocated += csize;
		/*
		 * Add one PAGE to base_resident for every page boundary that
		 * is crossed by the new allocation.
		 */
		base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
		    PAGE_CEILING((uintptr_t)ret);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
	malloc_mutex_unlock(tsd, &base_mtx);
	return (ret);
}

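/*
 * Snapshot the base allocator's statistics counters under base_mtx so that
 * the reported values are mutually consistent.
 */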
void
base_stats_get(tsd_t *tsd, size_t *allocated, size_t *resident, size_t *mapped)
{

	malloc_mutex_lock(tsd, &base_mtx);
	assert(base_allocated <= base_resident);
	assert(base_resident <= base_mapped);
	*allocated = base_allocated;
	*resident = base_resident;
	*mapped = base_mapped;
	malloc_mutex_unlock(tsd, &base_mtx);
}

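/*
 * Initialize the base allocator's mutex and data structures.  Returns true on
 * error.
 */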
bool
base_boot(void)
{

	if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
		return (true);
	extent_tree_szad_new(&base_avail_szad);
	base_nodes = NULL;

	return (false);
}

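/*
 * Fork handlers: base_prefork() acquires base_mtx before fork(2), and the
 * postfork functions restore it to a usable state in the parent and child.
 */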
void
base_prefork(tsd_t *tsd)
{

	malloc_mutex_prefork(tsd, &base_mtx);
}

void
base_postfork_parent(tsd_t *tsd)
{

	malloc_mutex_postfork_parent(tsd, &base_mtx);
}

void
base_postfork_child(tsd_t *tsd)
{

	malloc_mutex_postfork_child(tsd, &base_mtx);
}