/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)

/* Maximum number of regions in one run. */
#define LG_RUN_MAXREGS (LG_PAGE - LG_TINY_MIN)
#define RUN_MAXREGS (1U << LG_RUN_MAXREGS)
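
/*
 * For instance, assuming LG_PAGE == 12 (4 KiB pages) and LG_TINY_MIN == 3
 * (8-byte minimum size class), LG_RUN_MAXREGS == 9, so a single run can hold
 * at most RUN_MAXREGS == 512 regions.
 */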

/*
 * Minimum redzone size.  Redzones may be larger than this if necessary to
 * preserve region alignment.
 */
#define REDZONE_MINSIZE 16

/*
 * The minimum ratio of active:dirty pages per arena is computed as:
 *
 *   (nactive >> opt_lg_dirty_mult) >= ndirty
 *
 * So, supposing that opt_lg_dirty_mult is 3, there can be no fewer than 8
 * times as many active pages as dirty pages.
 */
#define LG_DIRTY_MULT_DEFAULT 3
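
/*
 * A worked instance of the ratio above: with opt_lg_dirty_mult == 3 and
 * nactive == 1024, purging keeps ndirty <= (1024 >> 3) == 128 pages.
 */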

typedef struct arena_run_s arena_run_t;
typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_bin_info_s arena_bin_info_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct arena_run_s {
	/* Index of bin this run is associated with. */
	index_t binind;

	/* Number of free regions in run. */
	unsigned nfree;

	/* Per region allocated/deallocated bitmap. */
	bitmap_t bitmap[BITMAP_GROUPS_MAX];
};
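
/*
 * Region allocation consumes this bitmap; a sketch of the fast path,
 * mirroring what arena_run_reg_alloc() in arena.c is assumed to do:
 *
 *   regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
 *   ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
 *       (uintptr_t)(bin_info->reg_interval * regind));
 */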

/* Each element of the chunk map corresponds to one page within the chunk. */
struct arena_chunk_map_bits_s {
	/*
	 * Run address (or size) and various flags are stored together.  The bit
	 * layout looks like (assuming 32-bit system):
	 *
	 *   ???????? ???????? ????nnnn nnnndula
	 *
	 * ? : Unallocated: Run address for first/last pages, unset for internal
	 *     pages.
	 *     Small: Run page offset.
	 *     Large: Run size for first page, unset for trailing pages.
	 * n : binind for small size class, BININD_INVALID for large size class.
	 * d : dirty?
	 * u : unzeroed?
	 * l : large?
	 * a : allocated?
	 *
	 * Following are example bit patterns for the three types of runs.
	 *
	 * p : run page offset
	 * s : run size
	 * n : binind for size class; large objects set these to BININD_INVALID
	 * x : don't care
	 * - : 0
	 * + : 1
	 * [DULA] : bit set
	 * [dula] : bit unset
	 *
	 *   Unallocated (clean):
	 *     ssssssss ssssssss ssss++++ ++++du-a
	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx
	 *     ssssssss ssssssss ssss++++ ++++dU-a
	 *
	 *   Unallocated (dirty):
	 *     ssssssss ssssssss ssss++++ ++++D--a
	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
	 *     ssssssss ssssssss ssss++++ ++++D--a
	 *
	 *   Small:
	 *     pppppppp pppppppp ppppnnnn nnnnd--A
	 *     pppppppp pppppppp ppppnnnn nnnn---A
	 *     pppppppp pppppppp ppppnnnn nnnnd--A
	 *
	 *   Large:
	 *     ssssssss ssssssss ssss++++ ++++D-LA
	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
	 *     -------- -------- ----++++ ++++D-LA
	 *
	 *   Large (sampled, size <= LARGE_MINCLASS):
	 *     ssssssss ssssssss ssssnnnn nnnnD-LA
	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
	 *     -------- -------- ----++++ ++++D-LA
	 *
	 *   Large (not sampled, size == LARGE_MINCLASS):
	 *     ssssssss ssssssss ssss++++ ++++D-LA
	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
	 *     -------- -------- ----++++ ++++D-LA
	 */
	size_t bits;
#define CHUNK_MAP_BININD_SHIFT 4
#define BININD_INVALID ((size_t)0xffU)
/* CHUNK_MAP_BININD_MASK == (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) */
#define CHUNK_MAP_BININD_MASK ((size_t)0xff0U)
#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK
#define CHUNK_MAP_FLAGS_MASK ((size_t)0xcU)
#define CHUNK_MAP_DIRTY ((size_t)0x8U)
#define CHUNK_MAP_UNZEROED ((size_t)0x4U)
#define CHUNK_MAP_LARGE ((size_t)0x2U)
#define CHUNK_MAP_ALLOCATED ((size_t)0x1U)
#define CHUNK_MAP_KEY CHUNK_MAP_ALLOCATED
};
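
/*
 * Decoding sketch, mirroring the inline accessors defined later in this
 * header ("bits" is a hypothetical mapbits value for an allocated small
 * page):
 *
 *   index_t binind = (bits & CHUNK_MAP_BININD_MASK) >>
 *       CHUNK_MAP_BININD_SHIFT;
 *   bool dirty = (bits & CHUNK_MAP_DIRTY) != 0;
 *   bool large = (bits & CHUNK_MAP_LARGE) != 0;
 *   bool allocated = (bits & CHUNK_MAP_ALLOCATED) != 0;
 */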

/*
 * Each arena_chunk_map_misc_t corresponds to one page within the chunk, just
 * like arena_chunk_map_bits_t.  Two separate arrays are stored within each
 * chunk header in order to improve cache locality.
 */
struct arena_chunk_map_misc_s {
	/*
	 * Linkage for run trees.  There are two disjoint uses:
	 *
	 * 1) arena_t's runs_avail tree.
	 * 2) arena_run_t conceptually uses this linkage for in-use non-full
	 *    runs, rather than directly embedding linkage.
	 */
	rb_node(arena_chunk_map_misc_t) rb_link;

	union {
		/* Linkage for list of dirty runs. */
		ql_elm(arena_chunk_map_misc_t) dr_link;

		/* Profile counters, used for large object runs. */
		prof_tctx_t *prof_tctx;

		/* Small region run metadata. */
		arena_run_t run;
	};
};
typedef rb_tree(arena_chunk_map_misc_t) arena_avail_tree_t;
typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t;
typedef ql_head(arena_chunk_map_misc_t) arena_chunk_miscelms_t;

/* Arena chunk header. */
struct arena_chunk_s {
	/* Arena that owns the chunk. */
	arena_t *arena;

	/*
	 * Map of pages within chunk that keeps track of free/large/small.  The
	 * first map_bias entries are omitted, since the chunk header does not
	 * need to be tracked in the map.  This omission saves a header page
	 * for common chunk sizes (e.g. 4 MiB).
	 */
	arena_chunk_map_bits_t map_bits[1]; /* Dynamically sized. */
};

/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 *
 * Each run has the following layout:
 *
 *               /--------------------\
 *               | pad?               |
 *               |--------------------|
 *               | redzone            |
 *   reg0_offset | region 0           |
 *               | redzone            |
 *               |--------------------| \
 *               | redzone            | |
 *               | region 1           |  > reg_interval
 *               | redzone            | /
 *               |--------------------|
 *               | ...                |
 *               | ...                |
 *               | ...                |
 *               |--------------------|
 *               | redzone            |
 *               | region nregs-1     |
 *               | redzone            |
 *               |--------------------|
 *               | alignment pad?     |
 *               \--------------------/
 *
 * reg_interval has at least the same minimum alignment as reg_size; this
 * preserves the alignment constraint that sa2u() depends on.  Alignment pad is
 * either 0 or redzone_size; it is present only if needed to align reg0_offset.
 */
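
/*
 * Worked example (assuming a bin with redzones enabled and redzone_size ==
 * REDZONE_MINSIZE): for reg_size == 32, reg_interval == 32 + (16 << 1) == 64,
 * and region i starts reg0_offset + i * 64 bytes into the run.  When redzones
 * are not in use, redzone_size == 0 and reg_interval == reg_size.
 */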
struct arena_bin_info_s {
	/* Size of regions in a run for this bin's size class. */
	size_t reg_size;

	/* Redzone size. */
	size_t redzone_size;

	/* Interval between regions (reg_size + (redzone_size << 1)). */
	size_t reg_interval;

	/* Total size of a run for this bin's size class. */
	size_t run_size;

	/* Total number of regions in a run for this bin's size class. */
	uint32_t nregs;

	/*
	 * Metadata used to manipulate bitmaps for runs associated with this
	 * bin.
	 */
	bitmap_info_t bitmap_info;

	/* Offset of first region in a run for this bin's size class. */
	uint32_t reg0_offset;
};

struct arena_bin_s {
	/*
	 * All operations on runcur, runs, and stats require that lock be
	 * locked.  Run allocation/deallocation are protected by the arena lock,
	 * which may be acquired while holding one or more bin locks, but not
	 * vice versa.
	 */
	malloc_mutex_t lock;

	/*
	 * Current run being used to service allocations of this bin's size
	 * class.
	 */
	arena_run_t *runcur;

	/*
	 * Tree of non-full runs.  This tree is used when looking for an
	 * existing run when runcur is no longer usable.  We choose the
	 * non-full run that is lowest in memory; this policy tends to keep
	 * objects packed well, and it can also help reduce the number of
	 * almost-empty chunks.
	 */
	arena_run_tree_t runs;

	/* Bin statistics. */
	malloc_bin_stats_t stats;
};

struct arena_s {
	/* This arena's index within the arenas array. */
	unsigned ind;

	/*
	 * Number of threads currently assigned to this arena.  This field is
	 * protected by arenas_lock.
	 */
	unsigned nthreads;

	/*
	 * There are three classes of arena operations from a locking
	 * perspective:
	 * 1) Thread assignment (modifies nthreads) is protected by
	 *    arenas_lock.
	 * 2) Bin-related operations are protected by bin locks.
	 * 3) Chunk- and run-related operations are protected by this mutex.
	 */
	malloc_mutex_t lock;

	arena_stats_t stats;
	/*
	 * List of tcaches for extant threads associated with this arena.
	 * Stats from these are merged incrementally, and at exit.
	 */
	ql_head(tcache_t) tcache_ql;

	uint64_t prof_accumbytes;

	dss_prec_t dss_prec;

	/*
	 * In order to avoid rapid chunk allocation/deallocation when an arena
	 * oscillates right on the cusp of needing a new chunk, cache the most
	 * recently freed chunk.  The spare is left in the arena's chunk trees
	 * until it is deleted.
	 *
	 * There is one spare chunk per arena, rather than one spare total, in
	 * order to avoid interactions between multiple threads that could make
	 * a single spare inadequate.
	 */
	arena_chunk_t *spare;

	/* Number of pages in active runs and huge regions. */
	size_t nactive;

	/*
	 * Current count of pages within unused runs that are potentially
	 * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
	 * By tracking this, we can institute a limit on how much dirty unused
	 * memory is mapped for each arena.
	 */
	size_t ndirty;

	/*
	 * Size/address-ordered trees of this arena's available runs.  The trees
	 * are used for first-best-fit run allocation.
	 */
	arena_avail_tree_t runs_avail;

	/* List of dirty runs this arena manages. */
	arena_chunk_miscelms_t runs_dirty;

	/*
	 * User-configurable chunk allocation and deallocation functions.
	 */
	chunk_alloc_t *chunk_alloc;
	chunk_dalloc_t *chunk_dalloc;
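	/*
	 * These default to the library's internal implementations and are
	 * meant to be overridden per arena (e.g., via the
	 * "arena.<i>.chunk.alloc" and "arena.<i>.chunk.dalloc" mallctls,
	 * assuming this version's mallctl namespace).
	 */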

	/* bins is used to store trees of free regions. */
	arena_bin_t bins[NBINS];
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern ssize_t opt_lg_dirty_mult;

extern arena_bin_info_t arena_bin_info[NBINS];

extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t map_misc_offset;
extern size_t arena_maxrun; /* Max run size for arenas. */
extern size_t arena_maxclass; /* Max size class for arenas. */
extern unsigned nlclasses; /* Number of large size classes. */
extern unsigned nhclasses; /* Number of huge size classes. */

void *arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
    bool *zero);
void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize);
void arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk,
    size_t oldsize, size_t usize);
void arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk,
    size_t oldsize, size_t usize);
bool arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk,
    size_t oldsize, size_t usize, bool *zero);
void arena_purge_all(arena_t *arena);
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
    index_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
    bool zero);
#ifdef JEMALLOC_JET
typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
    uint8_t);
extern arena_redzone_corruption_t *arena_redzone_corruption;
typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
#else
void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
#endif
void arena_quarantine_junk_small(void *ptr, size_t usize);
void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
void arena_prof_promoted(const void *ptr, size_t size);
void arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, arena_chunk_map_bits_t *bitselm);
void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind, arena_chunk_map_bits_t *bitselm);
void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind);
#ifdef JEMALLOC_JET
typedef void (arena_dalloc_junk_large_t)(void *, size_t);
extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
#else
void arena_dalloc_junk_large(void *ptr, size_t usize);
#endif
void arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
    void *ptr);
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
#ifdef JEMALLOC_JET
typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
#endif
bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t extra, size_t alignment, bool zero,
    bool try_tcache_alloc, bool try_tcache_dalloc);
dss_prec_t arena_dss_prec_get(arena_t *arena);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
void arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
    malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
arena_t *arena_new(unsigned ind);
void arena_boot(void);
void arena_prefork(arena_t *arena);
void arena_postfork_parent(arena_t *arena);
void arena_postfork_child(arena_t *arena);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
arena_chunk_map_bits_t *arena_bitselm_get(arena_chunk_t *chunk,
    size_t pageind);
arena_chunk_map_misc_t *arena_miscelm_get(arena_chunk_t *chunk,
    size_t pageind);
size_t arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm);
void *arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm);
arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run);
size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbitsp_read(size_t *mapbitsp);
size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
    size_t pageind);
size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
index_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
    size_t size, size_t flags);
void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
    size_t size);
void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
    size_t size, size_t flags);
void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
    index_t binind);
void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
    size_t runind, index_t binind, size_t flags);
void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
    size_t unzeroed);
bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
index_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
index_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
    const void *ptr);
prof_tctx_t *arena_prof_tctx_get(const void *ptr);
void arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
    bool try_tcache);
size_t arena_salloc(const void *ptr, bool demote);
void arena_dalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr,
    bool try_tcache);
void arena_sdalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, size_t size,
    bool try_tcache);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A
JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
{

	assert(pageind >= map_bias);
	assert(pageind < chunk_npages);

	return (&chunk->map_bits[pageind-map_bias]);
}

JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
{

	assert(pageind >= map_bias);
	assert(pageind < chunk_npages);

	return ((arena_chunk_map_misc_t *)((uintptr_t)chunk +
	    (uintptr_t)map_misc_offset) + pageind-map_bias);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk +
	    map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias;

	assert(pageind >= map_bias);
	assert(pageind < chunk_npages);

	return (pageind);
}

JEMALLOC_ALWAYS_INLINE void *
arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	size_t pageind = arena_miscelm_to_pageind(miscelm);

	return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE)));
}

JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
arena_run_to_miscelm(arena_run_t *run)
{
	arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
	    *)((uintptr_t)run - offsetof(arena_chunk_map_misc_t, run));

	assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
	assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);

	return (miscelm);
}

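/*
 * A minimal round-trip sketch of the conversions above (values hypothetical):
 *
 *   arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
 *   assert(arena_miscelm_to_pageind(miscelm) == pageind);
 *   assert(arena_run_to_miscelm(&miscelm->run) == miscelm);
 *   assert(arena_miscelm_to_rpages(miscelm) ==
 *       (void *)((uintptr_t)chunk + (pageind << LG_PAGE)));
 */
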
JEMALLOC_ALWAYS_INLINE size_t *
arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
{

	return (&arena_bitselm_get(chunk, pageind)->bits);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbitsp_read(size_t *mapbitsp)
{

	return (*mapbitsp);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
{

	return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
	return (mapbits & ~PAGE_MASK);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
	    (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
	return (mapbits & ~PAGE_MASK);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
	    CHUNK_MAP_ALLOCATED);
	return (mapbits >> LG_PAGE);
}

JEMALLOC_ALWAYS_INLINE index_t
arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;
	index_t binind;

	mapbits = arena_mapbits_get(chunk, pageind);
	binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
	assert(binind < NBINS || binind == BININD_INVALID);
	return (binind);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	return (mapbits & CHUNK_MAP_DIRTY);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	return (mapbits & CHUNK_MAP_UNZEROED);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	return (mapbits & CHUNK_MAP_LARGE);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	return (mapbits & CHUNK_MAP_ALLOCATED);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
{

	*mapbitsp = mapbits;
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);

	assert((size & PAGE_MASK) == 0);
	assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
	assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags);
	arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
    size_t size)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);

	assert((size & PAGE_MASK) == 0);
	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
	arena_mapbitsp_write(mapbitsp, size | (mapbits & PAGE_MASK));
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);
	size_t unzeroed;

	assert((size & PAGE_MASK) == 0);
	assert((flags & CHUNK_MAP_DIRTY) == flags);
	unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
	arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags
	    | unzeroed | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
    index_t binind)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);

	assert(binind <= BININD_INVALID);
	assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS);
	arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
	    (binind << CHUNK_MAP_BININD_SHIFT));
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
    index_t binind, size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);
	size_t unzeroed;

	assert(binind < BININD_INVALID);
	assert(pageind - runind >= map_bias);
	assert((flags & CHUNK_MAP_DIRTY) == flags);
	unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
	arena_mapbitsp_write(mapbitsp, (runind << LG_PAGE) | (binind <<
	    CHUNK_MAP_BININD_SHIFT) | flags | unzeroed | CHUNK_MAP_ALLOCATED);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
    size_t unzeroed)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);

	arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_UNZEROED) |
	    unzeroed);
}

JEMALLOC_INLINE bool
arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);
	assert(prof_interval != 0);

	arena->prof_accumbytes += accumbytes;
	if (arena->prof_accumbytes >= prof_interval) {
		arena->prof_accumbytes -= prof_interval;
		return (true);
	}
	return (false);
}

JEMALLOC_INLINE bool
arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);

	if (likely(prof_interval == 0))
		return (false);
	return (arena_prof_accum_impl(arena, accumbytes));
}

JEMALLOC_INLINE bool
arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);

	if (likely(prof_interval == 0))
		return (false);

	{
		bool ret;

		malloc_mutex_lock(&arena->lock);
		ret = arena_prof_accum_impl(arena, accumbytes);
		malloc_mutex_unlock(&arena->lock);
		return (ret);
	}
}
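
/*
 * Accounting sketch for the accumulators above (interval value hypothetical):
 * with prof_interval == 1024 KiB and arena->prof_accumbytes == 900 KiB,
 * arena_prof_accum(arena, 200 KiB) leaves prof_accumbytes == 76 KiB and
 * returns true, indicating that an interval boundary was crossed.
 */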

JEMALLOC_ALWAYS_INLINE index_t
arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
{
	index_t binind;

	binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;

	if (config_debug) {
		arena_chunk_t *chunk;
		arena_t *arena;
		size_t pageind;
		size_t actual_mapbits;
		size_t rpages_ind;
		arena_run_t *run;
		arena_bin_t *bin;
		index_t run_binind, actual_binind;
		arena_bin_info_t *bin_info;
		arena_chunk_map_misc_t *miscelm;
		void *rpages;

		assert(binind != BININD_INVALID);
		assert(binind < NBINS);
		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		arena = chunk->arena;
		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
		actual_mapbits = arena_mapbits_get(chunk, pageind);
		assert(mapbits == actual_mapbits);
		assert(arena_mapbits_large_get(chunk, pageind) == 0);
		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
		rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
		    pageind);
		miscelm = arena_miscelm_get(chunk, rpages_ind);
		run = &miscelm->run;
		run_binind = run->binind;
		bin = &arena->bins[run_binind];
		actual_binind = bin - arena->bins;
		assert(run_binind == actual_binind);
		bin_info = &arena_bin_info[actual_binind];
		rpages = arena_miscelm_to_rpages(miscelm);
		assert(((uintptr_t)ptr - ((uintptr_t)rpages +
		    (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
		    == 0);
	}

	return (binind);
}
# endif /* JEMALLOC_ARENA_INLINE_A */

# ifdef JEMALLOC_ARENA_INLINE_B
JEMALLOC_INLINE index_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
	index_t binind = bin - arena->bins;
	assert(binind < NBINS);
	return (binind);
}

JEMALLOC_INLINE unsigned
arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
{
	unsigned shift, diff, regind;
	size_t interval;
	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
	void *rpages = arena_miscelm_to_rpages(miscelm);

	/*
	 * Freeing a pointer lower than region zero can cause assertion
	 * failure.
	 */
	assert((uintptr_t)ptr >= (uintptr_t)rpages +
	    (uintptr_t)bin_info->reg0_offset);

	/*
	 * Avoid doing division with a variable divisor if possible.  Using
	 * actual division here can reduce allocator throughput by over 20%!
	 */
	diff = (unsigned)((uintptr_t)ptr - (uintptr_t)rpages -
	    bin_info->reg0_offset);

	/* Rescale (factor powers of 2 out of the numerator and denominator). */
	interval = bin_info->reg_interval;
	shift = jemalloc_ffs(interval) - 1;
	diff >>= shift;
	interval >>= shift;

	if (interval == 1) {
		/* The divisor was a power of 2. */
		regind = diff;
	} else {
		/*
		 * To divide by a number D that is not a power of two we
		 * multiply by (2^21 / D) and then right shift by 21 positions.
		 *
		 *   X / D
		 *
		 * becomes
		 *
		 *   (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
		 *
		 * We can omit the first three elements, because we never
		 * divide by 0, and 1 and 2 are both powers of two, which are
		 * handled above.
		 */
#define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1)
		static const unsigned interval_invs[] = {
		    SIZE_INV(3),
		    SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
		    SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
		    SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
		    SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
		    SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
		    SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
		    SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
		};

		if (likely(interval <= ((sizeof(interval_invs) /
		    sizeof(unsigned)) + 2))) {
			regind = (diff * interval_invs[interval - 3]) >>
			    SIZE_INV_SHIFT;
		} else
			regind = diff / interval;
#undef SIZE_INV
#undef SIZE_INV_SHIFT
	}
	assert(diff == regind * interval);
	assert(regind < bin_info->nregs);

	return (regind);
}
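
/*
 * Worked instance of the reciprocal-multiply trick above, assuming
 * SIZE_INV_SHIFT == 21 as in the comment: for interval == 3,
 * SIZE_INV(3) == ((1U << 21) / 3) + 1 == 699051, so for diff == 9,
 * (9 * 699051) >> 21 == 6291459 >> 21 == 3 == 9 / 3.
 */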

JEMALLOC_INLINE prof_tctx_t *
arena_prof_tctx_get(const void *ptr)
{
	prof_tctx_t *ret;
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	cassert(config_prof);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	mapbits = arena_mapbits_get(chunk, pageind);
	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
	if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
		ret = (prof_tctx_t *)(uintptr_t)1U;
	else
		ret = arena_miscelm_get(chunk, pageind)->prof_tctx;

	return (ret);
}

JEMALLOC_INLINE void
arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
	arena_chunk_t *chunk;
	size_t pageind;

	cassert(config_prof);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);

	if (unlikely(arena_mapbits_large_get(chunk, pageind) != 0))
		arena_miscelm_get(chunk, pageind)->prof_tctx = tctx;
}

JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
    bool try_tcache)
{
	tcache_t *tcache;

	assert(size != 0);
	assert(size <= arena_maxclass);

	if (likely(size <= SMALL_MAXCLASS)) {
		if (likely(try_tcache) && likely((tcache = tcache_get(tsd,
		    true)) != NULL))
			return (tcache_alloc_small(tcache, size, zero));
		else {
			arena = arena_choose(tsd, arena);
			if (unlikely(arena == NULL))
				return (NULL);
			return (arena_malloc_small(arena, size, zero));
		}
	} else {
		/*
		 * Initialize tcache after checking size in order to avoid
		 * infinite recursion during tcache initialization.
		 */
		if (try_tcache && size <= tcache_maxclass && likely((tcache =
		    tcache_get(tsd, true)) != NULL))
			return (tcache_alloc_large(tcache, size, zero));
		else {
			arena = arena_choose(tsd, arena);
			if (unlikely(arena == NULL))
				return (NULL);
			return (arena_malloc_large(arena, size, zero));
		}
	}
}

/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;
	size_t pageind;
	index_t binind;

	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
	binind = arena_mapbits_binind_get(chunk, pageind);
	if (unlikely(binind == BININD_INVALID || (config_prof && !demote &&
	    arena_mapbits_large_get(chunk, pageind) != 0))) {
		/*
		 * Large allocation.  In the common case (demote), and as this
		 * is an inline function, most callers will only end up looking
		 * at binind to determine that ptr is a small allocation.
		 */
		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
		ret = arena_mapbits_large_size_get(chunk, pageind);
		assert(ret != 0);
		assert(pageind + (ret>>LG_PAGE) <= chunk_npages);
		assert(arena_mapbits_dirty_get(chunk, pageind) ==
		    arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1));
	} else {
		/* Small allocation (possibly promoted to a large object). */
		assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
		    arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
		    pageind)) == binind);
		ret = index2size(binind);
	}

	return (ret);
}

JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, bool try_tcache)
{
	size_t pageind, mapbits;
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	mapbits = arena_mapbits_get(chunk, pageind);
	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
	if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
		/* Small allocation. */
		if (likely(try_tcache) && likely((tcache = tcache_get(tsd,
		    false)) != NULL)) {
			index_t binind = arena_ptr_small_binind_get(ptr,
			    mapbits);
			tcache_dalloc_small(tcache, ptr, binind);
		} else
			arena_dalloc_small(chunk->arena, chunk, ptr, pageind);
	} else {
		size_t size = arena_mapbits_large_size_get(chunk, pageind);

		assert(((uintptr_t)ptr & PAGE_MASK) == 0);

		if (try_tcache && size <= tcache_maxclass && likely((tcache =
		    tcache_get(tsd, false)) != NULL))
			tcache_dalloc_large(tcache, ptr, size);
		else
			arena_dalloc_large(chunk->arena, chunk, ptr);
	}
}

JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, size_t size,
    bool try_tcache)
{
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	if (config_prof && opt_prof) {
		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
		if (arena_mapbits_large_get(chunk, pageind) != 0) {
			/* Make sure to use promoted size, not request size. */
			assert(((uintptr_t)ptr & PAGE_MASK) == 0);
			size = arena_mapbits_large_size_get(chunk, pageind);
		}
	}
	assert(s2u(size) == s2u(arena_salloc(ptr, false)));

	if (likely(size <= SMALL_MAXCLASS)) {
		/* Small allocation. */
		if (likely(try_tcache) && likely((tcache = tcache_get(tsd,
		    false)) != NULL)) {
			index_t binind = size2index(size);
			tcache_dalloc_small(tcache, ptr, binind);
		} else {
			size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
			    LG_PAGE;
			arena_dalloc_small(chunk->arena, chunk, ptr, pageind);
		}
	} else {
		assert(((uintptr_t)ptr & PAGE_MASK) == 0);

		if (try_tcache && size <= tcache_maxclass && (tcache =
		    tcache_get(tsd, false)) != NULL)
			tcache_dalloc_large(tcache, ptr, size);
		else
			arena_dalloc_large(chunk->arena, chunk, ptr);
	}
}
# endif /* JEMALLOC_ARENA_INLINE_B */
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/