/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/*
 * RUN_MAX_OVRHD indicates maximum desired run header overhead.  Runs are sized
 * as small as possible such that this setting is still honored, without
 * violating other constraints.  The goal is to make runs as small as possible
 * without exceeding a per run external fragmentation threshold.
 *
 * We use binary fixed point math for overhead computations, where the binary
 * point is implicitly RUN_BFP bits to the left.
 *
 * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
 * honored for some/all object sizes, since when heap profiling is enabled
 * there is one pointer of header overhead per object (plus a constant).  This
 * constraint is relaxed (ignored) for runs that are so small that the
 * per-region overhead is greater than:
 *
 *   (RUN_MAX_OVRHD / (reg_interval << (3+RUN_BFP)))
 */
#define	RUN_BFP			12
				/*  \/   Implicit binary fixed point. */
#define	RUN_MAX_OVRHD		0x0000003dU
#define	RUN_MAX_OVRHD_RELAX	0x00001800U
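
/*
 * Worked example: with the binary point RUN_BFP == 12 bits to the left,
 * RUN_MAX_OVRHD == 0x3d reads as 61/4096, i.e. a target maximum run header
 * overhead of roughly 1.5% of a run's size.
 */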

/* Maximum number of regions in one run. */
#define	LG_RUN_MAXREGS		11
#define	RUN_MAXREGS		(1U << LG_RUN_MAXREGS)

/*
 * Minimum redzone size.  Redzones may be larger than this if necessary to
 * preserve region alignment.
 */
#define	REDZONE_MINSIZE		16

/*
 * The minimum ratio of active:dirty pages per arena is computed as:
 *
 *   (nactive >> opt_lg_dirty_mult) >= ndirty
 *
 * So, supposing that opt_lg_dirty_mult is 3, there can be no fewer than eight
 * times as many active pages as dirty pages.
 */
#define	LG_DIRTY_MULT_DEFAULT	3
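
/*
 * Worked example: with opt_lg_dirty_mult == 3 and nactive == 8192, the
 * invariant above allows at most 8192 >> 3 == 1024 dirty pages before
 * purging is triggered.
 */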

typedef struct arena_chunk_map_s arena_chunk_map_t;
typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_bin_info_s arena_bin_info_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/* Each element of the chunk map corresponds to one page within the chunk. */
struct arena_chunk_map_s {
#ifndef JEMALLOC_PROF
	/*
	 * Overlay prof_tctx in order to allow it to be referenced by dead code.
	 * Such antics aren't warranted for per arena data structures, but
	 * chunk map overhead accounts for a percentage of memory, rather than
	 * being just a fixed cost.
	 */
	union {
#endif
	/*
	 * Linkage for run trees.  There are two disjoint uses:
	 *
	 * 1) arena_t's runs_avail tree.
	 * 2) arena_run_t conceptually uses this linkage for in-use non-full
	 *    runs, rather than directly embedding linkage.
	 */
	rb_node(arena_chunk_map_t)	rb_link;

	/* Profile counters, used for large object runs. */
	prof_tctx_t			*prof_tctx;
#ifndef JEMALLOC_PROF
	}; /* union { ... }; */
#endif

	/* Linkage for list of dirty runs. */
	ql_elm(arena_chunk_map_t)	dr_link;

	/*
	 * Run address (or size) and various flags are stored together.  The bit
	 * layout looks like (assuming 32-bit system):
	 *
	 *   ???????? ???????? ????nnnn nnnndula
	 *
	 * ? : Unallocated: Run address for first/last pages, unset for internal
	 *                  pages.
	 *     Small: Run page offset.
	 *     Large: Run size for first page, unset for trailing pages.
	 * n : binind for small size class, BININD_INVALID for large size class.
	 * d : dirty?
	 * u : unzeroed?
	 * l : large?
	 * a : allocated?
	 *
	 * Following are example bit patterns for the three types of runs.
	 *
	 * p : run page offset
	 * s : run size
	 * n : binind for size class; large objects set these to BININD_INVALID
	 * x : don't care
	 * - : 0
	 * + : 1
	 * [DULA] : bit set
	 * [dula] : bit unset
	 *
	 *   Unallocated (clean):
	 *     ssssssss ssssssss ssss++++ ++++du-a
	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx
	 *     ssssssss ssssssss ssss++++ ++++dU-a
	 *
	 *   Unallocated (dirty):
	 *     ssssssss ssssssss ssss++++ ++++D--a
	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
	 *     ssssssss ssssssss ssss++++ ++++D--a
	 *
	 *   Small:
	 *     pppppppp pppppppp ppppnnnn nnnnd--A
	 *     pppppppp pppppppp ppppnnnn nnnn---A
	 *     pppppppp pppppppp ppppnnnn nnnnd--A
	 *
	 *   Large:
	 *     ssssssss ssssssss ssss++++ ++++D-LA
	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
	 *     -------- -------- ----++++ ++++D-LA
	 *
	 *   Large (sampled, size <= PAGE):
	 *     ssssssss ssssssss ssssnnnn nnnnD-LA
	 *
	 *   Large (not sampled, size == PAGE):
	 *     ssssssss ssssssss ssss++++ ++++D-LA
	 */
	size_t				bits;
#define	CHUNK_MAP_BININD_SHIFT	4
#define	BININD_INVALID		((size_t)0xffU)
/*     CHUNK_MAP_BININD_MASK == (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) */
#define	CHUNK_MAP_BININD_MASK	((size_t)0xff0U)
#define	CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK
#define	CHUNK_MAP_FLAGS_MASK	((size_t)0xcU)
#define	CHUNK_MAP_DIRTY		((size_t)0x8U)
#define	CHUNK_MAP_UNZEROED	((size_t)0x4U)
#define	CHUNK_MAP_LARGE		((size_t)0x2U)
#define	CHUNK_MAP_ALLOCATED	((size_t)0x1U)
#define	CHUNK_MAP_KEY		CHUNK_MAP_ALLOCATED
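/*
 * Worked example of the encoding (assuming LG_PAGE == 12): a small-run page
 * at run page offset 2, in bin 5, with the dirty flag set, has
 * bits == (2 << 12) | (5 << CHUNK_MAP_BININD_SHIFT) | CHUNK_MAP_DIRTY |
 * CHUNK_MAP_ALLOCATED == 0x2059.
 */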
};
typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
typedef ql_head(arena_chunk_map_t) arena_chunk_mapelms_t;

/* Arena chunk header. */
struct arena_chunk_s {
	/* Arena that owns the chunk. */
	arena_t			*arena;

	/*
	 * Map of pages within chunk that keeps track of free/large/small.  The
	 * first map_bias entries are omitted, since the chunk header does not
	 * need to be tracked in the map.  This omission saves a header page
	 * for common chunk sizes (e.g. 4 MiB).
	 */
	arena_chunk_map_t	map[1]; /* Dynamically sized. */
};

struct arena_run_s {
	/* Bin this run is associated with. */
	arena_bin_t	*bin;

	/* Index of next region that has never been allocated, or nregs. */
	uint32_t	nextind;

	/* Number of free regions in run. */
	unsigned	nfree;
};

/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 *
 * Each run has the following layout:
 *
 *               /--------------------\
 *               | arena_run_t header |
 *               | ...                |
 * bitmap_offset | bitmap             |
 *               | ...                |
 *               |--------------------|
 *               | redzone            |
 *   reg0_offset | region 0           |
 *               | redzone            |
 *               |--------------------| \
 *               | redzone            | |
 *               | region 1           |  > reg_interval
 *               | redzone            | /
 *               |--------------------|
 *               | ...                |
 *               | ...                |
 *               | ...                |
 *               |--------------------|
 *               | redzone            |
 *               | region nregs-1     |
 *               | redzone            |
 *               |--------------------|
 *               | alignment pad?     |
 *               \--------------------/
 *
 * reg_interval has at least the same minimum alignment as reg_size; this
 * preserves the alignment constraint that sa2u() depends on.  Alignment pad is
 * either 0 or redzone_size; it is present only if needed to align reg0_offset.
 */
struct arena_bin_info_s {
	/* Size of regions in a run for this bin's size class. */
	size_t		reg_size;

	/* Redzone size. */
	size_t		redzone_size;

	/* Interval between regions (reg_size + (redzone_size << 1)). */
	size_t		reg_interval;

	/* Total size of a run for this bin's size class. */
	size_t		run_size;

	/* Total number of regions in a run for this bin's size class. */
	uint32_t	nregs;

	/*
	 * Offset of first bitmap_t element in a run header for this bin's size
	 * class.
	 */
	uint32_t	bitmap_offset;

	/*
	 * Metadata used to manipulate bitmaps for runs associated with this
	 * bin.
	 */
	bitmap_info_t	bitmap_info;

	/* Offset of first region in a run for this bin's size class. */
	uint32_t	reg0_offset;
};
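
/*
 * Worked example of the layout above (hypothetical size class, assuming
 * redzones are enabled with redzone_size == REDZONE_MINSIZE): for
 * reg_size == 32, reg_interval == reg_size + (redzone_size << 1) == 64, and
 * region i starts at reg0_offset + i * reg_interval bytes from the run base.
 */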

struct arena_bin_s {
	/*
	 * All operations on runcur, runs, and stats require that lock be
	 * locked.  Run allocation/deallocation are protected by the arena lock,
	 * which may be acquired while holding one or more bin locks, but not
	 * vice versa.
	 */
	malloc_mutex_t	lock;

	/*
	 * Current run being used to service allocations of this bin's size
	 * class.
	 */
	arena_run_t	*runcur;

	/*
	 * Tree of non-full runs.  This tree is used when looking for an
	 * existing run when runcur is no longer usable.  We choose the
	 * non-full run that is lowest in memory; this policy tends to keep
	 * objects packed well, and it can also help reduce the number of
	 * almost-empty chunks.
	 */
	arena_run_tree_t runs;

	/* Bin statistics. */
	malloc_bin_stats_t stats;
};

struct arena_s {
	/* This arena's index within the arenas array. */
	unsigned		ind;

	/*
	 * Number of threads currently assigned to this arena.  This field is
	 * protected by arenas_lock.
	 */
	unsigned		nthreads;

	/*
	 * There are three classes of arena operations from a locking
	 * perspective:
	 * 1) Thread assignment (modifies nthreads) is protected by
	 *    arenas_lock.
	 * 2) Bin-related operations are protected by bin locks.
	 * 3) Chunk- and run-related operations are protected by this mutex.
	 */
	malloc_mutex_t		lock;

	arena_stats_t		stats;
	/*
	 * List of tcaches for extant threads associated with this arena.
	 * Stats from these are merged incrementally, and at exit.
	 */
	ql_head(tcache_t)	tcache_ql;

	uint64_t		prof_accumbytes;

	dss_prec_t		dss_prec;

	/*
	 * In order to avoid rapid chunk allocation/deallocation when an arena
	 * oscillates right on the cusp of needing a new chunk, cache the most
	 * recently freed chunk.  The spare is left in the arena's chunk trees
	 * until it is deleted.
	 *
	 * There is one spare chunk per arena, rather than one spare total, in
	 * order to avoid interactions between multiple threads that could make
	 * a single spare inadequate.
	 */
	arena_chunk_t		*spare;

	/* Number of pages in active runs and huge regions. */
	size_t			nactive;

	/*
	 * Current count of pages within unused runs that are potentially
	 * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
	 * By tracking this, we can institute a limit on how much dirty unused
	 * memory is mapped for each arena.
	 */
	size_t			ndirty;

	/*
	 * Size/address-ordered tree of this arena's available runs.  The tree
	 * is used for first-best-fit run allocation.
	 */
	arena_avail_tree_t	runs_avail;

	/* List of dirty runs this arena manages. */
	arena_chunk_mapelms_t	runs_dirty;

	/*
	 * User-configurable chunk allocation and deallocation functions.
	 */
	chunk_alloc_t		*chunk_alloc;
	chunk_dalloc_t		*chunk_dalloc;

	/* bins is used to store trees of free regions. */
	arena_bin_t		bins[NBINS];
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern ssize_t	opt_lg_dirty_mult;
/*
 * small_size2bin_tab is a compact lookup table that rounds request sizes up to
 * size classes.  In order to reduce cache footprint, the table is compressed,
 * and all accesses are via small_size2bin().
 */
extern uint8_t const	small_size2bin_tab[];
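/*
 * Worked example of the compression (assuming LG_TINY_MIN == 3): the table
 * stores one entry per 2^3 == 8-byte step, so small_size2bin_lookup() below
 * maps a 100-byte request to entry (100-1) >> 3 == 12.
 */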
/*
 * small_bin2size_tab duplicates information in arena_bin_info, but in a const
 * array, for which it is easier for the compiler to optimize repeated
 * dereferences.
 */
extern uint32_t const	small_bin2size_tab[NBINS];

extern arena_bin_info_t	arena_bin_info[NBINS];

/* Number of large size classes. */
#define	nlclasses (chunk_npages - map_bias)

void	*arena_chunk_alloc_huge(arena_t *arena, size_t size, size_t alignment,
    bool *zero);
void	arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t size);
void	arena_purge_all(arena_t *arena);
void	arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
    size_t binind, uint64_t prof_accumbytes);
void	arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
    bool zero);
#ifdef JEMALLOC_JET
typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
    uint8_t);
extern arena_redzone_corruption_t *arena_redzone_corruption;
typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
#else
void	arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
#endif
void	arena_quarantine_junk_small(void *ptr, size_t usize);
void	*arena_malloc_small(arena_t *arena, size_t size, bool zero);
void	*arena_malloc_large(arena_t *arena, size_t size, bool zero);
void	*arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
void	arena_prof_promoted(const void *ptr, size_t size);
void	arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    arena_chunk_map_t *mapelm);
void	arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind, arena_chunk_map_t *mapelm);
void	arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind);
#ifdef JEMALLOC_JET
typedef void (arena_dalloc_junk_large_t)(void *, size_t);
extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
#endif
void	arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
    void *ptr);
void	arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
#ifdef JEMALLOC_JET
typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
#endif
bool	arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
void	*arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
    bool try_tcache_dalloc);
dss_prec_t	arena_dss_prec_get(arena_t *arena);
bool	arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
void	arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
    malloc_large_stats_t *lstats);
bool	arena_new(arena_t *arena, unsigned ind);
void	arena_boot(void);
void	arena_prefork(arena_t *arena);
void	arena_postfork_parent(arena_t *arena);
void	arena_postfork_child(arena_t *arena);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
size_t	small_size2bin_compute(size_t size);
size_t	small_size2bin_lookup(size_t size);
size_t	small_size2bin(size_t size);
size_t	small_bin2size_compute(size_t binind);
size_t	small_bin2size_lookup(size_t binind);
size_t	small_bin2size(size_t binind);
size_t	small_s2u_compute(size_t size);
size_t	small_s2u_lookup(size_t size);
size_t	small_s2u(size_t size);
arena_chunk_map_t	*arena_mapp_get(arena_chunk_t *chunk, size_t pageind);
size_t	*arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbitsp_read(size_t *mapbitsp);
size_t	arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
    size_t pageind);
size_t	arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
void	arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
void	arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
    size_t size, size_t flags);
void	arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
    size_t size);
void	arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
    size_t size, size_t flags);
void	arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
    size_t binind);
void	arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
    size_t runind, size_t binind, size_t flags);
void	arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
    size_t unzeroed);
bool	arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool	arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
bool	arena_prof_accum(arena_t *arena, uint64_t accumbytes);
size_t	arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
size_t	arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned	arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
    const void *ptr);
prof_tctx_t	*arena_prof_tctx_get(const void *ptr);
void	arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
void	*arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache);
size_t	arena_salloc(const void *ptr, bool demote);
void	arena_dalloc(arena_chunk_t *chunk, void *ptr, bool try_tcache);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A
JEMALLOC_INLINE size_t
small_size2bin_compute(size_t size)
{
#if (NTBINS != 0)
	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
		size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
		size_t lg_ceil = lg_floor(pow2_ceil(size));
		return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
	} else
#endif
	{
		size_t x = lg_floor((size<<1)-1);
		size_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
		    x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
		size_t grp = shift << LG_SIZE_CLASS_GROUP;

		size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
		    ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;

		size_t delta_inverse_mask = ZI(-1) << lg_delta;
		size_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
		    ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);

		size_t bin = NTBINS + grp + mod;
		return (bin);
	}
}
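
/*
 * Worked example for the non-tiny branch of small_size2bin_compute()
 * (assuming LG_QUANTUM == 4 and LG_SIZE_CLASS_GROUP == 2): for size == 100,
 * x == lg_floor(199) == 7, shift == 1, grp == 4, lg_delta == 4, mod == 2,
 * so the computed bin is NTBINS + 6, whose size class is 112 bytes.
 */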

JEMALLOC_ALWAYS_INLINE size_t
small_size2bin_lookup(size_t size)
{

	assert(size <= LOOKUP_MAXCLASS);
	{
		size_t ret = ((size_t)(small_size2bin_tab[(size-1) >>
		    LG_TINY_MIN]));
		assert(ret == small_size2bin_compute(size));
		return (ret);
	}
}

JEMALLOC_ALWAYS_INLINE size_t
small_size2bin(size_t size)
{

	assert(size > 0);
	if (size <= LOOKUP_MAXCLASS)
		return (small_size2bin_lookup(size));
	else
		return (small_size2bin_compute(size));
}

JEMALLOC_INLINE size_t
small_bin2size_compute(size_t binind)
{
#if (NTBINS > 0)
	if (binind < NTBINS)
		return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + binind));
	else
#endif
	{
		size_t reduced_binind = binind - NTBINS;
		size_t grp = reduced_binind >> LG_SIZE_CLASS_GROUP;
		size_t mod = reduced_binind & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
		    1);

		size_t grp_size_mask = ~((!!grp)-1);
		size_t grp_size = ((ZU(1) << (LG_QUANTUM +
		    (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;

		size_t shift = (grp == 0) ? 1 : grp;
		size_t lg_delta = shift + (LG_QUANTUM-1);
		size_t mod_size = (mod+1) << lg_delta;

		size_t usize = grp_size + mod_size;
		return (usize);
	}
}
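
/*
 * Worked example, the inverse of the one after small_size2bin_compute()
 * (same assumptions, LG_QUANTUM == 4 and LG_SIZE_CLASS_GROUP == 2): for
 * binind == NTBINS + 6, grp == 1, mod == 2, grp_size == 64, mod_size == 48,
 * so usize == 112.
 */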

JEMALLOC_ALWAYS_INLINE size_t
small_bin2size_lookup(size_t binind)
{

	assert(binind < NBINS);
	{
		size_t ret = ((size_t)(small_bin2size_tab[binind]));
		assert(ret == small_bin2size_compute(binind));
		return (ret);
	}
}

JEMALLOC_ALWAYS_INLINE size_t
small_bin2size(size_t binind)
{

	return (small_bin2size_lookup(binind));
}

JEMALLOC_ALWAYS_INLINE size_t
small_s2u_compute(size_t size)
{
#if (NTBINS > 0)
	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
		size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
		size_t lg_ceil = lg_floor(pow2_ceil(size));
		return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
		    (ZU(1) << lg_ceil));
	} else
#endif
	{
		size_t x = lg_floor((size<<1)-1);
		size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
		    ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
		size_t delta = ZU(1) << lg_delta;
		size_t delta_mask = delta - 1;
		size_t usize = (size + delta_mask) & ~delta_mask;
		return (usize);
	}
}

JEMALLOC_ALWAYS_INLINE size_t
small_s2u_lookup(size_t size)
{
	size_t ret = (small_bin2size(small_size2bin(size)));

	assert(ret == small_s2u_compute(size));
	return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
small_s2u(size_t size)
{

	assert(size > 0);
	if (size <= LOOKUP_MAXCLASS)
		return (small_s2u_lookup(size));
	else
		return (small_s2u_compute(size));
}
# endif /* JEMALLOC_ARENA_INLINE_A */

# ifdef JEMALLOC_ARENA_INLINE_B
JEMALLOC_ALWAYS_INLINE arena_chunk_map_t *
arena_mapp_get(arena_chunk_t *chunk, size_t pageind)
{

	assert(pageind >= map_bias);
	assert(pageind < chunk_npages);

	return (&chunk->map[pageind-map_bias]);
}

JEMALLOC_ALWAYS_INLINE size_t *
arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
{

	return (&arena_mapp_get(chunk, pageind)->bits);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbitsp_read(size_t *mapbitsp)
{

	return (*mapbitsp);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
{

	return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
	return (mapbits & ~PAGE_MASK);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
	    (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
	return (mapbits & ~PAGE_MASK);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
	    CHUNK_MAP_ALLOCATED);
	return (mapbits >> LG_PAGE);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;
	size_t binind;

	mapbits = arena_mapbits_get(chunk, pageind);
	binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
	assert(binind < NBINS || binind == BININD_INVALID);
	return (binind);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	return (mapbits & CHUNK_MAP_DIRTY);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	return (mapbits & CHUNK_MAP_UNZEROED);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	return (mapbits & CHUNK_MAP_LARGE);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	return (mapbits & CHUNK_MAP_ALLOCATED);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
{

	*mapbitsp = mapbits;
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);

	assert((size & PAGE_MASK) == 0);
	assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
	assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags);
	arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
    size_t size)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);

	assert((size & PAGE_MASK) == 0);
	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
	arena_mapbitsp_write(mapbitsp, size | (mapbits & PAGE_MASK));
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);
	size_t unzeroed;

	assert((size & PAGE_MASK) == 0);
	assert((flags & CHUNK_MAP_DIRTY) == flags);
	unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
	arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags
	    | unzeroed | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
    size_t binind)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);

	assert(binind <= BININD_INVALID);
	assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE);
	arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
	    (binind << CHUNK_MAP_BININD_SHIFT));
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
    size_t binind, size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);
	size_t unzeroed;

	assert(binind < BININD_INVALID);
	assert(pageind - runind >= map_bias);
	assert((flags & CHUNK_MAP_DIRTY) == flags);
	unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
	arena_mapbitsp_write(mapbitsp, (runind << LG_PAGE) | (binind <<
	    CHUNK_MAP_BININD_SHIFT) | flags | unzeroed | CHUNK_MAP_ALLOCATED);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
    size_t unzeroed)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);

	arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_UNZEROED) |
	    unzeroed);
}

JEMALLOC_INLINE bool
arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);
	assert(prof_interval != 0);

	arena->prof_accumbytes += accumbytes;
	if (arena->prof_accumbytes >= prof_interval) {
		arena->prof_accumbytes -= prof_interval;
		return (true);
	}
	return (false);
}

JEMALLOC_INLINE bool
arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);

	if (prof_interval == 0)
		return (false);
	return (arena_prof_accum_impl(arena, accumbytes));
}

JEMALLOC_INLINE bool
arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);

	if (prof_interval == 0)
		return (false);

	{
		bool ret;

		malloc_mutex_lock(&arena->lock);
		ret = arena_prof_accum_impl(arena, accumbytes);
		malloc_mutex_unlock(&arena->lock);
		return (ret);
	}
}

JEMALLOC_ALWAYS_INLINE size_t
arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
{
	size_t binind;

	binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;

	if (config_debug) {
		arena_chunk_t *chunk;
		arena_t *arena;
		size_t pageind;
		size_t actual_mapbits;
		arena_run_t *run;
		arena_bin_t *bin;
		size_t actual_binind;
		arena_bin_info_t *bin_info;

		assert(binind != BININD_INVALID);
		assert(binind < NBINS);
		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		arena = chunk->arena;
		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
		actual_mapbits = arena_mapbits_get(chunk, pageind);
		assert(mapbits == actual_mapbits);
		assert(arena_mapbits_large_get(chunk, pageind) == 0);
		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
		run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
		    (actual_mapbits >> LG_PAGE)) << LG_PAGE));
		bin = run->bin;
		actual_binind = bin - arena->bins;
		assert(binind == actual_binind);
		bin_info = &arena_bin_info[actual_binind];
		assert(((uintptr_t)ptr - ((uintptr_t)run +
		    (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
		    == 0);
	}

	return (binind);
}
# endif /* JEMALLOC_ARENA_INLINE_B */

# ifdef JEMALLOC_ARENA_INLINE_C
JEMALLOC_INLINE size_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
	size_t binind = bin - arena->bins;
	assert(binind < NBINS);
	return (binind);
}

JEMALLOC_INLINE unsigned
arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
{
	unsigned shift, diff, regind;
	size_t interval;

	/*
	 * Freeing a pointer lower than region zero can cause assertion
	 * failure.
	 */
	assert((uintptr_t)ptr >= (uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset);

	/*
	 * Avoid doing division with a variable divisor if possible.  Using
	 * actual division here can reduce allocator throughput by over 20%!
	 */
	diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run -
	    bin_info->reg0_offset);

	/* Rescale (factor powers of 2 out of the numerator and denominator). */
	interval = bin_info->reg_interval;
	shift = jemalloc_ffs(interval) - 1;
	diff >>= shift;
	interval >>= shift;

	if (interval == 1) {
		/* The divisor was a power of 2. */
		regind = diff;
	} else {
		/*
		 * To divide by a number D that is not a power of two we
		 * multiply by (2^21 / D) and then right shift by 21 positions.
		 *
		 *   X / D
		 *
		 * becomes
		 *
		 *   (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
		 *
		 * We can omit the first three elements, because we never
		 * divide by 0, and 1 and 2 are both powers of two, which are
		 * handled above.
		 */
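		/*
		 * Worked example (with 32-bit unsigned and LG_RUN_MAXREGS ==
		 * 11, SIZE_INV_SHIFT below is 21): SIZE_INV(3) == (2^21 / 3)
		 * + 1 == 699051, so for diff == 96, (96 * 699051) >> 21 == 32,
		 * which equals 96 / 3.
		 */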
#define	SIZE_INV_SHIFT	((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
#define	SIZE_INV(s)	(((1U << SIZE_INV_SHIFT) / (s)) + 1)
		static const unsigned interval_invs[] = {
		    SIZE_INV(3),
		    SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
		    SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
		    SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
		    SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
		    SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
		    SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
		    SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
		};

		if (interval <= ((sizeof(interval_invs) / sizeof(unsigned)) +
		    2)) {
			regind = (diff * interval_invs[interval - 3]) >>
			    SIZE_INV_SHIFT;
		} else
			regind = diff / interval;
#undef SIZE_INV
#undef SIZE_INV_SHIFT
	}
	assert(diff == regind * interval);
	assert(regind < bin_info->nregs);

	return (regind);
}

JEMALLOC_INLINE prof_tctx_t *
arena_prof_tctx_get(const void *ptr)
{
	prof_tctx_t *ret;
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	cassert(config_prof);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	mapbits = arena_mapbits_get(chunk, pageind);
	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
	if ((mapbits & CHUNK_MAP_LARGE) == 0)
		ret = (prof_tctx_t *)(uintptr_t)1U;
	else
		ret = arena_mapp_get(chunk, pageind)->prof_tctx;

	return (ret);
}

JEMALLOC_INLINE void
arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
	arena_chunk_t *chunk;
	size_t pageind;

	cassert(config_prof);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);

	if (arena_mapbits_large_get(chunk, pageind) != 0)
		arena_mapp_get(chunk, pageind)->prof_tctx = tctx;
}

JEMALLOC_ALWAYS_INLINE void *
arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache)
{
	tcache_t *tcache;

	assert(size != 0);
	assert(size <= arena_maxclass);

	if (size <= SMALL_MAXCLASS) {
		if (try_tcache && (tcache = tcache_get(true)) != NULL)
			return (tcache_alloc_small(tcache, size, zero));
		else {
			return (arena_malloc_small(choose_arena(arena), size,
			    zero));
		}
	} else {
		/*
		 * Initialize tcache after checking size in order to avoid
		 * infinite recursion during tcache initialization.
		 */
		if (try_tcache && size <= tcache_maxclass && (tcache =
		    tcache_get(true)) != NULL)
			return (tcache_alloc_large(tcache, size, zero));
		else {
			return (arena_malloc_large(choose_arena(arena), size,
			    zero));
		}
	}
}

/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;
	size_t pageind, binind;

	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
	binind = arena_mapbits_binind_get(chunk, pageind);
	if (binind == BININD_INVALID || (config_prof && demote == false &&
	    arena_mapbits_large_get(chunk, pageind) != 0)) {
		/*
		 * Large allocation.  In the common case (demote == true), and
		 * as this is an inline function, most callers will only end up
		 * looking at binind to determine that ptr is a small
		 * allocation.
		 */
		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
		ret = arena_mapbits_large_size_get(chunk, pageind);
		assert(ret != 0);
		assert(pageind + (ret>>LG_PAGE) <= chunk_npages);
		assert(ret == PAGE || arena_mapbits_large_size_get(chunk,
		    pageind+(ret>>LG_PAGE)-1) == 0);
		assert(binind == arena_mapbits_binind_get(chunk,
		    pageind+(ret>>LG_PAGE)-1));
		assert(arena_mapbits_dirty_get(chunk, pageind) ==
		    arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1));
	} else {
		/* Small allocation (possibly promoted to a large object). */
		assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
		    arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
		    pageind)) == binind);
		ret = small_bin2size(binind);
	}

	return (ret);
}

JEMALLOC_ALWAYS_INLINE void
arena_dalloc(arena_chunk_t *chunk, void *ptr, bool try_tcache)
{
	size_t pageind, mapbits;
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	mapbits = arena_mapbits_get(chunk, pageind);
	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
		/* Small allocation. */
		if (try_tcache && (tcache = tcache_get(false)) != NULL) {
			size_t binind;

			binind = arena_ptr_small_binind_get(ptr, mapbits);
			tcache_dalloc_small(tcache, ptr, binind);
		} else
			arena_dalloc_small(chunk->arena, chunk, ptr, pageind);
	} else {
		size_t size = arena_mapbits_large_size_get(chunk, pageind);

		assert(((uintptr_t)ptr & PAGE_MASK) == 0);

		if (try_tcache && size <= tcache_maxclass && (tcache =
		    tcache_get(false)) != NULL) {
			tcache_dalloc_large(tcache, ptr, size);
		} else
			arena_dalloc_large(chunk->arena, chunk, ptr);
	}
}
# endif /* JEMALLOC_ARENA_INLINE_C */
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/