/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/*
 * Subpages are an artificially designated partitioning of pages.  Their only
 * purpose is to support subpage-spaced size classes.
 *
 * There must be at least 4 subpages per page, due to the way size classes are
 * handled.
 */
#define LG_SUBPAGE 8
#define SUBPAGE ((size_t)(1U << LG_SUBPAGE))
#define SUBPAGE_MASK (SUBPAGE - 1)

/* Return the smallest subpage multiple that is >= s. */
#define SUBPAGE_CEILING(s) \
    (((s) + SUBPAGE_MASK) & ~SUBPAGE_MASK)
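
/*
 * Illustrative examples (not part of the interface): with LG_SUBPAGE == 8,
 * SUBPAGE is 256 and SUBPAGE_MASK is 0xff, so:
 *
 *   SUBPAGE_CEILING(1)   == 256
 *   SUBPAGE_CEILING(256) == 256
 *   SUBPAGE_CEILING(257) == 512
 */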

#ifdef JEMALLOC_TINY
/* Smallest size class to support. */
#  define LG_TINY_MIN 1
#endif

/*
 * Maximum size class that is a multiple of the quantum, but not (necessarily)
 * a power of 2.  Above this size, size classes are cacheline-spaced rather
 * than quantum-spaced.
 */
#define LG_QSPACE_MAX_DEFAULT 7

/*
 * Maximum size class that is a multiple of the cacheline, but not (necessarily)
 * a power of 2.  Above this size, size classes are subpage-spaced.
 */
#define LG_CSPACE_MAX_DEFAULT 9

/*
 * Maximum medium size class.  This must not be more than 1/4 of a chunk
 * (LG_MEDIUM_MAX_DEFAULT <= LG_CHUNK_DEFAULT - 2).
 */
#define LG_MEDIUM_MAX_DEFAULT 15

/* Return the smallest medium size class that is >= s. */
#define MEDIUM_CEILING(s) \
    (((s) + mspace_mask) & ~mspace_mask)
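
/*
 * Illustrative sketch: mspace_mask is computed at boot from lg_mspace (see
 * the externs below), so the values here are assumptions, not constants.
 * Assuming medium classes end up spaced 2 KiB apart (lg_mspace == 11,
 * mspace_mask == 0x7ff):
 *
 *   MEDIUM_CEILING(4097) == 6144
 *   MEDIUM_CEILING(6144) == 6144
 */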

/*
 * Soft limit on the number of medium size classes.  Spacing between medium
 * size classes never exceeds pagesize, which can force more than NMBINS_MAX
 * medium size classes.
 */
#define NMBINS_MAX 16
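
/*
 * Worked example (assuming the defaults above): with medium classes spanning
 * PAGE_SIZE (4 KiB) through 1 << LG_MEDIUM_MAX_DEFAULT (32 KiB), a 2 KiB
 * spacing yields (32 - 4) / 2 + 1 == 15 classes, which respects NMBINS_MAX,
 * whereas a 1 KiB spacing would yield 29.  The spacing is widened until the
 * soft limit is met or the pagesize cap on spacing is reached.
 */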

/*
 * RUN_MAX_OVRHD indicates maximum desired run header overhead.  Runs are sized
 * as small as possible such that this setting is still honored, without
 * violating other constraints.  The goal is to make runs as small as possible
 * without exceeding a per-run external fragmentation threshold.
 *
 * We use binary fixed point math for overhead computations, where the binary
 * point is implicitly RUN_BFP bits to the left.
 *
 * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
 * honored for some/all object sizes, since there is one bit of header overhead
 * per object (plus a constant).  This constraint is relaxed (ignored) for runs
 * that are so small that the per-region overhead is greater than:
 *
 *   (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP)))
 */
#define RUN_BFP              12
/*                                   \/   Implicit binary fixed point. */
#define RUN_MAX_OVRHD        0x0000003dU
#define RUN_MAX_OVRHD_RELAX  0x00001800U
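
/*
 * Illustrative arithmetic: with RUN_BFP == 12, a fixed-point value v
 * represents v / 2^12.  RUN_MAX_OVRHD (0x3d == 61) therefore corresponds to
 * 61/4096, roughly 1.5% maximum desired header overhead per run, and
 * RUN_MAX_OVRHD_RELAX (0x1800 == 6144) corresponds to 6144/4096 == 1.5.
 */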

/* Put a cap on small object run size.  This overrides RUN_MAX_OVRHD. */
#define RUN_MAX_SMALL \
    (arena_maxclass <= (1U << (CHUNK_MAP_LG_PG_RANGE + PAGE_SHIFT)) \
        ? arena_maxclass : (1U << (CHUNK_MAP_LG_PG_RANGE + \
        PAGE_SHIFT)))

/*
 * The minimum ratio of active:dirty pages per arena is computed as:
 *
 *   (nactive >> opt_lg_dirty_mult) >= ndirty
 *
 * So, supposing that opt_lg_dirty_mult is 5, there can be no fewer than 32
 * times as many active pages as dirty pages.
 */
#define LG_DIRTY_MULT_DEFAULT 5
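
/*
 * Minimal sketch of the resulting purge trigger (assumed form; the actual
 * logic lives in arena.c and is not part of this header).  With the default
 * of 5, an arena with nactive == 1024 tolerates at most 32 dirty pages:
 *
 *   if ((arena->nactive >> opt_lg_dirty_mult) < arena->ndirty)
 *       arena_purge(arena);
 */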

typedef struct arena_chunk_map_s arena_chunk_map_t;
typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/* Each element of the chunk map corresponds to one page within the chunk. */
struct arena_chunk_map_s {
    /*
     * Linkage for run trees.  There are two disjoint uses:
     *
     * 1) arena_t's runs_avail tree.
     * 2) arena_run_t conceptually uses this linkage for in-use non-full
     *    runs, rather than directly embedding linkage.
     */
    rb_node(arena_chunk_map_t) link;

#ifdef JEMALLOC_PROF
    /* Profile counters, used for large object runs. */
    prof_thr_cnt_t *prof_cnt;
#endif

    /*
     * Run address (or size) and various flags are stored together.  The bit
     * layout looks like (assuming a 32-bit system):
     *
     *   ???????? ????cccc cccccccc ccccdzla
     *
     * ? : Unallocated: Run address for first/last pages, unset for internal
     *                  pages.
     *     Small/medium: Don't care.
     *     Large: Run size for first page, unset for trailing pages.
     * - : Unused.
     * c : refcount (could overflow for PAGE_SIZE >= 128 KiB)
     * d : dirty?
     * z : zeroed?
     * l : large?
     * a : allocated?
     *
     * Following are example bit patterns for the three types of runs.  (An
     * illustrative decoding of these fields appears after this struct.)
     *
     * p : run page offset
     * s : run size
     * x : don't care
     * - : 0
     * [dzla] : bit set
     *
     *   Unallocated:
     *     ssssssss ssssssss ssss---- --------
     *     xxxxxxxx xxxxxxxx xxxx---- ----d---
     *     ssssssss ssssssss ssss---- -----z--
     *
     *   Small/medium:
     *     pppppppp ppppcccc cccccccc cccc---a
     *     pppppppp ppppcccc cccccccc cccc---a
     *     pppppppp ppppcccc cccccccc cccc---a
     *
     *   Large:
     *     ssssssss ssssssss ssss---- ------la
     *     -------- -------- -------- ------la
     *     -------- -------- -------- ------la
     */
    size_t bits;
#define CHUNK_MAP_PG_MASK       ((size_t)0xfff00000U)
#define CHUNK_MAP_PG_SHIFT      20
#define CHUNK_MAP_LG_PG_RANGE   12

#define CHUNK_MAP_RC_MASK       ((size_t)0xffff0U)
#define CHUNK_MAP_RC_ONE        ((size_t)0x00010U)

#define CHUNK_MAP_FLAGS_MASK    ((size_t)0xfU)
#define CHUNK_MAP_DIRTY         ((size_t)0x8U)
#define CHUNK_MAP_ZEROED        ((size_t)0x4U)
#define CHUNK_MAP_LARGE         ((size_t)0x2U)
#define CHUNK_MAP_ALLOCATED     ((size_t)0x1U)
#define CHUNK_MAP_KEY           (CHUNK_MAP_DIRTY | CHUNK_MAP_ALLOCATED)
};
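
/*
 * Illustrative decoding sketch (assumed helpers, not part of this header):
 * given a small/medium map entry's bits on a 32-bit system, the fields above
 * would be extracted as follows:
 *
 *   size_t runpg  = (bits & CHUNK_MAP_PG_MASK) >> CHUNK_MAP_PG_SHIFT;
 *   size_t refcnt = (bits & CHUNK_MAP_RC_MASK) / CHUNK_MAP_RC_ONE;
 *   bool   dirty  = (bits & CHUNK_MAP_DIRTY) != 0;
 *   bool   large  = (bits & CHUNK_MAP_LARGE) != 0;
 *   bool   alloc  = (bits & CHUNK_MAP_ALLOCATED) != 0;
 *
 * For the first page of an allocated large run, (bits & ~CHUNK_MAP_FLAGS_MASK)
 * instead holds the run size in bytes, per the bit patterns above.
 */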
typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;

/* Arena chunk header. */
struct arena_chunk_s {
    /* Arena that owns the chunk. */
    arena_t *arena;

    /* Linkage for the arena's chunks_dirty tree. */
    rb_node(arena_chunk_t) link_dirty;

    /*
     * True if the chunk is currently in the chunks_dirty tree, due to
     * having at some point contained one or more dirty pages.  Removal
     * from chunks_dirty is lazy, so (dirtied && ndirty == 0) is possible.
     */
    bool dirtied;

    /* Number of dirty pages. */
    size_t ndirty;

    /* Map of pages within chunk that keeps track of free/large/small. */
    arena_chunk_map_t map[1]; /* Dynamically sized. */
};
typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;

struct arena_run_s {
#ifdef JEMALLOC_DEBUG
    uint32_t magic;
#  define ARENA_RUN_MAGIC 0x384adf93
#endif

    /* Bin this run is associated with. */
    arena_bin_t *bin;

    /* Index of first element that might have a free region. */
    unsigned regs_minelm;

    /* Number of free regions in run. */
    unsigned nfree;

    /* Bitmask of in-use regions (0: in use, 1: free). */
    unsigned regs_mask[1]; /* Dynamically sized. */
};
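
/*
 * Illustrative sketch (assumed convention, matching the comment above): with
 * a 32-bit unsigned, region i of a run is tracked by bit (i & 31) of
 * regs_mask[i >> 5], so a region could be marked free with:
 *
 *   run->regs_mask[i >> 5] |= (1U << (i & 31));
 *
 * and regs_minelm lowered to (i >> 5) if that element index is smaller.
 */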

struct arena_bin_s {
    /*
     * Current run being used to service allocations of this bin's size
     * class.
     */
    arena_run_t *runcur;

    /*
     * Tree of non-full runs.  This tree is used when looking for an
     * existing run when runcur is no longer usable.  We choose the
     * non-full run that is lowest in memory; this policy tends to keep
     * objects packed well, and it can also help reduce the number of
     * almost-empty chunks.
     */
    arena_run_tree_t runs;

    /* Size of regions in a run for this bin's size class. */
    size_t reg_size;

    /* Total size of a run for this bin's size class. */
    size_t run_size;

    /* Total number of regions in a run for this bin's size class. */
    uint32_t nregs;

    /* Number of elements in a run's regs_mask for this bin's size class. */
    uint32_t regs_mask_nelms;

#ifdef JEMALLOC_PROF
    /*
     * Offset of first (prof_thr_cnt_t *) in a run header for this bin's
     * size class, or 0 if (opt_prof == false).
     */
    uint32_t cnt0_offset;
#endif

    /* Offset of first region in a run for this bin's size class. */
    uint32_t reg0_offset;

#ifdef JEMALLOC_STATS
    /* Bin statistics. */
    malloc_bin_stats_t stats;
#endif
};
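
/*
 * Illustrative sketch (assumed layout): the address of region i within a run
 * follows from reg0_offset and reg_size, for 0 <= i < bin->nregs:
 *
 *   void *reg = (void *)((uintptr_t)run + bin->reg0_offset +
 *       (size_t)i * bin->reg_size);
 */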

struct arena_s {
#ifdef JEMALLOC_DEBUG
    uint32_t magic;
#  define ARENA_MAGIC 0x947d3d24
#endif

    /* This arena's index within the arenas array. */
    unsigned ind;

    /* All operations on this arena require that lock be locked. */
    malloc_mutex_t lock;

#ifdef JEMALLOC_STATS
    arena_stats_t stats;
#  ifdef JEMALLOC_TCACHE
    /*
     * List of tcaches for extant threads associated with this arena.
     * Stats from these are merged incrementally, and at exit.
     */
    ql_head(tcache_t) tcache_ql;
#  endif
#endif

#ifdef JEMALLOC_PROF
    uint64_t prof_accumbytes;
#endif

    /* Tree of dirty-page-containing chunks this arena manages. */
    arena_chunk_tree_t chunks_dirty;

    /*
     * In order to avoid rapid chunk allocation/deallocation when an arena
     * oscillates right on the cusp of needing a new chunk, cache the most
     * recently freed chunk.  The spare is left in the arena's chunk trees
     * until it is deleted.
     *
     * There is one spare chunk per arena, rather than one spare total, in
     * order to avoid interactions between multiple threads that could make
     * a single spare inadequate.
     */
    arena_chunk_t *spare;

    /* Number of pages in active runs. */
    size_t nactive;

    /*
     * Current count of pages within unused runs that are potentially
     * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
     * By tracking this, we can institute a limit on how much dirty unused
     * memory is mapped for each arena.
     */
    size_t ndirty;

    /*
     * Size/address-ordered tree of this arena's available runs.  This tree
     * is used for first-best-fit run allocation.
     */
    arena_avail_tree_t runs_avail;

    /*
     * bins is used to store trees of free regions of the following sizes,
     * assuming a 16-byte quantum, 4 KiB page size, and default
     * JEMALLOC_OPTIONS.
     *
     *   bins[i] |   size |
     *   --------+--------+
     *        0  |      2 |
     *        1  |      4 |
     *        2  |      8 |
     *   --------+--------+
     *        3  |     16 |
     *        4  |     32 |
     *        5  |     48 |
     *           :        :
     *        8  |     96 |
     *        9  |    112 |
     *       10  |    128 |
     *   --------+--------+
     *       11  |    192 |
     *       12  |    256 |
     *       13  |    320 |
     *       14  |    384 |
     *       15  |    448 |
     *       16  |    512 |
     *   --------+--------+
     *       17  |    768 |
     *       18  |   1024 |
     *       19  |   1280 |
     *           :        :
     *       27  |   3328 |
     *       28  |   3584 |
     *       29  |   3840 |
     *   --------+--------+
     *       30  |  4 KiB |
     *       31  |  6 KiB |
     *       32  |  8 KiB |
     *           :        :
     *       42  | 28 KiB |
     *       43  | 30 KiB |
     *       44  | 32 KiB |
     *   --------+--------+
     */
    arena_bin_t bins[1]; /* Dynamically sized. */
};
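
/*
 * Worked example for the table above: a 100 byte request falls in the
 * quantum-spaced range, rounds up to 112, and is served from bins[9]; a
 * 600 byte request is subpage-spaced, rounds up to 768, and is served from
 * bins[17].  (In the code this mapping is table-driven via small_size2bin,
 * declared below, rather than recomputed per allocation.)
 */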

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern size_t opt_lg_qspace_max;
extern size_t opt_lg_cspace_max;
extern size_t opt_lg_medium_max;
extern ssize_t opt_lg_dirty_mult;
extern uint8_t const *small_size2bin;

/* Various bin-related settings. */
#ifdef JEMALLOC_TINY /* Number of (2^n)-spaced tiny bins. */
#  define ntbins ((unsigned)(LG_QUANTUM - LG_TINY_MIN))
#else
#  define ntbins 0
#endif
extern unsigned nqbins; /* Number of quantum-spaced bins. */
extern unsigned ncbins; /* Number of cacheline-spaced bins. */
extern unsigned nsbins; /* Number of subpage-spaced bins. */
extern unsigned nmbins; /* Number of medium bins. */
extern unsigned nbins;
extern unsigned mbin0; /* mbin offset (nbins - nmbins). */
#ifdef JEMALLOC_TINY
#  define tspace_max ((size_t)(QUANTUM >> 1))
#endif
#define qspace_min QUANTUM
extern size_t qspace_max;
extern size_t cspace_min;
extern size_t cspace_max;
extern size_t sspace_min;
extern size_t sspace_max;
#define small_maxclass sspace_max
#define medium_min PAGE_SIZE
extern size_t medium_max;
#define bin_maxclass medium_max

/* Spacing between medium size classes. */
extern size_t lg_mspace;
extern size_t mspace_mask;

#define nlclasses ((chunksize - PAGE_SIZE) >> PAGE_SHIFT)
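
/*
 * Worked example (defaults assumed as in the bins table above: 16-byte
 * quantum, 64-byte cacheline, 4 KiB page, 2 KiB medium spacing): ntbins == 3,
 * nqbins == 8, ncbins == 6, nsbins == 13, and nmbins == 15, giving
 * nbins == 45 and mbin0 == 30.
 */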

#ifdef JEMALLOC_TCACHE
void arena_tcache_fill(arena_t *arena, tcache_bin_t *tbin, size_t binind
#  ifdef JEMALLOC_PROF
    , uint64_t prof_accumbytes
#  endif
    );
#endif
#ifdef JEMALLOC_PROF
void arena_prof_accum(arena_t *arena, uint64_t accumbytes);
#endif
void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
void *arena_malloc_medium(arena_t *arena, size_t size, bool zero);
void *arena_malloc(size_t size, bool zero);
void *arena_palloc(arena_t *arena, size_t alignment, size_t size,
    size_t alloc_size);
size_t arena_salloc(const void *ptr);
#ifdef JEMALLOC_PROF
prof_thr_cnt_t *arena_prof_cnt_get(const void *ptr);
void arena_prof_cnt_set(const void *ptr, prof_thr_cnt_t *cnt);
#endif
void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    arena_chunk_map_t *mapelm);
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
#ifdef JEMALLOC_STATS
void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
    arena_stats_t *astats, malloc_bin_stats_t *bstats,
    malloc_large_stats_t *lstats);
#endif
void *arena_ralloc(void *ptr, size_t size, size_t oldsize);
bool arena_new(arena_t *arena, unsigned ind);
bool arena_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
JEMALLOC_INLINE void
arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{
    size_t pageind;
    arena_chunk_map_t *mapelm;

    assert(arena != NULL);
    assert(arena->magic == ARENA_MAGIC);
    assert(chunk->arena == arena);
    assert(ptr != NULL);
    assert(CHUNK_ADDR2BASE(ptr) != ptr);

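    /*
     * Locate this pointer's page index within its chunk; e.g. with a 4 KiB
     * page size, a ptr 0x5000 bytes into the chunk maps to page 5, whose
     * map entry records whether the run is small/medium or large.
     */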
    pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT);
    mapelm = &chunk->map[pageind];
    assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
    if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
        /* Small allocation. */
#ifdef JEMALLOC_TCACHE
        tcache_t *tcache;

        if ((tcache = tcache_get()) != NULL)
            tcache_dalloc(tcache, ptr);
        else {
#endif
            malloc_mutex_lock(&arena->lock);
            arena_dalloc_bin(arena, chunk, ptr, mapelm);
            malloc_mutex_unlock(&arena->lock);
#ifdef JEMALLOC_TCACHE
        }
#endif
    } else
        arena_dalloc_large(arena, chunk, ptr);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/