#define _GNU_SOURCE
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <stdint.h>
#include <errno.h>
#include <sys/mman.h>
#include "libc.h"
#include "atomic.h"
#include "pthread_impl.h"

#if defined(__GNUC__) && defined(__PIC__)
#define inline inline __attribute__((always_inline))
#endif

uintptr_t __brk(uintptr_t);
void *__mmap(void *, size_t, int, int, int, off_t);
int __munmap(void *, size_t);
void *__mremap(void *, size_t, size_t, int, ...);
int __madvise(void *, size_t, int);

struct chunk {
	size_t psize, csize;
	struct chunk *next, *prev;
};

struct bin {
	int lock[2];
	struct chunk *head;
	struct chunk *tail;
};

static struct {
	uintptr_t brk;
	size_t *heap;
	uint64_t binmap;
	struct bin bins[64];
	int brk_lock[2];
	int free_lock[2];
	unsigned mmap_step;
} mal;


#define SIZE_ALIGN (4*sizeof(size_t))
#define SIZE_MASK (-SIZE_ALIGN)
#define OVERHEAD (2*sizeof(size_t))
#define MMAP_THRESHOLD (0x1c00*SIZE_ALIGN)
#define DONTCARE 16
#define RECLAIM 163840

#define CHUNK_SIZE(c) ((c)->csize & -2)
#define CHUNK_PSIZE(c) ((c)->psize & -2)
#define PREV_CHUNK(c) ((struct chunk *)((char *)(c) - CHUNK_PSIZE(c)))
#define NEXT_CHUNK(c) ((struct chunk *)((char *)(c) + CHUNK_SIZE(c)))
#define MEM_TO_CHUNK(p) (struct chunk *)((char *)(p) - OVERHEAD)
#define CHUNK_TO_MEM(c) (void *)((char *)(c) + OVERHEAD)
#define BIN_TO_CHUNK(i) (MEM_TO_CHUNK(&mal.bins[i].head))

#define C_INUSE  ((size_t)1)

#define IS_MMAPPED(c) !((c)->csize & (C_INUSE))

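/* Chunk layout: csize holds this chunk's size and psize a copy of the
 * previous chunk's size (boundary tags), each with the low bit
 * (C_INUSE) set while the corresponding chunk is in use. The next/prev
 * links are live only while a chunk sits on a bin's free list; user
 * data overlays them once the chunk is handed out, which is why
 * OVERHEAD is just the two size_t header words. */
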
/* Synchronization tools */

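/* Each lock is a two-int pair: lk[0] is the lock word and lk[1] flags
 * the presence of waiters, letting unlock skip the futex wake when no
 * one is blocked. Single-threaded processes bypass locking entirely
 * via the libc.threads_minus_1 check. */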
static inline void lock(volatile int *lk)
{
	if (libc.threads_minus_1)
		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
}

static inline void unlock(volatile int *lk)
{
	if (lk[0]) {
		a_store(lk, 0);
		if (lk[1]) __wake(lk, 1, 1);
	}
}

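/* lock_bin also lazily initializes the bin's list: an empty bin's
 * head/tail are pointed at the sentinel BIN_TO_CHUNK(i), a fake chunk
 * whose next/prev fields alias the bin's own head/tail, so insertion
 * and removal need no empty-list special cases. */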
static inline void lock_bin(int i)
{
	lock(mal.bins[i].lock);
	if (!mal.bins[i].head)
		mal.bins[i].head = mal.bins[i].tail = BIN_TO_CHUNK(i);
}

static inline void unlock_bin(int i)
{
	unlock(mal.bins[i].lock);
}

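/* Returns the index of the lowest set bit in x. The live path simply
 * uses the a_ctz_64 primitive; the de Bruijn multiply fallback below
 * is kept (disabled) for reference. */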
static int first_set(uint64_t x)
{
#if 1
	return a_ctz_64(x);
#else
	static const char debruijn64[64] = {
		0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
		62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
		63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
		51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12
	};
	static const char debruijn32[32] = {
		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
		31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
	};
	if (sizeof(long) < 8) {
		uint32_t y = x;
		if (!y) {
			y = x>>32;
			return 32 + debruijn32[(y&-y)*0x076be629 >> 27];
		}
		return debruijn32[(y&-y)*0x076be629 >> 27];
	}
	return debruijn64[(x&-x)*0x022fdd63cc95386dull >> 58];
#endif
}

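/* Bin sizing: sizes are measured in SIZE_ALIGN units, minus one.
 * Units 0-32 get exact-fit bins 0-32. Above that, the unit count is
 * converted to float and the exponent plus top two mantissa bits are
 * read back out of the bit pattern, giving four logarithmically
 * spaced bins per doubling (e.g. units 40-47 all land in bin 33);
 * everything past 0x1c00 units collapses into bin 63. bin_index_up
 * rounds up instead of down, for choosing where a search starts. */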
static int bin_index(size_t x)
{
	x = x / SIZE_ALIGN - 1;
	if (x <= 32) return x;
	if (x > 0x1c00) return 63;
	return ((union { float v; uint32_t r; }){(int)x}.r>>21) - 496;
}

static int bin_index_up(size_t x)
{
	x = x / SIZE_ALIGN - 1;
	if (x <= 32) return x;
	return ((union { float v; uint32_t r; }){(int)x}.r+0x1fffff>>21) - 496;
}

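#if 0
/* Illustrative only (hypothetical helper, not part of the allocator):
 * prints the bin assignment for a range of chunk sizes so the
 * granularity described above can be checked by eye. Like __dump_heap
 * below, it assumes stdio would be available if enabled. */
static void __dump_bin_sizes(void)
{
	size_t x;
	for (x = SIZE_ALIGN; x <= 64*SIZE_ALIGN; x += SIZE_ALIGN)
		fprintf(stderr, "size %zu -> bin %d (up %d)\n",
			x, bin_index(x), bin_index_up(x));
}
#endif
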
#if 0
void __dump_heap(int x)
{
	struct chunk *c;
	int i;
	for (c = (void *)mal.heap; CHUNK_SIZE(c); c = NEXT_CHUNK(c))
		fprintf(stderr, "base %p size %zu (%d) flags %d/%d\n",
			c, CHUNK_SIZE(c), bin_index(CHUNK_SIZE(c)),
			c->csize & 15,
			NEXT_CHUNK(c)->psize & 15);
	for (i=0; i<64; i++) {
		if (mal.bins[i].head != BIN_TO_CHUNK(i) && mal.bins[i].head) {
			fprintf(stderr, "bin %d: %p\n", i, mal.bins[i].head);
			if (!(mal.binmap & 1ULL<<i))
				fprintf(stderr, "missing from binmap!\n");
		} else if (mal.binmap & 1ULL<<i)
			fprintf(stderr, "binmap wrongly contains %d!\n", i);
	}
}
#endif

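/* expand_heap obtains more memory, preferring to grow the brk. When
 * brk fails (e.g. the break has run into another mapping), it falls
 * back to an anonymous mmap whose minimum size doubles every other
 * call (PAGE_SIZE << mmap_step/2) so heavy fallback use stays cheap.
 * Each new area is framed by sentinels: psize=0 at the bottom and a
 * zero-size in-use chunk at the top stop coalescing from walking off
 * the region. */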
static struct chunk *expand_heap(size_t n)
{
	struct chunk *w;
	uintptr_t new;

	lock(mal.brk_lock);

	if (n > SIZE_MAX - mal.brk - 2*PAGE_SIZE) goto fail;
	new = mal.brk + n + SIZE_ALIGN + PAGE_SIZE - 1 & -PAGE_SIZE;
	n = new - mal.brk;

	if (__brk(new) != new) {
		size_t min = (size_t)PAGE_SIZE << mal.mmap_step/2;
		n += -n & PAGE_SIZE-1;
		if (n < min) n = min;
		void *area = __mmap(0, n, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
		if (area == MAP_FAILED) goto fail;

		mal.mmap_step++;
		area = (char *)area + SIZE_ALIGN - OVERHEAD;
		w = area;
		n -= SIZE_ALIGN;
		w->psize = 0 | C_INUSE;
		w->csize = n | C_INUSE;
		w = NEXT_CHUNK(w);
		w->psize = n | C_INUSE;
		w->csize = 0 | C_INUSE;

		unlock(mal.brk_lock);

		return area;
	}

	w = MEM_TO_CHUNK(new);
	w->psize = n | C_INUSE;
	w->csize = 0 | C_INUSE;

	w = MEM_TO_CHUNK(mal.brk);
	w->csize = n | C_INUSE;
	mal.brk = new;

	unlock(mal.brk_lock);

	return w;
fail:
	unlock(mal.brk_lock);
	errno = ENOMEM;
	return 0;
}

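/* One-time initialization, synchronized through the init variable:
 * 0 = not started, 1 = in progress (losers wait on a futex), 2 = done.
 * Returns 1 if this call performed the initialization, 0 if it was
 * already done, -1 on failure to obtain an initial heap. */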
static int init_malloc(size_t n)
{
	static int init, waiters;
	int state;
	struct chunk *c;

	if (init == 2) return 0;

	while ((state=a_swap(&init, 1)) == 1)
		__wait(&init, &waiters, 1, 1);
	if (state) {
		a_store(&init, 2);
		return 0;
	}

	mal.brk = __brk(0);
#ifdef SHARED
	mal.brk = mal.brk + PAGE_SIZE-1 & -PAGE_SIZE;
#endif
	mal.brk = mal.brk + 2*SIZE_ALIGN-1 & -SIZE_ALIGN;

	c = expand_heap(n);

	if (!c) {
		a_store(&init, 0);
		if (waiters) __wake(&init, 1, 1);
		return -1;
	}

	mal.heap = (void *)c;
	c->psize = 0 | C_INUSE;
	free(CHUNK_TO_MEM(c));

	a_store(&init, 2);
	if (waiters) __wake(&init, -1, 1);
	return 1;
}

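/* adjust_size converts a user request into a chunk size: add OVERHEAD
 * for the header and round up to a multiple of SIZE_ALIGN (e.g. with
 * 8-byte size_t, a 1-byte request becomes a 32-byte chunk). Requests
 * near PTRDIFF_MAX are refused so pointer differences stay
 * representable; a zero request is bumped to the minimum size. */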
static int adjust_size(size_t *n)
{
	/* Result of pointer difference must fit in ptrdiff_t. */
	if (*n-1 > PTRDIFF_MAX - SIZE_ALIGN - PAGE_SIZE) {
		if (*n) {
			errno = ENOMEM;
			return -1;
		} else {
			*n = SIZE_ALIGN;
			return 0;
		}
	}
	*n = (*n + OVERHEAD + SIZE_ALIGN - 1) & SIZE_MASK;
	return 0;
}

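/* unbin removes a free chunk from its bin's doubly-linked list,
 * clearing the binmap bit first if it is the sole entry (prev == next
 * only when both point at the sentinel), and marks the chunk in use
 * in both its own header and the next chunk's boundary tag. */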
static void unbin(struct chunk *c, int i)
{
	if (c->prev == c->next)
		a_and_64(&mal.binmap, ~(1ULL<<i));
	c->prev->next = c->next;
	c->next->prev = c->prev;
	c->csize |= C_INUSE;
	NEXT_CHUNK(c)->psize |= C_INUSE;
}

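/* alloc_fwd and alloc_rev try to take ownership of the free chunk
 * after (resp. before) c for coalescing. The size field is read
 * optimistically without the bin lock, then re-checked under the lock
 * and retried if it changed; they return 1 once the neighbor has been
 * unbinned, or 0 if it is in use. */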
static int alloc_fwd(struct chunk *c)
{
	int i;
	size_t k;
	while (!((k=c->csize) & C_INUSE)) {
		i = bin_index(k);
		lock_bin(i);
		if (c->csize == k) {
			unbin(c, i);
			unlock_bin(i);
			return 1;
		}
		unlock_bin(i);
	}
	return 0;
}

static int alloc_rev(struct chunk *c)
{
	int i;
	size_t k;
	while (!((k=c->psize) & C_INUSE)) {
		i = bin_index(k);
		lock_bin(i);
		if (c->psize == k) {
			unbin(PREV_CHUNK(c), i);
			unlock_bin(i);
			return 1;
		}
		unlock_bin(i);
	}
	return 0;
}


/* pretrim - trims a chunk _prior_ to removing it from its bin.
 * Must be called with i as the ideal bin for size n, j the bin
 * for the _free_ chunk self, and bin j locked. */
static int pretrim(struct chunk *self, size_t n, int i, int j)
{
	size_t n1;
	struct chunk *next, *split;

	/* We cannot pretrim if it would require re-binning. */
	if (j < 40) return 0;
	if (j < i+3) {
		if (j != 63) return 0;
		n1 = CHUNK_SIZE(self);
		if (n1-n <= MMAP_THRESHOLD) return 0;
	} else {
		n1 = CHUNK_SIZE(self);
	}
	if (bin_index(n1-n) != j) return 0;

	next = NEXT_CHUNK(self);
	split = (void *)((char *)self + n);

	split->prev = self->prev;
	split->next = self->next;
	split->prev->next = split;
	split->next->prev = split;
	split->psize = n | C_INUSE;
	split->csize = n1-n;
	next->psize = n1-n;
	self->csize = n | C_INUSE;
	return 1;
}

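/* trim splits an over-sized allocation in place, keeping n bytes and
 * freeing the remainder through free(), unless the excess is too
 * small (DONTCARE) to be worth tracking. */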
static void trim(struct chunk *self, size_t n)
{
	size_t n1 = CHUNK_SIZE(self);
	struct chunk *next, *split;

	if (n >= n1 - DONTCARE) return;

	next = NEXT_CHUNK(self);
	split = (void *)((char *)self + n);

	split->psize = n | C_INUSE;
	split->csize = n1-n | C_INUSE;
	next->psize = n1-n | C_INUSE;
	self->csize = n | C_INUSE;

	free(CHUNK_TO_MEM(split));
}

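/* malloc: requests above MMAP_THRESHOLD get a dedicated anonymous
 * mapping (flagged by a clear C_INUSE bit). Otherwise the binmap is
 * scanned from the ideal bin upward for the first non-empty bin; if
 * none exists, the heap is expanded. The chosen chunk is then
 * pretrimmed or unbinned, and any excess is trimmed off at the end. */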
void *malloc(size_t n)
{
	struct chunk *c;
	int i, j;

	if (adjust_size(&n) < 0) return 0;

	if (n > MMAP_THRESHOLD) {
		size_t len = n + OVERHEAD + PAGE_SIZE - 1 & -PAGE_SIZE;
		char *base = __mmap(0, len, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
		if (base == (void *)-1) return 0;
		c = (void *)(base + SIZE_ALIGN - OVERHEAD);
		c->csize = len - (SIZE_ALIGN - OVERHEAD);
		c->psize = SIZE_ALIGN - OVERHEAD;
		return CHUNK_TO_MEM(c);
	}

	i = bin_index_up(n);
	for (;;) {
		uint64_t mask = mal.binmap & -(1ULL<<i);
		if (!mask) {
			if (init_malloc(n) > 0) continue;
			c = expand_heap(n);
			if (!c) return 0;
			if (alloc_rev(c)) {
				struct chunk *x = c;
				c = PREV_CHUNK(c);
				NEXT_CHUNK(x)->psize = c->csize =
					x->csize + CHUNK_SIZE(c);
			}
			break;
		}
		j = first_set(mask);
		lock_bin(j);
		c = mal.bins[j].head;
		if (c != BIN_TO_CHUNK(j) && j == bin_index(c->csize)) {
			if (!pretrim(c, n, i, j)) unbin(c, j);
			unlock_bin(j);
			break;
		}
		unlock_bin(j);
	}

	/* Now patch up in case we over-allocated */
	trim(c, n);

	return CHUNK_TO_MEM(c);
}

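/* realloc: mmapped chunks are resized with mremap (or copied to a
 * normal chunk if small enough); heap chunks first try to grow in
 * place by merging with a free successor, and only fall back to
 * malloc-copy-free when in-place expansion fails. */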
void *realloc(void *p, size_t n)
{
	struct chunk *self, *next;
	size_t n0, n1;
	void *new;

	if (!p) return malloc(n);

	if (adjust_size(&n) < 0) return 0;

	self = MEM_TO_CHUNK(p);
	n1 = n0 = CHUNK_SIZE(self);

	if (IS_MMAPPED(self)) {
		size_t extra = self->psize;
		char *base = (char *)self - extra;
		size_t oldlen = n0 + extra;
		size_t newlen = n + extra;
		/* Crash on realloc of freed chunk */
		if (extra & 1) a_crash();
		if (newlen < PAGE_SIZE && (new = malloc(n))) {
			memcpy(new, p, n-OVERHEAD);
			free(p);
			return new;
		}
		newlen = (newlen + PAGE_SIZE-1) & -PAGE_SIZE;
		if (oldlen == newlen) return p;
		base = __mremap(base, oldlen, newlen, MREMAP_MAYMOVE);
		if (base == (void *)-1)
			return newlen < oldlen ? p : 0;
		self = (void *)(base + extra);
		self->csize = newlen - extra;
		return CHUNK_TO_MEM(self);
	}

	next = NEXT_CHUNK(self);

	/* Crash on corrupted footer (likely from buffer overflow) */
	if (next->psize != self->csize) a_crash();

	/* Merge adjacent chunks if we need more space. This is not
	 * a waste of time even if we fail to get enough space, because our
	 * subsequent call to free would otherwise have to do the merge. */
	if (n > n1 && alloc_fwd(next)) {
		n1 += CHUNK_SIZE(next);
		next = NEXT_CHUNK(next);
	}
	/* FIXME: find what's wrong here and reenable it..? */
	if (0 && n > n1 && alloc_rev(self)) {
		self = PREV_CHUNK(self);
		n1 += CHUNK_SIZE(self);
	}
	self->csize = n1 | C_INUSE;
	next->psize = n1 | C_INUSE;

	/* If we got enough space, split off the excess and return */
	if (n <= n1) {
		//memmove(CHUNK_TO_MEM(self), p, n0-OVERHEAD);
		trim(self, n);
		return CHUNK_TO_MEM(self);
	}

	/* As a last resort, allocate a new chunk and copy to it. */
	new = malloc(n-OVERHEAD);
	if (!new) return 0;
	memcpy(new, p, n0-OVERHEAD);
	free(CHUNK_TO_MEM(self));
	return new;
}

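/* free: mmapped chunks are simply munmapped. Heap chunks are coalesced
 * with free neighbors in a retry loop (the merge can race with other
 * frees, hence the re-check under free_lock), the page-aligned
 * interior of large merged chunks is returned to the kernel via
 * MADV_DONTNEED, and the result is pushed onto the tail of its
 * size-class bin with the binmap bit set. */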
void free(void *p)
{
	struct chunk *self = MEM_TO_CHUNK(p);
	struct chunk *next;
	size_t final_size, new_size, size;
	int reclaim=0;
	int i;

	if (!p) return;

	if (IS_MMAPPED(self)) {
		size_t extra = self->psize;
		char *base = (char *)self - extra;
		size_t len = CHUNK_SIZE(self) + extra;
		/* Crash on double free */
		if (extra & 1) a_crash();
		__munmap(base, len);
		return;
	}

	final_size = new_size = CHUNK_SIZE(self);
	next = NEXT_CHUNK(self);

	/* Crash on corrupted footer (likely from buffer overflow) */
	if (next->psize != self->csize) a_crash();

	for (;;) {
		/* Replace middle of large chunks with fresh zero pages */
		if (reclaim && (self->psize & next->csize & C_INUSE)) {
			uintptr_t a = (uintptr_t)self + SIZE_ALIGN+PAGE_SIZE-1 & -PAGE_SIZE;
			uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
#if 1
			__madvise((void *)a, b-a, MADV_DONTNEED);
#else
			__mmap((void *)a, b-a, PROT_READ|PROT_WRITE,
				MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
#endif
		}

		if (self->psize & next->csize & C_INUSE) {
			self->csize = final_size | C_INUSE;
			next->psize = final_size | C_INUSE;
			i = bin_index(final_size);
			lock_bin(i);
			lock(mal.free_lock);
			if (self->psize & next->csize & C_INUSE)
				break;
			unlock(mal.free_lock);
			unlock_bin(i);
		}

		if (alloc_rev(self)) {
			self = PREV_CHUNK(self);
			size = CHUNK_SIZE(self);
			final_size += size;
			if (new_size+size > RECLAIM && (new_size+size^size) > size)
				reclaim = 1;
		}

		if (alloc_fwd(next)) {
			size = CHUNK_SIZE(next);
			final_size += size;
			if (new_size+size > RECLAIM && (new_size+size^size) > size)
				reclaim = 1;
			next = NEXT_CHUNK(next);
		}
	}

	self->csize = final_size;
	next->psize = final_size;
	unlock(mal.free_lock);

	self->next = BIN_TO_CHUNK(i);
	self->prev = mal.bins[i].tail;
	self->next->prev = self;
	self->prev->next = self;

	if (!(mal.binmap & 1ULL<<i))
		a_or_64(&mal.binmap, 1ULL<<i);

	unlock_bin(i);
}
557}