#define _GNU_SOURCE
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <stdint.h>
#include <errno.h>
#include <sys/mman.h>
#include "libc.h"
#include "atomic.h"
#include "pthread_impl.h"

uintptr_t __brk(uintptr_t);
void *__mmap(void *, size_t, int, int, int, off_t);
int __munmap(void *, size_t);
void *__mremap(void *, size_t, size_t, int, ...);
int __madvise(void *, size_t, int);

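/* Each chunk carries boundary tags: psize is the size of the previous
 * chunk and csize the size of this one, each with the C_INUSE flag in
 * the low bit. next/prev link free chunks into their bin and are only
 * meaningful while the chunk is free. */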
struct chunk {
	size_t psize, csize;
	struct chunk *next, *prev;
};

struct bin {
	int lock[2];
	struct chunk *head;
	struct chunk *tail;
};

static struct {
	uintptr_t brk;
	size_t *heap;
	uint64_t binmap;
	struct bin bins[64];
	int brk_lock[2];
	int free_lock[2];
} mal;

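/* Sizes are multiples of SIZE_ALIGN; OVERHEAD is the two size_t header
 * words preceding user memory. Requests larger than MMAP_THRESHOLD
 * bypass the heap and are serviced by mmap directly. */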
#define SIZE_ALIGN (4*sizeof(size_t))
#define SIZE_MASK (-SIZE_ALIGN)
#define OVERHEAD (2*sizeof(size_t))
#define MMAP_THRESHOLD (0x1c00*SIZE_ALIGN)
#define DONTCARE 16
#define RECLAIM 163840

#define CHUNK_SIZE(c) ((c)->csize & -2)
#define CHUNK_PSIZE(c) ((c)->psize & -2)
#define PREV_CHUNK(c) ((struct chunk *)((char *)(c) - CHUNK_PSIZE(c)))
#define NEXT_CHUNK(c) ((struct chunk *)((char *)(c) + CHUNK_SIZE(c)))
#define MEM_TO_CHUNK(p) (struct chunk *)((char *)(p) - OVERHEAD)
#define CHUNK_TO_MEM(c) (void *)((char *)(c) + OVERHEAD)
#define BIN_TO_CHUNK(i) (MEM_TO_CHUNK(&mal.bins[i].head))

#define C_INUSE ((size_t)1)

#define IS_MMAPPED(c) !((c)->csize & (C_INUSE))

/* Synchronization tools */

static void lock(volatile int *lk)
{
	if (!libc.threads_minus_1) return;
	while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
}

static void unlock(volatile int *lk)
{
	if (!libc.threads_minus_1) return;
	a_store(lk, 0);
	if (lk[1]) __wake(lk, 1, 1);
}

static void lock_bin(int i)
{
	if (libc.threads_minus_1)
		lock(mal.bins[i].lock);
	if (!mal.bins[i].head)
		mal.bins[i].head = mal.bins[i].tail = BIN_TO_CHUNK(i);
}

static void unlock_bin(int i)
{
	if (!libc.threads_minus_1) return;
	unlock(mal.bins[i].lock);
}

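/* first_set returns the index of the lowest set bit of the binmap.
 * The live path uses a_ctz_64; the #else branch preserves a portable
 * de Bruijn multiplication fallback for reference. */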
static int first_set(uint64_t x)
{
#if 1
	return a_ctz_64(x);
#else
	static const char debruijn64[64] = {
		0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
		62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
		63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
		51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12
	};
	static const char debruijn32[32] = {
		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
		31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
	};
	if (sizeof(long) < 8) {
		uint32_t y = x;
		if (!y) {
			y = x>>32;
			return 32 + debruijn32[(y&-y)*0x076be629 >> 27];
		}
		return debruijn32[(y&-y)*0x076be629 >> 27];
	}
	return debruijn64[(x&-x)*0x022fdd63cc95386dull >> 58];
#endif
}

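/* Bin indexing: sizes up to 33 units of SIZE_ALIGN map one-to-one to
 * small bins; larger sizes are binned roughly logarithmically by
 * reading the exponent bits out of a float conversion. bin_index
 * rounds down; bin_index_up rounds up so a search started there never
 * lands in a bin whose chunks are too small. */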
static int bin_index(size_t x)
{
	x = x / SIZE_ALIGN - 1;
	if (x <= 32) return x;
	if (x > 0x1c00) return 63;
	return ((union { float v; uint32_t r; }){(int)x}.r>>21) - 496;
}

static int bin_index_up(size_t x)
{
	x = x / SIZE_ALIGN - 1;
	if (x <= 32) return x;
	return ((union { float v; uint32_t r; }){(int)x}.r+0x1fffff>>21) - 496;
}

#if 0
void __dump_heap(int x)
{
	struct chunk *c;
	int i;
	for (c = (void *)mal.heap; CHUNK_SIZE(c); c = NEXT_CHUNK(c))
		fprintf(stderr, "base %p size %zu (%d) flags %d/%d\n",
			c, CHUNK_SIZE(c), bin_index(CHUNK_SIZE(c)),
			c->csize & 15,
			NEXT_CHUNK(c)->psize & 15);
	for (i=0; i<64; i++) {
		if (mal.bins[i].head != BIN_TO_CHUNK(i) && mal.bins[i].head) {
			fprintf(stderr, "bin %d: %p\n", i, mal.bins[i].head);
			if (!(mal.binmap & 1ULL<<i))
				fprintf(stderr, "missing from binmap!\n");
		} else if (mal.binmap & 1ULL<<i)
			fprintf(stderr, "binmap wrongly contains %d!\n", i);
	}
}
#endif

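/* Grow the heap via brk, rounding the new break up to a whole page.
 * A zero-length sentinel chunk marked C_INUSE is written at the new
 * top so coalescing never runs past the end of the heap; the returned
 * chunk covers the newly obtained span. */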
static struct chunk *expand_heap(size_t n)
{
	struct chunk *w;
	uintptr_t new;

	lock(mal.brk_lock);

	if (n > SIZE_MAX - mal.brk - 2*PAGE_SIZE) goto fail;
	new = mal.brk + n + SIZE_ALIGN + PAGE_SIZE - 1 & -PAGE_SIZE;
	n = new - mal.brk;

	if (__brk(new) != new) goto fail;

	w = MEM_TO_CHUNK(new);
	w->psize = n | C_INUSE;
	w->csize = 0 | C_INUSE;

	w = MEM_TO_CHUNK(mal.brk);
	w->csize = n | C_INUSE;
	mal.brk = new;

	unlock(mal.brk_lock);

	return w;
fail:
	unlock(mal.brk_lock);
	return 0;
}

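/* One-time initialization guarded by a three-state flag: 0 = not
 * started, 1 = in progress (losers of the a_swap race wait on it),
 * 2 = done. On success the initial heap chunk is handed straight to
 * free() to seed the bins. Returns 1 if this call performed the
 * init, 0 if it was already done, -1 on failure. */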
static int init_malloc(size_t n)
{
	static int init, waiters;
	int state;
	struct chunk *c;

	if (init == 2) return 0;

	while ((state=a_swap(&init, 1)) == 1)
		__wait(&init, &waiters, 1, 1);
	if (state) {
		a_store(&init, 2);
		return 0;
	}

	mal.brk = __brk(0) + 2*SIZE_ALIGN-1 & -SIZE_ALIGN;

	c = expand_heap(n);

	if (!c) {
		a_store(&init, 0);
		if (waiters) __wake(&init, 1, 1);
		return -1;
	}

	mal.heap = (void *)c;
	c->psize = 0 | C_INUSE;
	free(CHUNK_TO_MEM(c));

	a_store(&init, 2);
	if (waiters) __wake(&init, -1, 1);
	return 1;
}

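/* Round a request up to chunk granularity, adding header overhead.
 * Oversized requests fail with ENOMEM so pointer differences within
 * a chunk cannot overflow ptrdiff_t; a zero-byte request becomes the
 * minimum chunk size. */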
static int adjust_size(size_t *n)
{
	/* Result of pointer difference must fit in ptrdiff_t. */
	if (*n-1 > PTRDIFF_MAX - SIZE_ALIGN - PAGE_SIZE) {
		if (*n) {
			errno = ENOMEM;
			return -1;
		} else {
			*n = SIZE_ALIGN;
			return 0;
		}
	}
	*n = (*n + OVERHEAD + SIZE_ALIGN - 1) & SIZE_MASK;
	return 0;
}

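/* Remove a chunk from bin i, clearing the binmap bit if it was the
 * bin's only entry, and mark both it and its successor's back-tag
 * in-use. Caller must hold the bin lock. */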
static void unbin(struct chunk *c, int i)
{
	if (c->prev == c->next)
		a_and_64(&mal.binmap, ~(1ULL<<i));
	c->prev->next = c->next;
	c->next->prev = c->prev;
	c->csize |= C_INUSE;
	NEXT_CHUNK(c)->psize |= C_INUSE;
}

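/* alloc_fwd/alloc_rev try to claim the free chunk after/before c.
 * The size field is read without the bin lock, then re-checked under
 * it; if it changed, another thread grabbed or merged the chunk and
 * the loop retries. Returns 1 if a chunk was claimed. */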
static int alloc_fwd(struct chunk *c)
{
	int i;
	size_t k;
	while (!((k=c->csize) & C_INUSE)) {
		i = bin_index(k);
		lock_bin(i);
		if (c->csize == k) {
			unbin(c, i);
			unlock_bin(i);
			return 1;
		}
		unlock_bin(i);
	}
	return 0;
}

static int alloc_rev(struct chunk *c)
{
	int i;
	size_t k;
	while (!((k=c->psize) & C_INUSE)) {
		i = bin_index(k);
		lock_bin(i);
		if (c->psize == k) {
			unbin(PREV_CHUNK(c), i);
			unlock_bin(i);
			return 1;
		}
		unlock_bin(i);
	}
	return 0;
}

/* pretrim - trims a chunk _prior_ to removing it from its bin.
 * Must be called with i as the ideal bin for size n, j the bin
 * for the _free_ chunk self, and bin j locked. */
static int pretrim(struct chunk *self, size_t n, int i, int j)
{
	size_t n1;
	struct chunk *next, *split;

	/* We cannot pretrim if it would require re-binning. */
	if (j < 40) return 0;
	if (j < i+3) {
		if (j != 63) return 0;
		n1 = CHUNK_SIZE(self);
		if (n1-n <= MMAP_THRESHOLD) return 0;
	} else {
		n1 = CHUNK_SIZE(self);
	}
	if (bin_index(n1-n) != j) return 0;

	next = NEXT_CHUNK(self);
	split = (void *)((char *)self + n);

	split->prev = self->prev;
	split->next = self->next;
	split->prev->next = split;
	split->next->prev = split;
	split->psize = n | C_INUSE;
	split->csize = n1-n;
	next->psize = n1-n;
	self->csize = n | C_INUSE;
	return 1;
}

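/* Split the excess off an in-use chunk when it exceeds the request
 * by more than DONTCARE, returning the tail to the allocator via
 * free(). */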
static void trim(struct chunk *self, size_t n)
{
	size_t n1 = CHUNK_SIZE(self);
	struct chunk *next, *split;

	if (n >= n1 - DONTCARE) return;

	next = NEXT_CHUNK(self);
	split = (void *)((char *)self + n);

	split->psize = n | C_INUSE;
	split->csize = n1-n | C_INUSE;
	next->psize = n1-n | C_INUSE;
	self->csize = n | C_INUSE;

	free(CHUNK_TO_MEM(split));
}

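/* malloc: large requests get a dedicated mmap region; everything else
 * searches the binmap for the first non-empty bin guaranteed big
 * enough, falling back to expanding the heap (merging backward into a
 * free chunk at the old top when possible). The chunk found is then
 * trimmed down to the request. */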
void *malloc(size_t n)
{
	struct chunk *c;
	int i, j;

	if (adjust_size(&n) < 0) return 0;

	if (n > MMAP_THRESHOLD) {
		size_t len = n + OVERHEAD + PAGE_SIZE - 1 & -PAGE_SIZE;
		char *base = __mmap(0, len, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
		if (base == (void *)-1) return 0;
		c = (void *)(base + SIZE_ALIGN - OVERHEAD);
		c->csize = len - (SIZE_ALIGN - OVERHEAD);
		c->psize = SIZE_ALIGN - OVERHEAD;
		return CHUNK_TO_MEM(c);
	}

	i = bin_index_up(n);
	for (;;) {
		uint64_t mask = mal.binmap & -(1ULL<<i);
		if (!mask) {
			if (init_malloc(n) > 0) continue;
			c = expand_heap(n);
			if (!c) return 0;
			if (alloc_rev(c)) {
				struct chunk *x = c;
				c = PREV_CHUNK(c);
				NEXT_CHUNK(x)->psize = c->csize =
					x->csize + CHUNK_SIZE(c);
			}
			break;
		}
		j = first_set(mask);
		lock_bin(j);
		c = mal.bins[j].head;
		if (c != BIN_TO_CHUNK(j) && j == bin_index(c->csize)) {
			if (!pretrim(c, n, i, j)) unbin(c, j);
			unlock_bin(j);
			break;
		}
		unlock_bin(j);
	}

	/* Now patch up in case we over-allocated */
	trim(c, n);

	return CHUNK_TO_MEM(c);
}

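/* realloc: mmap-serviced chunks are resized with mremap, or moved
 * back into the heap when the new length drops below a page. Heap
 * chunks first try to absorb the following free chunk in place; only
 * if that still falls short is a new chunk allocated and the contents
 * copied. */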
void *realloc(void *p, size_t n)
{
	struct chunk *self, *next;
	size_t n0, n1;
	void *new;

	if (!p) return malloc(n);

	if (adjust_size(&n) < 0) return 0;

	self = MEM_TO_CHUNK(p);
	n1 = n0 = CHUNK_SIZE(self);

	if (IS_MMAPPED(self)) {
		size_t extra = self->psize;
		char *base = (char *)self - extra;
		size_t oldlen = n0 + extra;
		size_t newlen = n + extra;
		/* Crash on realloc of freed chunk */
		if (extra & 1) a_crash();
		if (newlen < PAGE_SIZE && (new = malloc(n))) {
			memcpy(new, p, n-OVERHEAD);
			free(p);
			return new;
		}
		newlen = (newlen + PAGE_SIZE-1) & -PAGE_SIZE;
		if (oldlen == newlen) return p;
		base = __mremap(base, oldlen, newlen, MREMAP_MAYMOVE);
		if (base == (void *)-1)
			return newlen < oldlen ? p : 0;
		self = (void *)(base + extra);
		self->csize = newlen - extra;
		return CHUNK_TO_MEM(self);
	}

	next = NEXT_CHUNK(self);

	/* Merge adjacent chunks if we need more space. This is not
	 * a waste of time even if we fail to get enough space, because our
	 * subsequent call to free would otherwise have to do the merge. */
	if (n > n1 && alloc_fwd(next)) {
		n1 += CHUNK_SIZE(next);
		next = NEXT_CHUNK(next);
	}
	/* FIXME: find what's wrong here and reenable it..? */
	if (0 && n > n1 && alloc_rev(self)) {
		self = PREV_CHUNK(self);
		n1 += CHUNK_SIZE(self);
	}
	self->csize = n1 | C_INUSE;
	next->psize = n1 | C_INUSE;

	/* If we got enough space, split off the excess and return */
	if (n <= n1) {
		//memmove(CHUNK_TO_MEM(self), p, n0-OVERHEAD);
		trim(self, n);
		return CHUNK_TO_MEM(self);
	}

	/* As a last resort, allocate a new chunk and copy to it. */
	new = malloc(n-OVERHEAD);
	if (!new) return 0;
	memcpy(new, p, n0-OVERHEAD);
	free(CHUNK_TO_MEM(self));
	return new;
}

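/* free: mmap-serviced chunks are unmapped outright. Heap chunks are
 * coalesced with free neighbors in a retry loop, then linked onto the
 * tail of the appropriate bin under both the bin lock and free_lock.
 * When merging produces a chunk past the RECLAIM threshold, the
 * interior pages are handed back to the OS with MADV_DONTNEED while
 * the boundary tags stay intact. */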
void free(void *p)
{
	struct chunk *self = MEM_TO_CHUNK(p);
	struct chunk *next;
	size_t final_size, new_size, size;
	int reclaim=0;
	int i;

	if (!p) return;

	if (IS_MMAPPED(self)) {
		size_t extra = self->psize;
		char *base = (char *)self - extra;
		size_t len = CHUNK_SIZE(self) + extra;
		/* Crash on double free */
		if (extra & 1) a_crash();
		__munmap(base, len);
		return;
	}

	final_size = new_size = CHUNK_SIZE(self);
	next = NEXT_CHUNK(self);

	for (;;) {
		/* Replace middle of large chunks with fresh zero pages */
		if (reclaim && (self->psize & next->csize & C_INUSE)) {
			uintptr_t a = (uintptr_t)self + SIZE_ALIGN+PAGE_SIZE-1 & -PAGE_SIZE;
			uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
#if 1
			__madvise((void *)a, b-a, MADV_DONTNEED);
#else
			__mmap((void *)a, b-a, PROT_READ|PROT_WRITE,
				MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
#endif
		}

		if (self->psize & next->csize & C_INUSE) {
			self->csize = final_size | C_INUSE;
			next->psize = final_size | C_INUSE;
			i = bin_index(final_size);
			lock_bin(i);
			lock(mal.free_lock);
			if (self->psize & next->csize & C_INUSE)
				break;
			unlock(mal.free_lock);
			unlock_bin(i);
		}

		if (alloc_rev(self)) {
			self = PREV_CHUNK(self);
			size = CHUNK_SIZE(self);
			final_size += size;
			if (new_size+size > RECLAIM && (new_size+size^size) > size)
				reclaim = 1;
		}

		if (alloc_fwd(next)) {
			size = CHUNK_SIZE(next);
			final_size += size;
			if (new_size+size > RECLAIM && (new_size+size^size) > size)
				reclaim = 1;
			next = NEXT_CHUNK(next);
		}
	}

	self->csize = final_size;
	next->psize = final_size;
	unlock(mal.free_lock);

	self->next = BIN_TO_CHUNK(i);
	self->prev = mal.bins[i].tail;
	self->next->prev = self;
	self->prev->next = self;

	if (!(mal.binmap & 1ULL<<i))
		a_or_64(&mal.binmap, 1ULL<<i);

	unlock_bin(i);
}