/*
  Default header file for malloc-2.8.x, written by Doug Lea
  and released to the public domain, as explained at
  http://creativecommons.org/licenses/publicdomain.

  last update: Mon Aug 15 08:55:52 2005  Doug Lea  (dl at gee)

  This header is for ANSI C/C++ only.  You can set any of
  the following #defines before including:

  * If USE_DL_PREFIX is defined, it is assumed that malloc.c
    was also compiled with this option, so all routines
    have names starting with "dl".

  * If HAVE_USR_INCLUDE_MALLOC_H is defined, it is assumed that this
    file will be #included AFTER <malloc.h>. This is needed only if
    your system defines a struct mallinfo that is incompatible with the
    standard one declared here.  Otherwise, you can include this file
    INSTEAD of your system <malloc.h>.  At least on ANSI, all
    declarations should be compatible with system versions.

  * If MSPACES is defined, declarations for mspace versions are included.
*/

#ifndef MALLOC_280_H
#define MALLOC_280_H

#ifdef __cplusplus
extern "C" {
#endif

#include <stddef.h>   /* for size_t */

#if !ONLY_MSPACES

/* Check an additional macro for the five primary functions */
#if !defined(USE_DL_PREFIX)
#define dlcalloc               calloc
#define dlfree                 free
#define dlmalloc               malloc
#define dlmemalign             memalign
#define dlrealloc              realloc
#endif

#ifndef USE_DL_PREFIX
#define dlvalloc               valloc
#define dlpvalloc              pvalloc
#define dlmallinfo             mallinfo
#define dlmallopt              mallopt
#define dlmalloc_trim          malloc_trim
#define dlmalloc_walk_free_pages \
                               malloc_walk_free_pages
#define dlmalloc_walk_heap \
                               malloc_walk_heap
#define dlmalloc_stats         malloc_stats
#define dlmalloc_usable_size   malloc_usable_size
#define dlmalloc_footprint     malloc_footprint
#define dlmalloc_max_allowed_footprint \
                               malloc_max_allowed_footprint
#define dlmalloc_set_max_allowed_footprint \
                               malloc_set_max_allowed_footprint
#define dlmalloc_max_footprint malloc_max_footprint
#define dlindependent_calloc   independent_calloc
#define dlindependent_comalloc independent_comalloc
#endif /* USE_DL_PREFIX */

/*
  malloc(size_t n)
  Returns a pointer to a newly allocated chunk of at least n bytes, or
  null if no space is available, in which case errno is set to ENOMEM
  on ANSI C systems.

  If n is zero, malloc returns a minimum-sized chunk. (The minimum
  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
  systems.)  Note that size_t is an unsigned type, so calls with
  arguments that would be negative if signed are interpreted as
  requests for huge amounts of space, which will often fail. The
  maximum supported value of n differs across systems, but is in all
  cases less than the maximum representable value of a size_t.
*/
void* dlmalloc(size_t);

/*
  free(void* p)
  Releases the chunk of memory pointed to by p, that had been previously
  allocated using malloc or a related routine such as realloc.
  It has no effect if p is null.  If p was not malloced or already
  freed, free(p) will by default cause the current program to abort.
*/
void  dlfree(void*);

/*
  calloc(size_t n_elements, size_t element_size);
  Returns a pointer to n_elements * element_size bytes, with all locations
  set to zero.
*/
void* dlcalloc(size_t, size_t);

/*
  realloc(void* p, size_t n)
  Returns a pointer to a chunk of size n that contains the same data
  as does chunk p up to the minimum of (n, p's size) bytes, or null
  if no space is available.

  The returned pointer may or may not be the same as p. The algorithm
  prefers extending p in most cases when possible, otherwise it
  employs the equivalent of a malloc-copy-free sequence.

  If p is null, realloc is equivalent to malloc.

  If space is not available, realloc returns null, errno is set (if on
  ANSI) and p is NOT freed.

  If n is for fewer bytes than already held by p, the newly unused
  space is lopped off and freed if possible.  realloc with a size
  argument of zero (re)allocates a minimum-sized chunk.

  The old unix realloc convention of allowing the last-free'd chunk
  to be used as an argument to realloc is not supported.
*/
void* dlrealloc(void*, size_t);
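
/*
  Usage sketch (illustrative, not part of the original header): a
  typical grow-a-buffer pattern with the routines above.  Assigning the
  realloc result to a temporary avoids leaking the old block when
  realloc fails and returns null.

    int grow_buffer(void) {
      size_t n = 64;
      char* buf;
      char* bigger;
      buf = (char*)dlmalloc(n);
      if (buf == 0) return -1;              // errno is ENOMEM
      bigger = (char*)dlrealloc(buf, 2*n);  // contents of buf preserved
      if (bigger == 0) {                    // buf is still valid here
        dlfree(buf);
        return -1;
      }
      dlfree(bigger);
      return 0;
    }
*/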

/*
  memalign(size_t alignment, size_t n);
  Returns a pointer to a newly allocated chunk of n bytes, aligned
  in accord with the alignment argument.

  The alignment argument should be a power of two. If the argument is
  not a power of two, the nearest greater power is used.
  8-byte alignment is guaranteed by normal malloc calls, so don't
  bother calling memalign with an argument of 8 or less.

  Overreliance on memalign is a sure way to fragment space.
*/
void* dlmemalign(size_t, size_t);
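
/*
  Usage sketch (illustrative): requesting a 64-byte-aligned block, for
  example to match a cache-line size.  The 64 here is an arbitrary
  power of two, not something this header prescribes.

    #include <assert.h>

    void aligned_example(void) {
      void* p = dlmemalign(64, 1024);
      if (p != 0) {
        assert(((size_t)p & 63) == 0);   // address is a multiple of 64
        dlfree(p);
      }
    }
*/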

/*
  valloc(size_t n);
  Equivalent to memalign(pagesize, n), where pagesize is the page
  size of the system. If the pagesize is unknown, 4096 is used.
*/
void* dlvalloc(size_t);

/*
  mallopt(int parameter_number, int parameter_value)
  Sets tunable parameters.  The format is to provide a
  (parameter-number, parameter-value) pair.  mallopt then sets the
  corresponding parameter to the argument value if it can (i.e., so
  long as the value is meaningful), and returns 1 if successful else
  0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
  normally defined in malloc.h.  None of these are used in this malloc,
  so setting them has no effect.  But this malloc also supports other
  options in mallopt:

  Symbol            param #  default      allowed param values
  M_TRIM_THRESHOLD     -1    2*1024*1024  any (-1U disables trimming)
  M_GRANULARITY        -2    page size    any power of 2 >= page size
  M_MMAP_THRESHOLD     -3    256*1024     any (or 0 if no MMAP support)
*/
int dlmallopt(int, int);

#define M_TRIM_THRESHOLD     (-1)
#define M_GRANULARITY        (-2)
#define M_MMAP_THRESHOLD     (-3)
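
/*
  Usage sketch (illustrative): tuning the allocator at startup.  The
  values are arbitrary examples, not recommendations; mallopt returns 1
  on success and 0 if a value is rejected.

    void tune_allocator(void) {
      dlmallopt(M_TRIM_THRESHOLD, 128*1024);  // trim when 128KB is trimmable
      dlmallopt(M_MMAP_THRESHOLD, 512*1024);  // mmap requests >= 512KB
    }
*/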


/*
  malloc_footprint();
  Returns the number of bytes obtained from the system.  The total
  number of bytes allocated by malloc, realloc etc., is less than this
  value.  Unlike mallinfo, this function returns only a precomputed
  result, so can be called frequently to monitor memory consumption.
  Even if locks are otherwise defined, this function does not use them,
  so results might not be up to date.
*/
size_t dlmalloc_footprint(void);

/*
  malloc_max_allowed_footprint();
  Returns the number of bytes that the heap is allowed to obtain
  from the system.  malloc_footprint() should always return a
  size less than or equal to max_allowed_footprint, unless the
  max_allowed_footprint was set to a value smaller than the
  footprint at the time.

  This function is only available if dlmalloc.c was compiled
  with USE_MAX_ALLOWED_FOOTPRINT set.
*/
size_t dlmalloc_max_allowed_footprint(void);

/*
  malloc_set_max_allowed_footprint();
  Set the maximum number of bytes that the heap is allowed to
  obtain from the system.  The size will be rounded up to a whole
  page, and the rounded number will be returned from future calls
  to malloc_max_allowed_footprint().  If the new max_allowed_footprint
  is larger than the current footprint, the heap will never grow
  larger than max_allowed_footprint.  If the new max_allowed_footprint
  is smaller than the current footprint, the heap will not grow
  further.

  This function is only available if dlmalloc.c was compiled
  with USE_MAX_ALLOWED_FOOTPRINT set.

  TODO: try to force the heap to give up memory in the shrink case,
  and update this comment once that happens.
*/
void dlmalloc_set_max_allowed_footprint(size_t bytes);
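
/*
  Usage sketch (illustrative, and only meaningful when dlmalloc.c was
  built with USE_MAX_ALLOWED_FOOTPRINT): capping heap growth at 16MB.
  The cap is rounded up to a whole page internally.

    void cap_heap(void) {
      size_t cap;
      dlmalloc_set_max_allowed_footprint(16*1024*1024);
      cap = dlmalloc_max_allowed_footprint();   // the page-rounded cap
      // Per the comment above, the heap will not grow beyond cap, so
      // requests that would need more space from the system will fail.
      (void)cap;
    }
*/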

/*
  malloc_max_footprint();
  Returns the maximum number of bytes obtained from the system.  This
  value will be greater than current footprint if deallocated space
  has been reclaimed by the system.  The peak number of bytes allocated
  by malloc, realloc etc., is less than this value.  Unlike mallinfo,
  this function returns only a precomputed result, so can be called
  frequently to monitor memory consumption.  Even if locks are
  otherwise defined, this function does not use them, so results might
  not be up to date.
*/
size_t dlmalloc_max_footprint(void);
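
/*
  Usage sketch (illustrative): a cheap memory probe.  Both calls return
  precomputed values, so polling them is inexpensive, though (as noted
  above) the results may be slightly stale if other threads are
  allocating concurrently.

    #include <stdio.h>

    void report_heap(void) {
      printf("heap: %lu bytes from system (peak %lu)\n",
             (unsigned long)dlmalloc_footprint(),
             (unsigned long)dlmalloc_max_footprint());
    }
*/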

#if !NO_MALLINFO
/*
  mallinfo()
  Returns (by copy) a struct containing various summary statistics:

  arena:     current total non-mmapped bytes allocated from system
  ordblks:   the number of free chunks
  smblks:    always zero.
  hblks:     current number of mmapped regions
  hblkhd:    total bytes held in mmapped regions
  usmblks:   the maximum total allocated space. This will be greater
               than current total if trimming has occurred.
  fsmblks:   always zero
  uordblks:  current total allocated space (normal or mmapped)
  fordblks:  total free space
  keepcost:  the maximum number of bytes that could ideally be released
               back to system via malloc_trim. ("ideally" means that
               it ignores page restrictions etc.)

  Because these fields are ints, but internal bookkeeping may
  be kept as longs, the reported values may wrap around zero and
  thus be inaccurate.
*/
#ifndef HAVE_USR_INCLUDE_MALLOC_H
#ifndef _MALLOC_H_
#ifndef MALLINFO_FIELD_TYPE
#define MALLINFO_FIELD_TYPE size_t
#endif /* MALLINFO_FIELD_TYPE */
struct mallinfo {
  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
  MALLINFO_FIELD_TYPE smblks;   /* always 0 */
  MALLINFO_FIELD_TYPE hblks;    /* always 0 */
  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
  MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
  MALLINFO_FIELD_TYPE fordblks; /* total free space */
  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
};
#endif /* _MALLOC_H_ */
#endif /* HAVE_USR_INCLUDE_MALLOC_H */

struct mallinfo dlmallinfo(void);
#endif /* NO_MALLINFO */
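
/*
  Usage sketch (illustrative): summarizing heap state via mallinfo.
  Field meanings are described in the comment above.

    #include <stdio.h>

    void report_mallinfo(void) {
      struct mallinfo mi = dlmallinfo();
      printf("in use: %lu bytes, free: %lu bytes, releasable: %lu bytes\n",
             (unsigned long)mi.uordblks,
             (unsigned long)mi.fordblks,
             (unsigned long)mi.keepcost);
    }
*/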

/*
  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);

  independent_calloc is similar to calloc, but instead of returning a
  single cleared space, it returns an array of pointers to n_elements
  independent elements that can hold contents of size elem_size, each
  of which starts out cleared, and can be independently freed,
  realloc'ed etc.  The elements are guaranteed to be adjacently
  allocated (this is not guaranteed to occur with multiple callocs or
  mallocs), which may also improve cache locality in some
  applications.

  The "chunks" argument is optional (i.e., may be null, which is
  probably the most typical usage). If it is null, the returned array
  is itself dynamically allocated and should also be freed when it is
  no longer needed. Otherwise, the chunks array must be of at least
  n_elements in length. It is filled in with the pointers to the
  chunks.

  In either case, independent_calloc returns this pointer array, or
  null if the allocation failed.  If n_elements is zero and "chunks"
  is null, it returns a chunk representing an array with zero elements
  (which should be freed if not wanted).

  Each element must be individually freed when it is no longer
  needed. If you'd like to instead be able to free all at once, you
  should instead use regular calloc and assign pointers into this
  space to represent elements.  (In this case though, you cannot
  independently free elements.)

  independent_calloc simplifies and speeds up implementations of many
  kinds of pools.  It may also be useful when constructing large data
  structures that initially have a fixed number of fixed-sized nodes,
  but the number is not known at compile time, and some of the nodes
  may later need to be freed. For example:

  struct Node { int item; struct Node* next; };

  struct Node* build_list() {
    struct Node** pool;
    int i;
    int n = read_number_of_nodes_needed();
    if (n <= 0) return 0;
    pool = (struct Node**)independent_calloc(n, sizeof(struct Node), 0);
    if (pool == 0) die();
    // organize into a linked list...
    struct Node* first = pool[0];
    for (i = 0; i < n-1; ++i)
      pool[i]->next = pool[i+1];
    free(pool);     // Can now free the array (or not, if it is needed later)
    return first;
  }
*/
void** dlindependent_calloc(size_t, size_t, void**);

/*
  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);

  independent_comalloc allocates, all at once, a set of n_elements
  chunks with sizes indicated in the "sizes" array.  It returns
  an array of pointers to these elements, each of which can be
  independently freed, realloc'ed etc.  The elements are guaranteed to
  be adjacently allocated (this is not guaranteed to occur with
  multiple callocs or mallocs), which may also improve cache locality
  in some applications.

  The "chunks" argument is optional (i.e., may be null). If it is null
  the returned array is itself dynamically allocated and should also
  be freed when it is no longer needed. Otherwise, the chunks array
  must be of at least n_elements in length. It is filled in with the
  pointers to the chunks.

  In either case, independent_comalloc returns this pointer array, or
  null if the allocation failed.  If n_elements is zero and chunks is
  null, it returns a chunk representing an array with zero elements
  (which should be freed if not wanted).

  Each element must be individually freed when it is no longer
  needed. If you'd like to instead be able to free all at once, you
  should instead use a single regular malloc, and assign pointers at
  particular offsets in the aggregate space. (In this case though, you
  cannot independently free elements.)

  independent_comalloc differs from independent_calloc in that each
  element may have a different size, and also that it does not
  automatically clear elements.

  independent_comalloc can be used to speed up allocation in cases
  where several structs or objects must always be allocated at the
  same time.  For example:

  struct Head { ... };
  struct Foot { ... };

  void send_message(char* msg) {
    int msglen = strlen(msg);
    size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
    void* chunks[3];
    if (independent_comalloc(3, sizes, chunks) == 0)
      die();
    struct Head* head = (struct Head*)(chunks[0]);
    char*        body = (char*)(chunks[1]);
    struct Foot* foot = (struct Foot*)(chunks[2]);
    // ...
  }

  In general though, independent_comalloc is worth using only for
  larger values of n_elements. For small values, you probably won't
  detect enough difference from series of malloc calls to bother.

  Overuse of independent_comalloc can increase overall memory usage,
  since it cannot reuse existing noncontiguous small chunks that
  might be available for some of the elements.
*/
void** dlindependent_comalloc(size_t, size_t*, void**);


/*
  pvalloc(size_t n);
  Equivalent to valloc(minimum-page-that-holds(n)), that is,
  round up n to nearest pagesize.
*/
void* dlpvalloc(size_t);

/*
  malloc_trim(size_t pad);

  If possible, gives memory back to the system (via negative arguments
  to sbrk) if there is unused memory at the `high' end of the malloc
  pool or in unused MMAP segments. You can call this after freeing
  large blocks of memory to potentially reduce the system-level memory
  requirements of a program. However, it cannot guarantee to reduce
  memory. Under some allocation patterns, some large free blocks of
  memory will be locked between two used chunks, so they cannot be
  given back to the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed. If this argument is zero, only
  the minimum amount of memory to maintain internal data structures
  will be left.  Non-zero arguments can be supplied to maintain enough
  trailing space to service future expected allocations without having
  to re-obtain memory from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.
*/
int dlmalloc_trim(size_t);
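
/*
  Usage sketch (illustrative): returning memory to the system after
  dropping a large working set, while keeping 1MB of slack for expected
  future allocations.  The 1MB pad and release_caches() are arbitrary
  examples, not part of this header.

    void shrink_after_burst(void) {
      release_caches();                 // hypothetical application call
      if (dlmalloc_trim(1024*1024)) {
        // some memory was actually handed back to the system
      }
    }
*/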

/*
  malloc_walk_free_pages(handler, harg)

  Calls the provided handler on each free region in the heap.  The
  memory between start and end is guaranteed not to contain any
  important data, so the handler is free to alter the contents
  in any way.  This can be used to advise the OS that large free
  regions may be swapped out.

  The value in harg will be passed to each call of the handler.
*/
void dlmalloc_walk_free_pages(void(*handler)(void *start, void *end, void *arg),
                              void *harg);
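
/*
  Usage sketch (illustrative): advising the kernel that free regions
  need not stay resident.  madvise and MADV_DONTNEED are a POSIX/Linux
  assumption, not something this header provides, and madvise expects
  page-aligned ranges; discarding the contents is safe here because the
  walker guarantees the range holds no live data.

    #include <sys/mman.h>

    static void release_region(void* start, void* end, void* harg) {
      (void)harg;   // unused
      madvise(start, (size_t)((char*)end - (char*)start), MADV_DONTNEED);
    }

    // ... then, after freeing large blocks:
    // dlmalloc_walk_free_pages(release_region, 0);
*/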

/*
  malloc_walk_heap(handler, harg)

  Calls the provided handler on each object or free region in the
  heap.  The handler will receive the chunk pointer and length, the
  object pointer and length, and the value in harg on each call.
*/
void dlmalloc_walk_heap(void(*handler)(const void *chunkptr, size_t chunklen,
                                       const void *userptr, size_t userlen,
                                       void *arg),
                        void *harg);
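
/*
  Usage sketch (illustrative): dumping every chunk for debugging.  The
  chunk pointer/length describe the region as the allocator manages it;
  the user pointer/length describe the caller-visible object within it.

    #include <stdio.h>

    static void dump_chunk(const void* chunkptr, size_t chunklen,
                           const void* userptr, size_t userlen,
                           void* harg) {
      fprintf((FILE*)harg, "chunk %p (%lu bytes), object %p (%lu bytes)\n",
              chunkptr, (unsigned long)chunklen,
              userptr, (unsigned long)userlen);
    }

    // dlmalloc_walk_heap(dump_chunk, stderr);
*/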

/*
  malloc_usable_size(void* p);

  Returns the number of bytes you can actually use in
  an allocated chunk, which may be more than you requested (although
  often not) due to alignment and minimum size constraints.
  You can use this many bytes without worrying about
  overwriting other allocated objects. This is not a particularly great
  programming practice. malloc_usable_size can be more useful in
  debugging and assertions, for example:

  p = malloc(n);
  assert(malloc_usable_size(p) >= 256);
*/
size_t dlmalloc_usable_size(void*);

/*
  malloc_stats();
  Prints on stderr the amount of space obtained from the system (both
  via sbrk and mmap), the maximum amount (which may be more than
  current if malloc_trim and/or munmap got called), and the current
  number of bytes allocated via malloc (or realloc, etc) but not yet
  freed.  Note that this is the number of bytes allocated, not the
  number requested.  It will be larger than the number requested
  because of alignment and bookkeeping overhead.  Because it includes
  alignment wastage as being in use, this figure may be greater than
  zero even when no user-level chunks are allocated.

  The reported current and maximum system memory can be inaccurate if
  a program makes other calls to system memory allocation functions
  (normally sbrk) outside of malloc.

  malloc_stats prints only the most commonly interesting statistics.
  More information can be obtained by calling mallinfo.
*/
void dlmalloc_stats(void);

#endif /* !ONLY_MSPACES */

#if MSPACES

/*
  mspace is an opaque type representing an independent
  region of space that supports mspace_malloc, etc.
*/
typedef void* mspace;

/*
  create_mspace creates and returns a new independent space with the
  given initial capacity, or, if 0, the default granularity size.  It
  returns null if there is no system memory available to create the
  space.  If argument locked is non-zero, the space uses a separate
  lock to control access. The capacity of the space will grow
  dynamically as needed to service mspace_malloc requests.  You can
  control the sizes of incremental increases of this space by
  compiling with a different DEFAULT_GRANULARITY or dynamically
  setting with mallopt(M_GRANULARITY, value).
*/
mspace create_mspace(size_t capacity, int locked);

/*
  destroy_mspace destroys the given space, and attempts to return all
  of its memory back to the system, returning the total number of
  bytes freed. After destruction, the results of access to all memory
  used by the space become undefined.
*/
size_t destroy_mspace(mspace msp);
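
/*
  Usage sketch (illustrative): an arena-per-subsystem pattern.  Because
  destroy_mspace returns all of the space's memory at once, individual
  mspace_free calls are only needed when memory must be recycled early.

    void arena_example(void) {
      mspace arena = create_mspace(0, 0);   // default capacity, no locking
      if (arena != 0) {
        void* a = mspace_malloc(arena, 128);
        void* b = mspace_malloc(arena, 4096);
        // ... use a and b ...
        destroy_mspace(arena);              // reclaims a, b, and the space
      }
    }
*/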

/*
  create_mspace_with_base uses the memory supplied as the initial base
  of a new mspace.  Part (less than 128*sizeof(size_t) bytes) of this
  space is used for bookkeeping, so the capacity must be at least this
  large. (Otherwise 0 is returned.)  When this initial space is
  exhausted, additional memory will be obtained from the system.
  Destroying this space will deallocate all additionally allocated
  space (if possible) but not the initial base.
*/
mspace create_mspace_with_base(void* base, size_t capacity, int locked);
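
/*
  Usage sketch (illustrative): carving an mspace out of a static
  buffer.  Part of the buffer (less than 128*sizeof(size_t) bytes) is
  consumed by bookkeeping, and the buffer itself is never returned to
  the system by destroy_mspace.

    static char arena_buf[64*1024];

    void static_arena_example(void) {
      mspace arena = create_mspace_with_base(arena_buf, sizeof(arena_buf), 0);
      if (arena != 0) {
        void* p = mspace_malloc(arena, 256);
        mspace_free(arena, p);
        destroy_mspace(arena);  // releases only additionally obtained memory
      }
    }
*/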

/*
  mspace_malloc behaves as malloc, but operates within
  the given space.
*/
void* mspace_malloc(mspace msp, size_t bytes);

/*
  mspace_free behaves as free, but operates within
  the given space.

  If compiled with FOOTERS==1, mspace_free is not actually needed.
  free may be called instead of mspace_free because freed chunks from
  any space are handled by their originating spaces.
*/
void mspace_free(mspace msp, void* mem);

/*
  mspace_realloc behaves as realloc, but operates within
  the given space.

  If compiled with FOOTERS==1, mspace_realloc is not actually
  needed.  realloc may be called instead of mspace_realloc because
  realloced chunks from any space are handled by their originating
  spaces.
*/
void* mspace_realloc(mspace msp, void* mem, size_t newsize);

/*
  mspace_merge_objects will merge allocated memory mema and memb
  together, provided memb immediately follows mema.  It is roughly as
  if memb has been freed and mema has been realloced to a larger size.
  On successfully merging, mema will be returned.  If either argument
  is null or memb does not immediately follow mema, null will be
  returned.

  Both mema and memb should have been previously allocated using
  malloc or a related routine such as realloc.  If either mema or memb
  was not malloced or was previously freed, the result is undefined,
  but like mspace_free, the default is to abort the program.
*/
void* mspace_merge_objects(mspace msp, void* mema, void* memb);
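
/*
  Usage sketch (illustrative): coalescing two adjacent allocations.
  Adjacency is assumed to hold here because the chunks come from
  mspace_independent_comalloc (declared below), which allocates its
  elements adjacently; with unrelated mallocs the merge would simply
  return null.  msp is an existing mspace.

    void merge_example(mspace msp) {
      size_t sizes[2] = { 64, 192 };
      void* chunks[2];
      if (mspace_independent_comalloc(msp, 2, sizes, chunks) != 0) {
        void* merged = mspace_merge_objects(msp, chunks[0], chunks[1]);
        // on success, merged == chunks[0] and spans both regions
        if (merged != 0)
          mspace_free(msp, merged);
      }
    }
*/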

/*
  mspace_calloc behaves as calloc, but operates within
  the given space.
*/
void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);

/*
  mspace_memalign behaves as memalign, but operates within
  the given space.
*/
void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);

/*
  mspace_independent_calloc behaves as independent_calloc, but
  operates within the given space.
*/
void** mspace_independent_calloc(mspace msp, size_t n_elements,
                                 size_t elem_size, void* chunks[]);

/*
  mspace_independent_comalloc behaves as independent_comalloc, but
  operates within the given space.
*/
void** mspace_independent_comalloc(mspace msp, size_t n_elements,
                                   size_t sizes[], void* chunks[]);

/*
  mspace_footprint() returns the number of bytes obtained from the
  system for this space.
*/
size_t mspace_footprint(mspace msp);

/*
  mspace_max_allowed_footprint() returns the number of bytes that
  this space is allowed to obtain from the system.  See
  malloc_max_allowed_footprint() for a more in-depth description.

  This function is only available if dlmalloc.c was compiled
  with USE_MAX_ALLOWED_FOOTPRINT set.
*/
size_t mspace_max_allowed_footprint(mspace msp);

/*
  mspace_set_max_allowed_footprint() sets the maximum number of
  bytes (rounded up to a page) that this space is allowed to
  obtain from the system.  See malloc_set_max_allowed_footprint()
  for a more in-depth description.

  This function is only available if dlmalloc.c was compiled
  with USE_MAX_ALLOWED_FOOTPRINT set.
*/
void mspace_set_max_allowed_footprint(mspace msp, size_t bytes);

/*
  mspace_max_footprint() returns the maximum number of bytes obtained
  from the system over the lifetime of this space.
*/
size_t mspace_max_footprint(mspace msp);


#if !NO_MALLINFO
/*
  mspace_mallinfo behaves as mallinfo, but reports properties of
  the given space.
*/
struct mallinfo mspace_mallinfo(mspace msp);
#endif /* NO_MALLINFO */

/*
  mspace_malloc_stats behaves as malloc_stats, but reports
  properties of the given space.
*/
void mspace_malloc_stats(mspace msp);

/*
  mspace_trim behaves as malloc_trim, but
  operates within the given space.
*/
int mspace_trim(mspace msp, size_t pad);

/*
  An alias for mallopt.
*/
int mspace_mallopt(int, int);

#endif /* MSPACES */

#ifdef __cplusplus
};  /* end of extern "C" */
#endif

#endif /* MALLOC_280_H */