/* An object allocator for Python.

   Here is an introduction to the layers of the Python memory architecture,
   showing where the object allocator is actually used (layer +2). It is
   called for every object allocation and deallocation (PyObject_New/Del),
   unless the object-specific allocators implement a proprietary allocation
   scheme (ex.: ints use a simple free list). This is also the place where
   the cyclic garbage collector operates selectively on container objects.


        Object-specific allocators
    _____   ______   ______       ________
   [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
    _______________________________       |                           |
   [   Python's object allocator   ]      |                           |
+2 | ####### Object memory ####### | <------ Internal buffers ------> |
    ______________________________________________________________    |
   [          Python's raw memory allocator (PyMem_ API)          ]   |
+1 | <----- Python memory (under PyMem manager's control) ------> |   |
    __________________________________________________________________
   [    Underlying general-purpose allocator (ex: C library malloc)   ]
 0 | <------ Virtual memory allocated for the python process -------> |

   =========================================================================
    _______________________________________________________________________
   [                OS-specific Virtual Memory Manager (VMM)               ]
-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
    __________________________________   __________________________________
   [                                  ] [                                  ]
-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |

*/
/*==========================================================================*/

/* A fast, special-purpose memory allocator for small blocks, to be used
   on top of a general-purpose malloc -- heavily based on previous art. */

/* Vladimir Marangozov -- August 2000 */

/*
 * "Memory management is where the rubber meets the road -- if we do the wrong
 * thing at any level, the results will not be good. And if we don't make the
 * levels work well together, we are in serious trouble." (1)
 *
 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
 *     "Dynamic Storage Allocation: A Survey and Critical Review",
 *     in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
 */

/* #undef WITH_MEMORY_LIMITS */         /* disable mem limit checks  */
#define WITH_MALLOC_HOOKS               /* for profiling & debugging */

/*==========================================================================*/

/*
 * Public functions exported by this allocator.
 *
 * -- Define and use these names in your code to obtain or release memory --
 */
#define _THIS_MALLOC            PyCore_OBJECT_MALLOC_FUNC
#define _THIS_CALLOC            /* unused */
#define _THIS_REALLOC           PyCore_OBJECT_REALLOC_FUNC
#define _THIS_FREE              PyCore_OBJECT_FREE_FUNC

/*
 * Underlying allocator's functions called by this allocator.
 * The underlying allocator is usually the one which comes with libc.
 *
 * -- Don't use these functions in your code (to avoid mixing allocators) --
 *
 * Redefine these __only__ if you are using a 3rd party general purpose
 * allocator which exports functions with names _other_ than the standard
 * malloc, calloc, realloc, free.
 */
#define _SYSTEM_MALLOC          PyCore_MALLOC_FUNC
#define _SYSTEM_CALLOC          /* unused */
#define _SYSTEM_REALLOC         PyCore_REALLOC_FUNC
#define _SYSTEM_FREE            PyCore_FREE_FUNC

/*
 * If malloc hooks are needed, names of the hooks' set & fetch
 * functions exported by this allocator.
 */
#ifdef WITH_MALLOC_HOOKS
#define _SET_HOOKS              _PyCore_ObjectMalloc_SetHooks
#define _FETCH_HOOKS            _PyCore_ObjectMalloc_FetchHooks
#endif

/*==========================================================================*/

/*
 * Allocation strategy abstract:
 *
 * For small requests, the allocator sub-allocates <Big> blocks of memory.
 * Requests greater than 256 bytes are routed to the system's allocator.
 *
 * Small requests are grouped in size classes spaced 8 bytes apart, due
 * to the required valid alignment of the returned address. Requests of
 * a particular size are serviced from memory pools of 4K (one VMM page).
 * Pools are fragmented on demand and contain free lists of blocks of one
 * particular size class. In other words, there is a fixed-size allocator
 * for each size class. Free pools are shared by the different allocators,
 * thus minimizing the space reserved for a particular size class.
 *
 * This allocation strategy is a variant of what is known as "simple
 * segregated storage based on array of free lists". The main drawback of
 * simple segregated storage is that we might end up with a lot of reserved
 * memory for the different free lists, which degenerate over time. To avoid
 * this, we partition each free list in pools and we dynamically share the
 * reserved space between all free lists. This technique is quite efficient
 * for memory-intensive programs which allocate mainly small-sized blocks.
 *
 * For small requests we have the following table:
 *
 * Request in bytes     Size of allocated block      Size class idx
 * ----------------------------------------------------------------
 *        1-8                     8                       0
 *        9-16                   16                       1
 *       17-24                   24                       2
 *       25-32                   32                       3
 *       33-40                   40                       4
 *       41-48                   48                       5
 *       49-56                   56                       6
 *       57-64                   64                       7
 *       65-72                   72                       8
 *        ...                   ...                     ...
 *      241-248                 248                      30
 *      249-256                 256                      31
 *
 * 0, 257 and up: routed to the underlying allocator.
 */
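
/*
 * A minimal sketch of the size class mapping (disabled; the function name
 * is purely illustrative, and the literals 8 and 3 match ALIGNMENT and
 * ALIGNMENT_SHIFT from the tunable settings section below).  This is the
 * same arithmetic the malloc path uses to pick a free list.
 */
#if 0
static unsigned int
size_class_example(size_t nbytes)
{
        unsigned int idx = (unsigned int)(nbytes - 1) >> 3;   /* class index  */
        unsigned int blocksize = (idx + 1) << 3;              /* rounded size */

        /* e.g. nbytes == 21  ->  idx == 2, blocksize == 24 (see table). */
        (void)blocksize;
        return idx;
}
#endif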

/*==========================================================================*/

/*
 * -- Main tunable settings section --
 */

/*
 * Alignment of addresses returned to the user. 8-byte alignment works
 * on most current architectures (with 32-bit or 64-bit address buses).
 * The alignment value is also used for grouping small requests in size
 * classes spaced ALIGNMENT bytes apart.
 *
 * You shouldn't change this unless you know what you are doing.
 */

#define ALIGNMENT               8       /* must be 2^N */
#define ALIGNMENT_SHIFT         3
#define ALIGNMENT_MASK          (ALIGNMENT - 1)

/*
 * Max size threshold below which malloc requests are considered to be
 * small enough to use the preallocated memory pools. You can tune this
 * value according to your application behaviour and memory needs.
 *
 * The following invariants must hold:
 *      1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 256
 *      2) SMALL_REQUEST_THRESHOLD == N * ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD be set to a power of 2.
 */

/*
 * For Python compiled on systems with 32-bit pointers and integers,
 * a value of 64 (= 8 * 8) is a reasonable speed/space tradeoff for
 * the object allocator. To adjust this threshold automatically for
 * systems with 64-bit pointers, we make this setting depend on a
 * Python-specific slot size unit = sizeof(long) + sizeof(void *),
 * which is expected to be 8, 12 or 16 bytes.
 */

#define _PYOBJECT_THRESHOLD     ((SIZEOF_LONG + SIZEOF_VOID_P) * ALIGNMENT)

#define SMALL_REQUEST_THRESHOLD _PYOBJECT_THRESHOLD /* must be N * ALIGNMENT */

#define NB_SMALL_SIZE_CLASSES   (SMALL_REQUEST_THRESHOLD / ALIGNMENT)

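/*
 * Worked example (an illustration of the arithmetic above, assuming common
 * type sizes; nothing here is checked by the build):
 *
 *   ILP32 (SIZEOF_LONG == 4, SIZEOF_VOID_P == 4):
 *       _PYOBJECT_THRESHOLD   == (4 + 4) * 8 == 64
 *       NB_SMALL_SIZE_CLASSES == 64 / 8      == 8
 *
 *   LP64 (SIZEOF_LONG == 8, SIZEOF_VOID_P == 8):
 *       _PYOBJECT_THRESHOLD   == (8 + 8) * 8 == 128
 *       NB_SMALL_SIZE_CLASSES == 128 / 8     == 16
 *
 * Both values satisfy the invariants listed above and keep the threshold
 * a power of 2.
 */
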
/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files. To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be.
 */

#define SYSTEM_PAGE_SIZE        (4 * 1024)
#define SYSTEM_PAGE_SIZE_MASK   (SYSTEM_PAGE_SIZE - 1)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */

#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT      (64 * 1024 * 1024)      /* 64 MB -- more? */
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary. This is a reserved virtual address space for the
 * current process (obtained through a malloc call). This in no way means
 * that the memory arenas will be used entirely. A malloc(<Big>) is usually
 * an address range reservation for <Big> bytes, unless all pages within this
 * space are referenced subsequently. So malloc'ing big blocks and not using
 * them does not mean "wasting memory"; it only wastes addressable range.
 *
 * Therefore, allocating arenas with malloc is not optimal, because there is
 * some address space wastage, but this is the most portable way to request
 * memory from the system across various platforms.
 */

#define ARENA_SIZE              (256 * 1024 - SYSTEM_PAGE_SIZE) /* 256k - 1p */

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS              (SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks. Should be a power of 2,
 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k, possibly 8k.
 */

#define POOL_SIZE               SYSTEM_PAGE_SIZE        /* must be 2^N */
#define POOL_SIZE_MASK          SYSTEM_PAGE_SIZE_MASK
#define POOL_MAGIC              0x74D3A651              /* authentication id */

#define ARENA_NB_POOLS          (ARENA_SIZE / POOL_SIZE)
#define ARENA_NB_PAGES          (ARENA_SIZE / SYSTEM_PAGE_SIZE)
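
/*
 * A minimal sketch of the arena bookkeeping arithmetic (disabled; the
 * function is illustrative only and uses plain char * / unsigned long
 * instead of the block / off_t types defined further down).  An arena of
 * ARENA_SIZE bytes is obtained with malloc(ARENA_SIZE + SYSTEM_PAGE_SIZE)
 * and the first pool starts at the next page boundary, exactly as done in
 * the malloc path below.
 */
#if 0
static void
arena_layout_example(void)
{
        char *raw = (char *)_SYSTEM_MALLOC(ARENA_SIZE + SYSTEM_PAGE_SIZE);
        char *base;

        if (raw == NULL)
                return;
        /* Round the start up to the next page boundary; at most one page
           of address space is lost to alignment.                         */
        base = raw + (SYSTEM_PAGE_SIZE -
                      ((unsigned long)raw & SYSTEM_PAGE_SIZE_MASK));
        /* With the default 4K page, ARENA_SIZE == 252K, so the arena can
           be carved into ARENA_NB_POOLS == 63 pools of POOL_SIZE bytes.  */
        (void)base;
        _SYSTEM_FREE(raw);
}
#endif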

/*
 * -- End of tunable settings section --
 */

/*==========================================================================*/

/*
 * Locking
 *
 * To reduce lock contention, it would probably be better to refine the
 * crude function locking with per size class locking. I'm not positive,
 * however, whether it's worth switching to such a locking policy because
 * of the performance penalty it might introduce.
 *
 * The following macros describe the simplest (should also be the fastest)
 * lock object on a particular platform and the init/fini/lock/unlock
 * operations on it. The locks defined here are not expected to be recursive
 * because it is assumed that they will always be called in the order:
 * INIT, [LOCK, UNLOCK]*, FINI.
 */

/*
 * Python's threads are serialized, so object malloc locking is disabled.
 */
#define SIMPLELOCK_DECL(lock)   /* simple lock declaration              */
#define SIMPLELOCK_INIT(lock)   /* allocate (if needed) and initialize  */
#define SIMPLELOCK_FINI(lock)   /* free/destroy an existing lock        */
#define SIMPLELOCK_LOCK(lock)   /* acquire released lock                */
#define SIMPLELOCK_UNLOCK(lock) /* release acquired lock                */
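
/*
 * A sketch (disabled, and only an assumption about how one might fill in
 * the macros) of SIMPLELOCK_* mapped onto Python's portable thread API
 * from "pythread.h".  It is not needed as long as the interpreter
 * serializes threads around the allocator.
 */
#if 0
#include "pythread.h"

#define SIMPLELOCK_DECL(lock)   static PyThread_type_lock lock = NULL
#define SIMPLELOCK_INIT(lock)   ((lock) = PyThread_allocate_lock())
#define SIMPLELOCK_FINI(lock)   PyThread_free_lock(lock)
#define SIMPLELOCK_LOCK(lock)   PyThread_acquire_lock((lock), 1)
#define SIMPLELOCK_UNLOCK(lock) PyThread_release_lock(lock)
#endif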

/*
 * Basic types
 * I don't care if these are defined in <sys/types.h> or elsewhere. Axiom.
 */

#undef  uchar
#define uchar   unsigned char   /* assuming == 8 bits  */

#undef  ushort
#define ushort  unsigned short  /* assuming >= 16 bits */

#undef  uint
#define uint    unsigned int    /* assuming >= 16 bits */

#undef  ulong
#define ulong   unsigned long   /* assuming >= 32 bits */

#undef  off_t
#define off_t   uint            /* 16 bits <= off_t <= 64 bits */

/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uchar block;

/* Pool for small blocks */
struct pool_header {
        union { block *_padding;
                uint count; } ref;      /* number of allocated blocks    */
        block *freeblock;               /* pool's free list head         */
        struct pool_header *nextpool;   /* next pool of this size class  */
        struct pool_header *prevpool;   /* previous pool       ""        */
        struct pool_header *pooladdr;   /* pool address (always aligned) */
        uint magic;                     /* pool magic number             */
        uint szidx;                     /* block size class index        */
        uint capacity;                  /* pool capacity in # of blocks  */
};

typedef struct pool_header *poolp;

#undef  ROUNDUP
#define ROUNDUP(x)              (((x) + ALIGNMENT_MASK) & ~ALIGNMENT_MASK)
#define POOL_OVERHEAD           ROUNDUP(sizeof(struct pool_header))

#define DUMMY_SIZE_IDX          0xffff  /* size class of newly cached pools */
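
/*
 * A minimal layout sketch (disabled; illustrative only, not used by the
 * allocator): a pool is one page that starts with a pool_header, followed
 * by `capacity' blocks of the pool's size class.  The figures in the
 * comments assume 32-bit pointers, where sizeof(struct pool_header) is 32
 * and POOL_OVERHEAD leaves it unchanged (already a multiple of ALIGNMENT).
 */
#if 0
static void
pool_layout_example(void)
{
        uint size = 24;         /* block size for size class index 2 */
        uint capacity = (POOL_SIZE - POOL_OVERHEAD) / size;

        /* With POOL_SIZE == 4096 and POOL_OVERHEAD == 32 this gives
           capacity == 169.  Block i of a pool lives at
           (block *)pool + POOL_OVERHEAD + i * size, which is how the
           malloc path below extends a pool's free list one block at
           a time.                                                    */
        (void)capacity;
}
#endif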

/*==========================================================================*/

/*
 * This malloc lock
 */
SIMPLELOCK_DECL(_malloc_lock);
#define LOCK()          SIMPLELOCK_LOCK(_malloc_lock)
#define UNLOCK()        SIMPLELOCK_UNLOCK(_malloc_lock)
#define LOCK_INIT()     SIMPLELOCK_INIT(_malloc_lock)
#define LOCK_FINI()     SIMPLELOCK_FINI(_malloc_lock)

/*
 * Pool table -- doubly linked lists of partially used pools
 */
#define PTA(x)  ((poolp )((uchar *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
#define PT(x)   PTA(x), PTA(x)

static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
        PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
#if NB_SMALL_SIZE_CLASSES > 8
        , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
#if NB_SMALL_SIZE_CLASSES > 16
        , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
#if NB_SMALL_SIZE_CLASSES > 24
        , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
#if NB_SMALL_SIZE_CLASSES > 32
        , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
#if NB_SMALL_SIZE_CLASSES > 40
        , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
#if NB_SMALL_SIZE_CLASSES > 48
        , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
#if NB_SMALL_SIZE_CLASSES > 56
        , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
#endif /* NB_SMALL_SIZE_CLASSES > 56 */
#endif /* NB_SMALL_SIZE_CLASSES > 48 */
#endif /* NB_SMALL_SIZE_CLASSES > 40 */
#endif /* NB_SMALL_SIZE_CLASSES > 32 */
#endif /* NB_SMALL_SIZE_CLASSES > 24 */
#endif /* NB_SMALL_SIZE_CLASSES > 16 */
#endif /* NB_SMALL_SIZE_CLASSES > 8 */
};
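
/*
 * How the pool table works (a sketch; the function below is disabled,
 * illustrative only and not part of the allocator): usedpools[i + i] and
 * usedpools[i + i + 1] are overlaid by a fake pool_header whose nextpool
 * and prevpool fields are exactly those two slots.  PTA(x) backs up by
 * 2*sizeof(block *) -- the offset of nextpool within struct pool_header,
 * given that ref holds a block * and freeblock is a block * -- so that
 * usedpools[i + i] can serve as the list header for size class i.  An
 * empty list is then recognized by pool == pool->nextpool.
 */
#if 0
static int
size_class_list_is_empty(uint sizeclass)
{
        poolp header = usedpools[sizeclass + sizeclass];

        return header == header->nextpool;      /* the test used in malloc */
}
#endif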

/*
 * Free (cached) pools
 */
static poolp freepools = NULL;          /* free list for cached pools */

/*
 * Arenas
 */
static uint arenacnt = 0;               /* number of allocated arenas */
static uint watermark = ARENA_NB_POOLS; /* number of pools allocated from
                                           the current arena */
static block *arenalist = NULL;         /* list of allocated arenas */
static block *arenabase = NULL;         /* free space start address in
                                           current arena */

/*
 * Hooks
 */
#ifdef WITH_MALLOC_HOOKS
static void *(*malloc_hook)(size_t) = NULL;
static void *(*calloc_hook)(size_t, size_t) = NULL;
static void *(*realloc_hook)(void *, size_t) = NULL;
static void (*free_hook)(void *) = NULL;
#endif /* !WITH_MALLOC_HOOKS */

/*==========================================================================*/

/* malloc */

/*
 * The basic blocks are ordered by decreasing execution frequency,
 * which minimizes the number of jumps in the most common cases and
 * improves branch prediction and instruction scheduling (small
 * block allocations typically result in a couple of instructions).
 * Unless the optimizer reorders everything, being too smart...
 */

void *
_THIS_MALLOC(size_t nbytes)
{
        block *bp;
        poolp pool;
        poolp next;
        uint size;

#ifdef WITH_MALLOC_HOOKS
        if (malloc_hook != NULL)
                return (*malloc_hook)(nbytes);
#endif

        /*
         * This implicitly redirects malloc(0)
         */
        if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
                LOCK();
                /*
                 * Most frequent paths first
                 */
                size = (uint )(nbytes - 1) >> ALIGNMENT_SHIFT;
                pool = usedpools[size + size];
                if (pool != pool->nextpool) {
                        /*
                         * There is a used pool for this size class.
                         * Pick up the head block of its free list.
                         */
                        ++pool->ref.count;
                        bp = pool->freeblock;
                        if ((pool->freeblock = *(block **)bp) != NULL) {
                                UNLOCK();
                                return (void *)bp;
                        }
                        /*
                         * Reached the end of the free list, try to extend it
                         */
                        if (pool->ref.count < pool->capacity) {
                                /*
                                 * There is room for another block
                                 */
                                size++;
                                size <<= ALIGNMENT_SHIFT; /* block size */
                                pool->freeblock = (block *)pool +
                                                  POOL_OVERHEAD +
                                                  pool->ref.count * size;
                                *(block **)(pool->freeblock) = NULL;
                                UNLOCK();
                                return (void *)bp;
                        }
                        /*
                         * Pool is full, unlink from used pools
                         */
                        next = pool->nextpool;
                        pool = pool->prevpool;
                        next->prevpool = pool;
                        pool->nextpool = next;
                        UNLOCK();
                        return (void *)bp;
                }
                /*
                 * Try to get a cached free pool
                 */
                pool = freepools;
                if (pool != NULL) {
                        /*
                         * Unlink from cached pools
                         */
                        freepools = pool->nextpool;
                init_pool:
                        /*
                         * Frontlink to used pools
                         */
                        next = usedpools[size + size]; /* == prev */
                        pool->nextpool = next;
                        pool->prevpool = next;
                        next->nextpool = pool;
                        next->prevpool = pool;
                        pool->ref.count = 1;
                        if (pool->szidx == size) {
                                /*
                                 * Luckily, this pool last contained blocks
                                 * of the same size class, so its header
                                 * and free list are already initialized.
                                 */
                                bp = pool->freeblock;
                                pool->freeblock = *(block **)bp;
                                UNLOCK();
                                return (void *)bp;
                        }
                        /*
                         * Initialize the pool header and free list,
                         * then return the first block.
                         */
                        pool->szidx = size;
                        size++;
                        size <<= ALIGNMENT_SHIFT; /* block size */
                        bp = (block *)pool + POOL_OVERHEAD;
                        pool->freeblock = bp + size;
                        *(block **)(pool->freeblock) = NULL;
                        pool->capacity = (POOL_SIZE - POOL_OVERHEAD) / size;
                        UNLOCK();
                        return (void *)bp;
                }
                /*
                 * Allocate new pool
                 */
                if (watermark < ARENA_NB_POOLS) {
                        /* commit malloc(POOL_SIZE) from the current arena */
                commit_pool:
                        watermark++;
                        pool = (poolp )arenabase;
                        arenabase += POOL_SIZE;
                        pool->pooladdr = pool;
                        pool->magic = (uint )POOL_MAGIC;
                        pool->szidx = DUMMY_SIZE_IDX;
                        goto init_pool;
                }
                /*
                 * Allocate new arena
                 */
#ifdef WITH_MEMORY_LIMITS
                if (!(arenacnt < MAX_ARENAS)) {
                        UNLOCK();
                        goto redirect;
                }
#endif
                /*
                 * With malloc, we can't avoid losing one page of address
                 * space per arena due to the required alignment on page
                 * boundaries.
                 */
                bp = (block *)_SYSTEM_MALLOC(ARENA_SIZE + SYSTEM_PAGE_SIZE);
                if (bp == NULL) {
                        UNLOCK();
                        goto redirect;
                }
                /*
                 * Keep a reference in the list of allocated arenas. We might
                 * want to release (some of) them in the future. The first
                 * word is never used, no matter whether the returned address
                 * is page-aligned or not, so we safely store a pointer in it.
                 */
                *(block **)bp = arenalist;
                arenalist = bp;
                arenacnt++;
                watermark = 0;
                /* Page-round up */
                arenabase = bp + (SYSTEM_PAGE_SIZE -
                                  ((off_t )bp & SYSTEM_PAGE_SIZE_MASK));
                goto commit_pool;
        }

        /* The small block allocator ends here. */

 redirect:

        /*
         * Redirect the original request to the underlying (libc) allocator.
         * We jump here on bigger requests, on error in the code above (as a
         * last chance to serve the request) or when the max memory limit
         * has been reached.
         */
        return (void *)_SYSTEM_MALLOC(nbytes);
}

/* free */

void
_THIS_FREE(void *p)
{
        poolp pool;
        poolp next, prev;
        uint size;
        off_t offset;

#ifdef WITH_MALLOC_HOOKS
        if (free_hook != NULL) {
                (*free_hook)(p);
                return;
        }
#endif

        if (p == NULL)  /* free(NULL) has no effect */
                return;

        offset = (off_t )p & POOL_SIZE_MASK;
        pool = (poolp )((block *)p - offset);
        if (pool->pooladdr != pool || pool->magic != (uint )POOL_MAGIC) {
                _SYSTEM_FREE(p);
                return;
        }

        LOCK();
        /*
         * At this point, the pool is not empty
         */
        if ((*(block **)p = pool->freeblock) == NULL) {
                /*
                 * Pool was full
                 */
                pool->freeblock = (block *)p;
                --pool->ref.count;
                /*
                 * Frontlink to used pools
                 * This mimics LRU pool usage for new allocations and
                 * targets optimal filling when several pools contain
                 * blocks of the same size class.
                 */
                size = pool->szidx;
                next = usedpools[size + size];
                prev = next->prevpool;
                pool->nextpool = next;
                pool->prevpool = prev;
                next->prevpool = pool;
                prev->nextpool = pool;
                UNLOCK();
                return;
        }
        /*
         * Pool was not full
         */
        pool->freeblock = (block *)p;
        if (--pool->ref.count != 0) {
                UNLOCK();
                return;
        }
        /*
         * Pool is now empty, unlink from used pools
         */
        next = pool->nextpool;
        prev = pool->prevpool;
        next->prevpool = prev;
        prev->nextpool = next;
        /*
         * Frontlink to free pools
         * This ensures that previously freed pools will be allocated
         * later (not being referenced, they are perhaps paged out).
         */
        pool->nextpool = freepools;
        freepools = pool;
        UNLOCK();
        return;
}
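
/*
 * How free() decides whether it owns a block (a sketch; the function below
 * is disabled, illustrative only and not called anywhere): since pools are
 * carved from page-aligned arena space and POOL_SIZE is the page size,
 * masking the low bits of the block address recovers the enclosing pool
 * header, which is then authenticated with pooladdr and POOL_MAGIC before
 * the block is touched.  Blocks that fail the check were handed out by
 * _SYSTEM_MALLOC and are released with _SYSTEM_FREE instead.
 */
#if 0
static int
block_is_ours_example(void *p)
{
        poolp pool = (poolp )((block *)p - ((off_t )p & POOL_SIZE_MASK));

        return pool->pooladdr == pool && pool->magic == (uint )POOL_MAGIC;
}
#endif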

/* realloc */

void *
_THIS_REALLOC(void *p, size_t nbytes)
{
        block *bp;
        poolp pool;
        uint size;

#ifdef WITH_MALLOC_HOOKS
        if (realloc_hook != NULL)
                return (*realloc_hook)(p, nbytes);
#endif

        if (p == NULL)
                return _THIS_MALLOC(nbytes);

        /* realloc(p, 0) on big blocks is redirected. */
        pool = (poolp )((block *)p - ((off_t )p & POOL_SIZE_MASK));
        if (pool->pooladdr != pool || pool->magic != (uint )POOL_MAGIC) {
                /* We haven't allocated this block */
                if (!(nbytes > SMALL_REQUEST_THRESHOLD) && nbytes) {
                        /* small request */
                        size = nbytes;
                        goto malloc_copy_free;
                }
                bp = (block *)_SYSTEM_REALLOC(p, nbytes);
        }
        else {
                /* We're in charge of this block */
                size = (pool->szidx + 1) << ALIGNMENT_SHIFT; /* block size */
                if (size >= nbytes) {
                        /* Don't bother if a smaller size was requested
                           except for realloc(p, 0) == free(p), ret NULL */
                        if (nbytes == 0) {
                                _THIS_FREE(p);
                                bp = NULL;
                        }
                        else
                                bp = (block *)p;
                }
                else {

                malloc_copy_free:

                        bp = (block *)_THIS_MALLOC(nbytes);
                        if (bp != NULL) {
                                memcpy(bp, p, size);
                                _THIS_FREE(p);
                        }
                }
        }
        return (void *)bp;
}
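
/*
 * Usage sketch (disabled; purely illustrative): the three entry points
 * above behave like malloc/realloc/free and can be mixed freely.  Small
 * requests are served from pools; anything above SMALL_REQUEST_THRESHOLD
 * falls through to the system allocator, and the magic check in free()
 * sorts the two cases out again.
 */
#if 0
static void
object_malloc_roundtrip_example(void)
{
        void *small = _THIS_MALLOC(24);         /* pool block, class idx 2  */
        void *big   = _THIS_MALLOC(1024);       /* redirected to libc       */

        small = _THIS_REALLOC(small, 200);      /* grows: malloc+copy+free  */
        _THIS_FREE(small);
        _THIS_FREE(big);                        /* magic check -> libc free */
}
#endif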

/* calloc */

/* -- unused --
void *
_THIS_CALLOC(size_t nbel, size_t elsz)
{
        void *p;
        size_t nbytes;

#ifdef WITH_MALLOC_HOOKS
        if (calloc_hook != NULL)
                return (*calloc_hook)(nbel, elsz);
#endif

        nbytes = nbel * elsz;
        p = _THIS_MALLOC(nbytes);
        if (p != NULL)
                memset(p, 0, nbytes);
        return p;
}
*/

/*==========================================================================*/

/*
 * Hooks
 */

#ifdef WITH_MALLOC_HOOKS

void
_SET_HOOKS( void *(*malloc_func)(size_t),
            void *(*calloc_func)(size_t, size_t),
            void *(*realloc_func)(void *, size_t),
            void (*free_func)(void *) )
{
        LOCK();
        malloc_hook = malloc_func;
        calloc_hook = calloc_func;
        realloc_hook = realloc_func;
        free_hook = free_func;
        UNLOCK();
}

void
_FETCH_HOOKS( void *(**malloc_funcp)(size_t),
              void *(**calloc_funcp)(size_t, size_t),
              void *(**realloc_funcp)(void *, size_t),
              void (**free_funcp)(void *) )
{
        LOCK();
        *malloc_funcp = malloc_hook;
        *calloc_funcp = calloc_hook;
        *realloc_funcp = realloc_hook;
        *free_funcp = free_hook;
        UNLOCK();
}
#endif /* !WITH_MALLOC_HOOKS */
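
/*
 * Hook usage sketch (disabled; purely illustrative).  A profiler can save
 * the current hooks with _FETCH_HOOKS, install wrappers with _SET_HOOKS,
 * and count or trace requests.  The counting wrapper below is only an
 * assumption about how the interface might be used, not an existing API;
 * note that a malloc hook is consulted before the small block code runs,
 * so the wrapper must obtain the memory itself (here via _SYSTEM_MALLOC,
 * which the magic check in free() handles correctly).
 */
#if 0
static ulong allocation_count = 0;

static void *
counting_malloc(size_t nbytes)
{
        allocation_count++;
        return _SYSTEM_MALLOC(nbytes);
}

static void
install_counting_hooks(void)
{
        void *(*old_malloc)(size_t);
        void *(*old_calloc)(size_t, size_t);
        void *(*old_realloc)(void *, size_t);
        void (*old_free)(void *);

        _FETCH_HOOKS(&old_malloc, &old_calloc, &old_realloc, &old_free);
        _SET_HOOKS(counting_malloc, old_calloc, old_realloc, old_free);
}
#endif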