blob: 92358c187e20891b75a510a760abf342050b41c9 [file] [log] [blame]
/*--------------------------------------------------------------------*/
/*--- An implementation of malloc/free which doesn't use sbrk.     ---*/
/*---                                                 vg_malloc2.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, an x86 protected-mode emulator
   designed for debugging and profiling binaries on x86-Unixes.

   Copyright (C) 2000-2002 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/
32
33#include "vg_include.h"
34
35/* Define to turn on (heavyweight) debugging machinery. */
36/* #define DEBUG_MALLOC */
37
38
39/*------------------------------------------------------------*/
40/*--- Structs n stuff ---*/
41/*------------------------------------------------------------*/
42
43#define VG_REDZONE_LO_MASK 0x31415927
44#define VG_REDZONE_HI_MASK 0x14141356
45
46#define VG_N_MALLOC_LISTS 16 /* do not change this */
47
48
49typedef UInt Word;
50typedef Word WordF;
51typedef Word WordL;
52
53
54/* A superblock. */
55typedef
56 struct _Superblock {
57 struct _Superblock* next;
58 /* number of payload words in this superblock. */
59 Int n_payload_words;
60 Word payload_words[0];
61 }
62 Superblock;
63
64
65/* An arena. */
66typedef
67 struct {
68 Char* name;
69 Int rz_szW; /* Red zone size in words */
70 Bool rz_check; /* Check red-zone on free? */
71 Int min_sblockW; /* Minimum superblock size */
72 WordF* freelist[VG_N_MALLOC_LISTS];
73 Superblock* sblocks;
74 /* Stats only. */
75 UInt bytes_on_loan;
76 UInt bytes_mmaped;
77 UInt bytes_on_loan_max;
78 }
79 Arena;
80
81
82/* Block layout:
83
84 this block total sizeW (1 word)
85 freelist previous ptr (1 word)
86 freelist next ptr (1 word)
87 red zone words (depends on .rz_szW field of Arena)
88 (payload words)
89 red zone words (depends on .rz_szW field of Arena)
90 this block total sizeW (1 word)
91
92 Total size in words (bszW) and payload size in words (pszW)
93 are related by
94 bszW == pszW + 4 + 2 * a->rz_szW
95
96 Furthermore, both size fields in the block are negative if it is
97 not in use, and positive if it is in use. A block size of zero
98 is not possible, because a block always has at least four words
99 of overhead.
100*/
101typedef
102 struct {
103 Int bszW_lo;
104 Word* prev;
105 Word* next;
106 Word redzone[0];
107 }
108 BlockHeader;
109
110
111/*------------------------------------------------------------*/
112/*--- Forwardses ... and misc ... ---*/
113/*------------------------------------------------------------*/
114
115static Bool blockSane ( Arena* a, Word* b );
116
117/* Align ptr p upwards to an align-sized boundary. */
118static
119void* align_upwards ( void* p, Int align )
120{
121 Addr a = (Addr)p;
122 if ((a % align) == 0) return (void*)a;
123 return (void*)(a - (a % align) + align);
124}
125
126
127/*------------------------------------------------------------*/
128/*--- Arena management stuff ---*/
129/*------------------------------------------------------------*/
130
131/* The arena structures themselves. */
132static Arena vg_arena[VG_N_ARENAS];
133
134/* Functions external to this module identify arenas using ArenaIds,
135 not Arena*s. This fn converts the former to the latter. */
136static Arena* arenaId_to_ArenaP ( ArenaId arena )
137{
138 vg_assert(arena >= 0 && arena < VG_N_ARENAS);
139 return & vg_arena[arena];
140}
141
142
143/* Initialise an arena. */
144static
145void arena_init ( Arena* a, Char* name,
146 Int rz_szW, Bool rz_check, Int min_sblockW )
147{
148 Int i;
149 vg_assert((min_sblockW % VKI_WORDS_PER_PAGE) == 0);
150 a->name = name;
151 a->rz_szW = rz_szW;
152 a->rz_check = rz_check;
153 a->min_sblockW = min_sblockW;
154 for (i = 0; i < VG_N_MALLOC_LISTS; i++) a->freelist[i] = NULL;
155 a->sblocks = NULL;
156 a->bytes_on_loan = 0;
157 a->bytes_mmaped = 0;
158 a->bytes_on_loan_max = 0;
159}
160
161
162/* Print vital stats for an arena. */
163void VG_(show_all_arena_stats) ( void )
164{
165 Int i;
166 for (i = 0; i < VG_N_ARENAS; i++) {
167 VG_(message)(Vg_DebugMsg,
168 "Arena `%s': %7d max useful, %7d mmap'd, %7d current useful",
169 vg_arena[i].name,
170 vg_arena[i].bytes_on_loan_max,
171 vg_arena[i].bytes_mmaped,
172 vg_arena[i].bytes_on_loan
173 );
174 }
175}
176
177
178/* It is important that this library is self-initialising, because it
179 may get called very early on -- as a result of C++ static
180 constructor initialisations -- before Valgrind itself is
njn25e49d8e72002-09-23 09:36:25 +0000181 initialised. Hence VG_(arena_malloc)() and VG_(arena_free)() below always
182 call ensure_mm_init() to ensure things are correctly initialised. */
sewardjde4a1d02002-03-22 01:27:54 +0000183
184static
185void ensure_mm_init ( void )
186{
187 static Bool init_done = False;
njn25e49d8e72002-09-23 09:36:25 +0000188
sewardjde4a1d02002-03-22 01:27:54 +0000189 if (init_done) return;
190
191 /* Use a checked red zone size of 1 word for our internal stuff,
192 and an unchecked zone of arbitrary size for the client. Of
193 course the client's red zone is checked really, but using the
194 addressibility maps, not by the mechanism implemented here,
195 which merely checks at the time of freeing that the red zone
196 words are unchanged. */
197
njn25e49d8e72002-09-23 09:36:25 +0000198 arena_init ( &vg_arena[VG_AR_CORE], "core ",
sewardjde4a1d02002-03-22 01:27:54 +0000199 1, True, 262144 );
200
njn25e49d8e72002-09-23 09:36:25 +0000201 arena_init ( &vg_arena[VG_AR_SKIN], "skin ",
sewardjde4a1d02002-03-22 01:27:54 +0000202 1, True, 262144 );
203
njn25e49d8e72002-09-23 09:36:25 +0000204 arena_init ( &vg_arena[VG_AR_SYMTAB], "symtab ",
205 1, True, 262144 );
206
207 arena_init ( &vg_arena[VG_AR_JITTER], "JITter ",
208 1, True, 8192 );
209
210 arena_init ( &vg_arena[VG_AR_CLIENT], "client ",
sewardjde4a1d02002-03-22 01:27:54 +0000211 VG_AR_CLIENT_REDZONE_SZW, False, 262144 );
212
njn25e49d8e72002-09-23 09:36:25 +0000213 arena_init ( &vg_arena[VG_AR_DEMANGLE], "demangle",
sewardjde4a1d02002-03-22 01:27:54 +0000214 4 /*paranoid*/, True, 16384 );
215
njn25e49d8e72002-09-23 09:36:25 +0000216 arena_init ( &vg_arena[VG_AR_EXECTXT], "exectxt ",
sewardjde4a1d02002-03-22 01:27:54 +0000217 1, True, 16384 );
218
njn25e49d8e72002-09-23 09:36:25 +0000219 arena_init ( &vg_arena[VG_AR_ERRORS], "errors ",
sewardjde4a1d02002-03-22 01:27:54 +0000220 1, True, 16384 );
221
222 arena_init ( &vg_arena[VG_AR_TRANSIENT], "transien",
223 2, True, 16384 );
224
225 init_done = True;
226# ifdef DEBUG_MALLOC
227 VG_(mallocSanityCheckAll)();
228# endif
229}
230
231
232/*------------------------------------------------------------*/
233/*--- Arena management stuff ---*/
234/*------------------------------------------------------------*/
235
236static
237Superblock* newSuperblock ( Arena* a, Int cszW )
238{
239 Superblock* sb;
240 cszW += 2; /* Take into account sb->next and sb->n_words fields */
241 if (cszW < a->min_sblockW) cszW = a->min_sblockW;
242 while ((cszW % VKI_WORDS_PER_PAGE) > 0) cszW++;
sewardje9047952002-06-05 20:28:33 +0000243 sb = VG_(get_memory_from_mmap) ( cszW * sizeof(Word),
244 "newSuperblock" );
sewardjde4a1d02002-03-22 01:27:54 +0000245 sb->n_payload_words = cszW - 2;
246 a->bytes_mmaped += cszW * sizeof(Word);
247 if (0)
248 VG_(message)(Vg_DebugMsg, "newSuperblock, %d payload words",
249 sb->n_payload_words);
250 return sb;
251}
252
253
254/* Find the superblock containing the given chunk. */
255static
256Superblock* findSb ( Arena* a, UInt* ch )
257{
258 Superblock* sb;
259 for (sb = a->sblocks; sb; sb = sb->next)
260 if (&sb->payload_words[0] <= ch
261 && ch < &sb->payload_words[sb->n_payload_words])
262 return sb;
263 VG_(printf)("findSb: can't find pointer %p in arena `%s'\n",
264 ch, a->name );
265 VG_(panic)("findSb: vg_free() in wrong arena?");
266 return NULL; /*NOTREACHED*/
267}
268
269
270/*------------------------------------------------------------*/
271/*--- Low-level functions for working with blocks. ---*/
272/*------------------------------------------------------------*/
273
274/* Add the not-in-use attribute to a bszW. */
275static __inline__
276Int mk_free_bszW ( Int bszW )
277{
278 vg_assert(bszW != 0);
279 return (bszW < 0) ? bszW : -bszW;
280}
281
282/* Add the in-use attribute to a bszW. */
283static __inline__
284Int mk_inuse_bszW ( Int bszW )
285{
286 vg_assert(bszW != 0);
287 return (bszW < 0) ? -bszW : bszW;
288}
289
290/* Remove the in-use/not-in-use attribute from a bszW, leaving just
291 the size. */
292static __inline__
293Int mk_plain_bszW ( Int bszW )
294{
295 vg_assert(bszW != 0);
296 return (bszW < 0) ? -bszW : bszW;
297}
298
299/* Does this bszW have the in-use attribute ? */
300static __inline__
301Bool is_inuse_bszW ( Int bszW )
302{
303 vg_assert(bszW != 0);
304 return (bszW < 0) ? False : True;
305}
306
307
308/* Given the addr of the first word of a block, return the addr of the
309 last word. */
310static __inline__
311WordL* first_to_last ( WordF* fw )
312{
313 return fw + mk_plain_bszW(fw[0]) - 1;
314}
315
316/* Given the addr of the last word of a block, return the addr of the
317 first word. */
318static __inline__
319WordF* last_to_first ( WordL* lw )
320{
321 return lw - mk_plain_bszW(lw[0]) + 1;
322}
323
324
325/* Given the addr of the first word of a block, return the addr of the
326 first word of its payload. */
327static __inline__
328Word* first_to_payload ( Arena* a, WordF* fw )
329{
330 return & fw[3 + a->rz_szW];
331}
332
333/* Given the addr of the first word of a the payload of a block,
334 return the addr of the first word of the block. */
335static __inline__
336Word* payload_to_first ( Arena* a, WordF* payload )
337{
338 return & payload[- 3 - a->rz_szW];
339}
340
341/* Set and get the lower size field of a block. */
342static __inline__
343void set_bszW_lo ( WordF* fw, Int bszW ) {
344 fw[0] = bszW;
345}
346static __inline__
347Int get_bszW_lo ( WordF* fw )
348{
349 return fw[0];
350}
351
352
353/* Set and get the next and previous link fields of a block. */
354static __inline__
355void set_prev_p ( WordF* fw, Word* prev_p ) {
356 fw[1] = (Word)prev_p;
357}
358static __inline__
359void set_next_p ( WordF* fw, Word* next_p ) {
360 fw[2] = (Word)next_p;
361}
362static __inline__
363Word* get_prev_p ( WordF* fw ) {
364 return (Word*)(fw[1]);
365}
366static __inline__
367Word* get_next_p ( WordF* fw ) {
368 return (Word*)(fw[2]);
369}
370
371
372/* Set and get the upper size field of a block. */
373static __inline__
374void set_bszW_hi ( WordF* fw, Int bszW ) {
375 WordL* lw = first_to_last(fw);
376 vg_assert(lw == fw + mk_plain_bszW(bszW) - 1);
377 lw[0] = bszW;
378}
379static __inline__
380Int get_bszW_hi ( WordF* fw ) {
381 WordL* lw = first_to_last(fw);
382 return lw[0];
383}
384
385/* Get the upper size field of a block, given a pointer to the last
386 word of it. */
387static __inline__
388Int get_bszW_hi_from_last_word ( WordL* lw ) {
389 WordF* fw = last_to_first(lw);
390 return get_bszW_lo(fw);
391}
392
393
394/* Read and write the lower and upper red-zone words of a block. */
395static __inline__
396void set_rz_lo_word ( Arena* a, WordF* fw, Int rz_wordno, Word w )
397{
398 fw[3 + rz_wordno] = w;
399}
400static __inline__
401void set_rz_hi_word ( Arena* a, WordF* fw, Int rz_wordno, Word w )
402{
403 WordL* lw = first_to_last(fw);
404 lw[-1-rz_wordno] = w;
405}
406static __inline__
407Word get_rz_lo_word ( Arena* a, WordF* fw, Int rz_wordno )
408{
409 return fw[3 + rz_wordno];
410}
411static __inline__
412Word get_rz_hi_word ( Arena* a, WordF* fw, Int rz_wordno )
413{
414 WordL* lw = first_to_last(fw);
415 return lw[-1-rz_wordno];
416}
417
418
419/* Return the lower, upper and total overhead in words for a block.
420 These are determined purely by which arena the block lives in. */
421static __inline__
422Int overhead_szW_lo ( Arena* a )
423{
424 return 3 + a->rz_szW;
425}
426static __inline__
427Int overhead_szW_hi ( Arena* a )
428{
429 return 1 + a->rz_szW;
430}
431static __inline__
432Int overhead_szW ( Arena* a )
433{
434 return overhead_szW_lo(a) + overhead_szW_hi(a);
435}
436
437
438/* Convert pointer size in words to block size in words, and back. */
439static __inline__
440Int pszW_to_bszW ( Arena* a, Int pszW )
441{
442 vg_assert(pszW >= 0);
443 return pszW + overhead_szW(a);
444}
445static __inline__
446Int bszW_to_pszW ( Arena* a, Int bszW )
447{
448 Int pszW = bszW - overhead_szW(a);
449 vg_assert(pszW >= 0);
450 return pszW;
451}
452
453/*------------------------------------------------------------*/
454/*--- Functions for working with freelists. ---*/
455/*------------------------------------------------------------*/
456
457/* Determination of which freelist a block lives on is based on the
458 payload size, not block size, in words. */
459
460/* Convert a payload size in words to a freelist number. */
461
462static
463Int pszW_to_listNo ( Int pszW )
464{
465 vg_assert(pszW >= 0);
466 if (pszW <= 3) return 0;
467 if (pszW <= 4) return 1;
468 if (pszW <= 5) return 2;
469 if (pszW <= 6) return 3;
470 if (pszW <= 7) return 4;
471 if (pszW <= 8) return 5;
472 if (pszW <= 9) return 6;
473 if (pszW <= 10) return 7;
474 if (pszW <= 11) return 8;
475 if (pszW <= 12) return 9;
476 if (pszW <= 16) return 10;
477 if (pszW <= 32) return 11;
478 if (pszW <= 64) return 12;
479 if (pszW <= 128) return 13;
480 if (pszW <= 256) return 14;
481 return 15;
482}
483
484
485/* What are the minimum and maximum payload sizes for a given list? */
486
487static
488Int listNo_to_pszW_min ( Int listNo )
489{
490 Int pszW = 0;
491 vg_assert(listNo >= 0 && listNo <= VG_N_MALLOC_LISTS);
492 while (pszW_to_listNo(pszW) < listNo) pszW++;
493 return pszW;
494}
495
496static
497Int listNo_to_pszW_max ( Int listNo )
498{
499 vg_assert(listNo >= 0 && listNo <= VG_N_MALLOC_LISTS);
500 if (listNo == VG_N_MALLOC_LISTS-1) {
501 return 999999999;
502 } else {
503 return listNo_to_pszW_min(listNo+1) - 1;
504 }
505}
506
507
508/* A nasty hack to try and reduce fragmentation. Try and replace
509 a->freelist[lno] with another block on the same list but with a
510 lower address, with the idea of attempting to recycle the same
511 blocks rather than cruise through the address space. */
512
513static
514void swizzle ( Arena* a, Int lno )
515{
516 UInt* p_best;
517 UInt* pp;
518 UInt* pn;
519 Int i;
520
521 p_best = a->freelist[lno];
522 if (p_best == NULL) return;
523
524 pn = pp = p_best;
525 for (i = 0; i < 20; i++) {
526 pn = get_next_p(pn);
527 pp = get_prev_p(pp);
528 if (pn < p_best) p_best = pn;
529 if (pp < p_best) p_best = pp;
530 }
531 if (p_best < a->freelist[lno]) {
532# ifdef DEBUG_MALLOC
533 VG_(printf)("retreat by %d\n",
534 ((Char*)(a->freelist[lno])) - ((Char*)p_best));
535# endif
536 a->freelist[lno] = p_best;
537 }
538}
539
540
541/*------------------------------------------------------------*/
542/*--- Creating and deleting blocks. ---*/
543/*------------------------------------------------------------*/
544
545/* Mark the words at b .. b+bszW-1 as not in use, and add them to the
546 relevant free list. */
547
548static
549void mkFreeBlock ( Arena* a, Word* b, Int bszW, Int b_lno )
550{
551 Int pszW = bszW_to_pszW(a, bszW);
552 vg_assert(pszW >= 0);
553 vg_assert(b_lno == pszW_to_listNo(pszW));
554 /* Set the size fields and indicate not-in-use. */
555 set_bszW_lo(b, mk_free_bszW(bszW));
556 set_bszW_hi(b, mk_free_bszW(bszW));
557
558 /* Add to the relevant list. */
559 if (a->freelist[b_lno] == NULL) {
560 set_prev_p(b, b);
561 set_next_p(b, b);
562 a->freelist[b_lno] = b;
563 } else {
564 Word* b_prev = get_prev_p(a->freelist[b_lno]);
565 Word* b_next = a->freelist[b_lno];
566 set_next_p(b_prev, b);
567 set_prev_p(b_next, b);
568 set_next_p(b, b_next);
569 set_prev_p(b, b_prev);
570 }
571# ifdef DEBUG_MALLOC
572 (void)blockSane(a,b);
573# endif
574}
575
576
577/* Mark the words at b .. b+bszW-1 as in use, and set up the block
578 appropriately. */
579static
580void mkInuseBlock ( Arena* a, UInt* b, UInt bszW )
581{
582 Int i;
583 set_bszW_lo(b, mk_inuse_bszW(bszW));
584 set_bszW_hi(b, mk_inuse_bszW(bszW));
585 set_prev_p(b, NULL);
586 set_next_p(b, NULL);
587 if (a->rz_check) {
588 for (i = 0; i < a->rz_szW; i++) {
589 set_rz_lo_word(a, b, i, (UInt)b ^ VG_REDZONE_LO_MASK);
590 set_rz_hi_word(a, b, i, (UInt)b ^ VG_REDZONE_HI_MASK);
591 }
592 }
593# ifdef DEBUG_MALLOC
594 (void)blockSane(a,b);
595# endif
596}
597
598
599/* Remove a block from a given list. Does no sanity checking. */
600static
601void unlinkBlock ( Arena* a, UInt* b, Int listno )
602{
603 vg_assert(listno >= 0 && listno < VG_N_MALLOC_LISTS);
604 if (get_prev_p(b) == b) {
605 /* Only one element in the list; treat it specially. */
606 vg_assert(get_next_p(b) == b);
607 a->freelist[listno] = NULL;
608 } else {
609 UInt* b_prev = get_prev_p(b);
610 UInt* b_next = get_next_p(b);
611 a->freelist[listno] = b_prev;
612 set_next_p(b_prev, b_next);
613 set_prev_p(b_next, b_prev);
614 swizzle ( a, listno );
615 }
616 set_prev_p(b, NULL);
617 set_next_p(b, NULL);
618}
619
620
621/* Split an existing free block into two pieces, and put the fragment
622 (the second one along in memory) onto the relevant free list.
623 req_bszW is the required size of the block which isn't the
624 fragment. */
625static
626void splitChunk ( Arena* a, UInt* b, Int b_listno, UInt req_bszW )
627{
628 Int b_bszW, frag_bszW;
629 b_bszW = mk_plain_bszW(get_bszW_lo(b));
630 vg_assert(req_bszW < b_bszW);
631 frag_bszW = b_bszW - req_bszW;
632 vg_assert(frag_bszW >= overhead_szW(a));
633 /*
634 printf( "split %d into %d and %d\n",
635 b_bszW,req_bszW,frag_bszW );
636 */
637 vg_assert(bszW_to_pszW(a, frag_bszW) > 0);
638 unlinkBlock(a, b, b_listno);
639 mkInuseBlock(a, b, req_bszW);
640 mkFreeBlock(a, &b[req_bszW], frag_bszW,
641 pszW_to_listNo(bszW_to_pszW(a, frag_bszW)));
642}
643
644
645/*------------------------------------------------------------*/
646/*--- Sanity-check/debugging machinery. ---*/
647/*------------------------------------------------------------*/
648
649/* Do some crude sanity checks on a chunk. */
650static
651Bool blockSane ( Arena* a, Word* b )
652{
653# define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
654 Int i;
655 if (get_bszW_lo(b) != get_bszW_hi(b))
656 {BLEAT("sizes");return False;}
657 if (a->rz_check && is_inuse_bszW(get_bszW_lo(b))) {
658 for (i = 0; i < a->rz_szW; i++) {
659 if (get_rz_lo_word(a, b, i) != ((Word)b ^ VG_REDZONE_LO_MASK))
660 {BLEAT("redzone-lo");return False;}
661 if (get_rz_hi_word(a, b, i) != ((Word)b ^ VG_REDZONE_HI_MASK))
662 {BLEAT("redzone-hi");return False;}
663 }
664 }
665 return True;
666# undef BLEAT
667}
668
669
670/* Print superblocks (only for debugging). */
671static
672void ppSuperblocks ( Arena* a )
673{
674 Int i, ch_bszW, blockno;
675 UInt* ch;
676 Superblock* sb = a->sblocks;
677 blockno = 1;
678
679 while (sb) {
680 VG_(printf)( "\n" );
681 VG_(printf)( "superblock %d at %p, sb->n_pl_ws = %d, next = %p\n",
682 blockno++, sb, sb->n_payload_words, sb->next );
683 i = 0;
684 while (True) {
685 if (i >= sb->n_payload_words) break;
686 ch = &sb->payload_words[i];
687 ch_bszW = get_bszW_lo(ch);
688 VG_(printf)( " block at %d, bszW %d: ", i, mk_plain_bszW(ch_bszW) );
689 VG_(printf)( "%s, ", is_inuse_bszW(ch_bszW) ? "inuse" : "free" );
690 VG_(printf)( "%s\n", blockSane(a,ch) ? "ok" : "BAD" );
691 i += mk_plain_bszW(ch_bszW);
692 }
693 if (i > sb->n_payload_words)
694 VG_(printf)( " last block overshoots end of SB\n");
695 sb = sb->next;
696 }
697 VG_(printf)( "end of superblocks\n\n" );
698}
699
700
701/* Sanity check both the superblocks and the chains. */
njn25e49d8e72002-09-23 09:36:25 +0000702static void mallocSanityCheckArena ( ArenaId aid )
sewardjde4a1d02002-03-22 01:27:54 +0000703{
704 Int i, superblockctr, b_bszW, b_pszW, blockctr_sb, blockctr_li;
705 Int blockctr_sb_free, listno, list_min_pszW, list_max_pszW;
706 Superblock* sb;
707 Bool thisFree, lastWasFree;
708 Word* b;
709 Word* b_prev;
710 UInt arena_bytes_on_loan;
711 Arena* a;
712
njn25e49d8e72002-09-23 09:36:25 +0000713# define BOMB VG_(panic)("mallocSanityCheckArena")
sewardjde4a1d02002-03-22 01:27:54 +0000714
715 a = arenaId_to_ArenaP(aid);
716
717 /* First, traverse all the superblocks, inspecting the chunks in
718 each. */
719 superblockctr = blockctr_sb = blockctr_sb_free = 0;
720 arena_bytes_on_loan = 0;
721 sb = a->sblocks;
722 while (sb) {
723 lastWasFree = False;
724 superblockctr++;
725 i = 0;
726 while (True) {
727 if (i >= sb->n_payload_words) break;
728 blockctr_sb++;
729 b = &sb->payload_words[i];
730 b_bszW = get_bszW_lo(b);
731 if (!blockSane(a, b)) {
njn25e49d8e72002-09-23 09:36:25 +0000732 VG_(printf)("mallocSanityCheckArena: sb %p, block %d (bszW %d): "
733 " BAD\n",
sewardjde4a1d02002-03-22 01:27:54 +0000734 sb, i, b_bszW );
735 BOMB;
736 }
737 thisFree = !is_inuse_bszW(b_bszW);
738 if (thisFree && lastWasFree) {
njn25e49d8e72002-09-23 09:36:25 +0000739 VG_(printf)("mallocSanityCheckArena: sb %p, block %d (bszW %d): "
740 "UNMERGED FREES\n",
sewardjde4a1d02002-03-22 01:27:54 +0000741 sb, i, b_bszW );
742 BOMB;
743 }
744 lastWasFree = thisFree;
745 if (thisFree) blockctr_sb_free++;
746 if (!thisFree)
747 arena_bytes_on_loan += sizeof(Word) * bszW_to_pszW(a, b_bszW);
748 i += mk_plain_bszW(b_bszW);
749 }
750 if (i > sb->n_payload_words) {
njn25e49d8e72002-09-23 09:36:25 +0000751 VG_(printf)( "mallocSanityCheckArena: sb %p: last block "
sewardjde4a1d02002-03-22 01:27:54 +0000752 "overshoots end\n", sb);
753 BOMB;
754 }
755 sb = sb->next;
756 }
757
758 if (arena_bytes_on_loan != a->bytes_on_loan) {
759 VG_(printf)(
njn25e49d8e72002-09-23 09:36:25 +0000760 "mallocSanityCheckArena: a->bytes_on_loan %d, "
sewardjde4a1d02002-03-22 01:27:54 +0000761 "arena_bytes_on_loan %d: "
762 "MISMATCH\n", a->bytes_on_loan, arena_bytes_on_loan);
763 ppSuperblocks(a);
764 BOMB;
765 }
766
767 /* Second, traverse each list, checking that the back pointers make
768 sense, counting blocks encountered, and checking that each block
769 is an appropriate size for this list. */
770 blockctr_li = 0;
771 for (listno = 0; listno < VG_N_MALLOC_LISTS; listno++) {
772 list_min_pszW = listNo_to_pszW_min(listno);
773 list_max_pszW = listNo_to_pszW_max(listno);
774 b = a->freelist[listno];
775 if (b == NULL) continue;
776 while (True) {
777 b_prev = b;
778 b = get_next_p(b);
779 if (get_prev_p(b) != b_prev) {
njn25e49d8e72002-09-23 09:36:25 +0000780 VG_(printf)( "mallocSanityCheckArena: list %d at %p: "
sewardjde4a1d02002-03-22 01:27:54 +0000781 "BAD LINKAGE\n",
782 listno, b );
783 BOMB;
784 }
785 b_pszW = bszW_to_pszW(a, mk_plain_bszW(get_bszW_lo(b)));
786 if (b_pszW < list_min_pszW || b_pszW > list_max_pszW) {
787 VG_(printf)(
njn25e49d8e72002-09-23 09:36:25 +0000788 "mallocSanityCheckArena: list %d at %p: "
sewardjde4a1d02002-03-22 01:27:54 +0000789 "WRONG CHAIN SIZE %d (%d, %d)\n",
790 listno, b, b_pszW, list_min_pszW, list_max_pszW );
791 BOMB;
792 }
793 blockctr_li++;
794 if (b == a->freelist[listno]) break;
795 }
796 }
797
798 if (blockctr_sb_free != blockctr_li) {
799 VG_(printf)(
njn25e49d8e72002-09-23 09:36:25 +0000800 "mallocSanityCheckArena: BLOCK COUNT MISMATCH "
sewardjde4a1d02002-03-22 01:27:54 +0000801 "(via sbs %d, via lists %d)\n",
802 blockctr_sb_free, blockctr_li );
803 ppSuperblocks(a);
804 BOMB;
805 }
806
807 VG_(message)(Vg_DebugMsg,
808 "mSC [%s]: %2d sbs, %5d tot bs, %4d/%-4d free bs, "
809 "%2d lists, %7d mmap, %7d loan",
810 a->name,
811 superblockctr,
812 blockctr_sb, blockctr_sb_free, blockctr_li,
813 VG_N_MALLOC_LISTS,
814 a->bytes_mmaped, a->bytes_on_loan);
815# undef BOMB
816}
817
818
819void VG_(mallocSanityCheckAll) ( void )
820{
821 Int i;
822 for (i = 0; i < VG_N_ARENAS; i++)
njn25e49d8e72002-09-23 09:36:25 +0000823 mallocSanityCheckArena ( i );
sewardjde4a1d02002-03-22 01:27:54 +0000824}
825
826
827/* Really, this isn't the right place for this. Nevertheless: find
828 out if an arena is empty -- currently has no bytes on loan. This
829 is useful for checking for memory leaks (of valgrind, not the
830 client.)
831*/
832Bool VG_(is_empty_arena) ( ArenaId aid )
833{
834 Arena* a;
835 Superblock* sb;
836 WordF* b;
837 Int b_bszW;
njn25e49d8e72002-09-23 09:36:25 +0000838
sewardjde4a1d02002-03-22 01:27:54 +0000839 ensure_mm_init();
840 a = arenaId_to_ArenaP(aid);
841 for (sb = a->sblocks; sb != NULL; sb = sb->next) {
842 /* If the superblock is empty, it should contain a single free
843 block, of the right size. */
844 b = &(sb->payload_words[0]);
845 b_bszW = get_bszW_lo(b);
846 if (is_inuse_bszW(b_bszW)) return False;
847 if (mk_plain_bszW(b_bszW) != sb->n_payload_words) return False;
848 /* So this block is not in use and is of the right size. Keep
849 going. */
850 }
851 return True;
852}
853
854
855/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000856/*--- Core-visible functions. ---*/
sewardjde4a1d02002-03-22 01:27:54 +0000857/*------------------------------------------------------------*/
858
njn25e49d8e72002-09-23 09:36:25 +0000859void* VG_(arena_malloc) ( ArenaId aid, Int req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +0000860{
861 Int req_pszW, req_bszW, frag_bszW, b_bszW, lno;
862 Superblock* new_sb;
863 Word* b;
864 Arena* a;
865
866 VGP_PUSHCC(VgpMalloc);
867
868 ensure_mm_init();
869 a = arenaId_to_ArenaP(aid);
870
871 vg_assert(req_pszB >= 0);
872 vg_assert(req_pszB < 0x7FFFFFF0);
873
874 req_pszW = (req_pszB + VKI_BYTES_PER_WORD - 1) / VKI_BYTES_PER_WORD;
875
876 /* Keep gcc -O happy: */
877 b = NULL;
878
879 /* Start searching at this list. */
880 lno = pszW_to_listNo(req_pszW);
881
882 /* This loop finds a list which has a block big enough, or sets
883 req_listno to N_LISTS if no such block exists. */
884 while (True) {
885 if (lno == VG_N_MALLOC_LISTS) break;
886 /* If this list is empty, try the next one. */
887 if (a->freelist[lno] == NULL) {
888 lno++;
889 continue;
890 }
891 /* Scan a->list[lno] to find a big-enough chunk. */
892 b = a->freelist[lno];
893 b_bszW = mk_plain_bszW(get_bszW_lo(b));
894 while (True) {
895 if (bszW_to_pszW(a, b_bszW) >= req_pszW) break;
896 b = get_next_p(b);
897 b_bszW = mk_plain_bszW(get_bszW_lo(b));
898 if (b == a->freelist[lno]) break;
899 }
900 if (bszW_to_pszW(a, b_bszW) >= req_pszW) break;
901 /* No luck? Try a larger list. */
902 lno++;
903 }
904
905 /* Either lno < VG_N_MALLOC_LISTS and b points to the selected
906 block, or lno == VG_N_MALLOC_LISTS, and we have to allocate a
907 new superblock. */
908
909 if (lno == VG_N_MALLOC_LISTS) {
910 req_bszW = pszW_to_bszW(a, req_pszW);
911 new_sb = newSuperblock(a, req_bszW);
912 vg_assert(new_sb != NULL);
913 new_sb->next = a->sblocks;
914 a->sblocks = new_sb;
915 b = &(new_sb->payload_words[0]);
916 lno = pszW_to_listNo(bszW_to_pszW(a, new_sb->n_payload_words));
917 mkFreeBlock ( a, b, new_sb->n_payload_words, lno);
918 }
919
920 /* Ok, we can allocate from b, which lives in list req_listno. */
921 vg_assert(b != NULL);
922 vg_assert(lno >= 0 && lno < VG_N_MALLOC_LISTS);
923 vg_assert(a->freelist[lno] != NULL);
924 b_bszW = mk_plain_bszW(get_bszW_lo(b));
925 req_bszW = pszW_to_bszW(a, req_pszW);
926 /* req_bszW is the size of the block we are after. b_bszW is the
927 size of what we've actually got. */
928 vg_assert(b_bszW >= req_bszW);
929
930 /* Could we split this block and still get a useful fragment?
931 Where "useful" means that the payload size of the frag is at
932 least one word. */
933 frag_bszW = b_bszW - req_bszW;
934 if (frag_bszW > overhead_szW(a)) {
935 splitChunk(a, b, lno, req_bszW);
936 } else {
937 /* No, mark as in use and use as-is. */
938 unlinkBlock(a, b, lno);
939 /*
940 set_bszW_lo(b, mk_inuse_bszW(b_bszW));
941 set_bszW_hi(b, mk_inuse_bszW(b_bszW));
942 */
943 mkInuseBlock(a, b, b_bszW);
944 }
945 vg_assert(req_bszW <= mk_plain_bszW(get_bszW_lo(b)));
946
947 a->bytes_on_loan
948 += sizeof(Word)
949 * bszW_to_pszW(a, mk_plain_bszW(get_bszW_lo(b)));
950 if (a->bytes_on_loan > a->bytes_on_loan_max)
951 a->bytes_on_loan_max = a->bytes_on_loan;
952
953# ifdef DEBUG_MALLOC
njn25e49d8e72002-09-23 09:36:25 +0000954 mallocSanityCheckArena(aid);
sewardjde4a1d02002-03-22 01:27:54 +0000955# endif
956
njn25e49d8e72002-09-23 09:36:25 +0000957 VGP_POPCC(VgpMalloc);
sewardjde4a1d02002-03-22 01:27:54 +0000958 return first_to_payload(a, b);
959}
960
961
njn25e49d8e72002-09-23 09:36:25 +0000962void VG_(arena_free) ( ArenaId aid, void* ptr )
sewardjde4a1d02002-03-22 01:27:54 +0000963{
964 Superblock* sb;
965 UInt* sb_payl_firstw;
966 UInt* sb_payl_lastw;
967 UInt* other;
968 UInt* ch;
969 Int ch_bszW, ch_pszW, other_bszW, ch_listno;
970 Arena* a;
971
972 VGP_PUSHCC(VgpMalloc);
973
974 ensure_mm_init();
975 a = arenaId_to_ArenaP(aid);
976
njn25e49d8e72002-09-23 09:36:25 +0000977 if (ptr == NULL) {
978 VGP_POPCC(VgpMalloc);
979 return;
980 }
981
sewardjde4a1d02002-03-22 01:27:54 +0000982 ch = payload_to_first(a, ptr);
983
984# ifdef DEBUG_MALLOC
985 vg_assert(blockSane(a,ch));
986# endif
987
988 a->bytes_on_loan
989 -= sizeof(Word)
990 * bszW_to_pszW(a, mk_plain_bszW(get_bszW_lo(ch)));
991
992 sb = findSb( a, ch );
993 sb_payl_firstw = &(sb->payload_words[0]);
994 sb_payl_lastw = &(sb->payload_words[sb->n_payload_words-1]);
995
996 /* Put this chunk back on a list somewhere. */
997 ch_bszW = get_bszW_lo(ch);
998 ch_pszW = bszW_to_pszW(a, ch_bszW);
999 ch_listno = pszW_to_listNo(ch_pszW);
1000 mkFreeBlock( a, ch, ch_bszW, ch_listno );
1001
1002 /* See if this block can be merged with the following one. */
1003 other = ch + ch_bszW;
1004 /* overhead_szW(a) is the smallest possible bszW for this arena.
1005 So the nearest possible end to the block beginning at other is
1006 other+overhead_szW(a)-1. Hence the test below. */
1007 if (other+overhead_szW(a)-1 <= sb_payl_lastw) {
1008 other_bszW = get_bszW_lo(other);
1009 if (!is_inuse_bszW(other_bszW)) {
1010 /* VG_(printf)( "merge-successor\n"); */
1011 other_bszW = mk_plain_bszW(other_bszW);
1012# ifdef DEBUG_MALLOC
1013 vg_assert(blockSane(a, other));
1014# endif
1015 unlinkBlock( a, ch, ch_listno );
1016 unlinkBlock( a, other, pszW_to_listNo(bszW_to_pszW(a,other_bszW)) );
1017 ch_bszW += other_bszW;
1018 ch_listno = pszW_to_listNo(bszW_to_pszW(a, ch_bszW));
1019 mkFreeBlock( a, ch, ch_bszW, ch_listno );
1020 }
1021 }
1022
1023 /* See if this block can be merged with its predecessor. */
1024 if (ch-overhead_szW(a) >= sb_payl_firstw) {
1025 other_bszW = get_bszW_hi_from_last_word( ch-1 );
1026 if (!is_inuse_bszW(other_bszW)) {
1027 /* VG_(printf)( "merge-predecessor\n"); */
1028 other = last_to_first( ch-1 );
1029 other_bszW = mk_plain_bszW(other_bszW);
1030 unlinkBlock( a, ch, ch_listno );
1031 unlinkBlock( a, other, pszW_to_listNo(bszW_to_pszW(a, other_bszW)) );
1032 ch = other;
1033 ch_bszW += other_bszW;
1034 ch_listno = pszW_to_listNo(bszW_to_pszW(a, ch_bszW));
1035 mkFreeBlock( a, ch, ch_bszW, ch_listno );
1036 }
1037 }
1038
1039# ifdef DEBUG_MALLOC
njn25e49d8e72002-09-23 09:36:25 +00001040 mallocSanityCheckArena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001041# endif
1042
njn25e49d8e72002-09-23 09:36:25 +00001043 VGP_POPCC(VgpMalloc);
sewardjde4a1d02002-03-22 01:27:54 +00001044}
1045
1046
1047/*
1048 The idea for malloc_aligned() is to allocate a big block, base, and
   then split it into two parts: frag, which is returned to the
1050 free pool, and align, which is the bit we're really after. Here's
1051 a picture. L and H denote the block lower and upper overheads, in
1052 words. The details are gruesome. Note it is slightly complicated
1053 because the initial request to generate base may return a bigger
1054 block than we asked for, so it is important to distinguish the base
1055 request size and the base actual size.
1056
1057 frag_b align_b
1058 | |
1059 | frag_p | align_p
1060 | | | |
1061 v v v v
1062
1063 +---+ +---+---+ +---+
1064 | L |----------------| H | L |---------------| H |
1065 +---+ +---+---+ +---+
1066
1067 ^ ^ ^
1068 | | :
1069 | base_p this addr must be aligned
1070 |
1071 base_b
1072
1073 . . . . . . .
1074 <------ frag_bszW -------> . . .
1075 . <------------- base_pszW_act -----------> .
1076 . . . . . . .
1077
1078*/
njn25e49d8e72002-09-23 09:36:25 +00001079void* VG_(arena_malloc_aligned) ( ArenaId aid, Int req_alignB, Int req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001080{
1081 Int req_alignW, req_pszW, base_pszW_req, base_pszW_act, frag_bszW;
1082 Word *base_b, *base_p, *align_p;
1083 UInt saved_bytes_on_loan;
1084 Arena* a;
1085
njn25e49d8e72002-09-23 09:36:25 +00001086 VGP_PUSHCC(VgpMalloc);
1087
sewardjde4a1d02002-03-22 01:27:54 +00001088 ensure_mm_init();
1089 a = arenaId_to_ArenaP(aid);
1090
1091 vg_assert(req_pszB >= 0);
1092 vg_assert(req_pszB < 0x7FFFFFF0);
1093
1094 /* Check that the requested alignment seems reasonable; that is, is
sewardjb5045ef2002-06-04 16:48:29 +00001095 a power of 2. */
sewardjde4a1d02002-03-22 01:27:54 +00001096 switch (req_alignB) {
sewardjc76be292002-04-24 20:32:50 +00001097 case 4:
sewardjde4a1d02002-03-22 01:27:54 +00001098 case 8: case 16: case 32: case 64: case 128: case 256:
1099 case 512: case 1024: case 2048: case 4096: case 8192:
1100 case 16384: case 32768: case 65536: case 131072:
sewardjb5045ef2002-06-04 16:48:29 +00001101 case 262144:
sewardjde4a1d02002-03-22 01:27:54 +00001102 case 1048576:
1103 /* can't be bothered to calculate larger ones */
1104 break;
1105 default:
1106 VG_(printf)("vg_malloc_aligned(%p, %d, %d)\nbad alignment request",
njn25e49d8e72002-09-23 09:36:25 +00001107 a, req_alignB, req_pszB );
sewardjde4a1d02002-03-22 01:27:54 +00001108 VG_(panic)("vg_malloc_aligned");
1109 /*NOTREACHED*/
1110 }
1111
1112 /* Required alignment, in words. Since it's constrained to be a
1113 power of 2 >= word size, no need to align the alignment. Still,
1114 we check. */
1115 req_alignW = req_alignB / VKI_BYTES_PER_WORD;
1116 vg_assert(req_alignB == req_alignW * VKI_BYTES_PER_WORD);
1117
1118 /* Required payload size for the aligned chunk. */
1119 req_pszW = (req_pszB + VKI_BYTES_PER_WORD - 1) / VKI_BYTES_PER_WORD;
1120
1121 /* Payload size to request for the big block that we will split
1122 up. */
1123 base_pszW_req = req_pszW + overhead_szW(a) + req_alignW;
1124
1125 /* Payload ptr for the block we are going to split. Note this
1126 changes a->bytes_on_loan; we save and restore it ourselves. */
1127 saved_bytes_on_loan = a->bytes_on_loan;
njn25e49d8e72002-09-23 09:36:25 +00001128 base_p = VG_(arena_malloc) ( aid, base_pszW_req * VKI_BYTES_PER_WORD );
sewardjde4a1d02002-03-22 01:27:54 +00001129 a->bytes_on_loan = saved_bytes_on_loan;
1130
1131 /* Block ptr for the block we are going to split. */
1132 base_b = payload_to_first ( a, base_p );
1133
1134 /* Pointer to the payload of the aligned block we are going to
1135 return. This has to be suitably aligned. */
1136 align_p = align_upwards ( base_b + 2 * overhead_szW_lo(a)
1137 + overhead_szW_hi(a),
1138 req_alignB );
1139
1140 /* The block size of the fragment we will create. This must be big
1141 enough to actually create a fragment. */
1142 frag_bszW = align_p - overhead_szW_lo(a) - base_b;
1143 vg_assert(frag_bszW >= overhead_szW(a));
1144
1145 /* The actual payload size of the block we are going to split. */
1146 base_pszW_act = bszW_to_pszW(a, mk_plain_bszW(get_bszW_lo(base_b)));
1147
1148 /* Create the fragment block, and put it back on the relevant free
1149 list. */
1150 mkFreeBlock ( a, base_b, frag_bszW,
1151 pszW_to_listNo(bszW_to_pszW(a, frag_bszW)) );
1152
1153 /* Create the aligned block. */
1154 mkInuseBlock ( a,
1155 align_p - overhead_szW_lo(a),
1156 base_p + base_pszW_act
1157 + overhead_szW_hi(a)
1158 - (align_p - overhead_szW_lo(a)) );
1159
1160 /* Final sanity checks. */
1161 vg_assert(( (UInt)align_p % req_alignB) == 0);
1162
1163 vg_assert(is_inuse_bszW(get_bszW_lo(payload_to_first(a, align_p))));
1164
1165 vg_assert(req_pszW
1166 <=
1167 bszW_to_pszW(a, mk_plain_bszW(get_bszW_lo(
1168 payload_to_first(a, align_p))))
1169 );
1170
1171 a->bytes_on_loan
1172 += sizeof(Word)
1173 * bszW_to_pszW(a, mk_plain_bszW(get_bszW_lo(
1174 payload_to_first(a, align_p))));
1175 if (a->bytes_on_loan > a->bytes_on_loan_max)
1176 a->bytes_on_loan_max = a->bytes_on_loan;
1177
1178# ifdef DEBUG_MALLOC
njn25e49d8e72002-09-23 09:36:25 +00001179 mallocSanityCheckArena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001180# endif
1181
njn25e49d8e72002-09-23 09:36:25 +00001182 VGP_POPCC(VgpMalloc);
1183
sewardjde4a1d02002-03-22 01:27:54 +00001184 return align_p;
1185}
1186
1187
1188/*------------------------------------------------------------*/
1189/*--- Services layered on top of malloc/free. ---*/
1190/*------------------------------------------------------------*/
1191
njn25e49d8e72002-09-23 09:36:25 +00001192void* VG_(arena_calloc) ( ArenaId aid, Int nmemb, Int nbytes )
sewardjde4a1d02002-03-22 01:27:54 +00001193{
1194 Int i, size;
1195 UChar* p;
njn25e49d8e72002-09-23 09:36:25 +00001196
1197 VGP_PUSHCC(VgpMalloc);
1198
sewardjde4a1d02002-03-22 01:27:54 +00001199 size = nmemb * nbytes;
sewardjd0b9ac32002-05-01 00:10:28 +00001200 vg_assert(size >= 0);
njn25e49d8e72002-09-23 09:36:25 +00001201 p = VG_(arena_malloc) ( aid, size );
sewardjde4a1d02002-03-22 01:27:54 +00001202 for (i = 0; i < size; i++) p[i] = 0;
njn25e49d8e72002-09-23 09:36:25 +00001203
1204 VGP_POPCC(VgpMalloc);
1205
sewardjde4a1d02002-03-22 01:27:54 +00001206 return p;
1207}
1208
1209
njn25e49d8e72002-09-23 09:36:25 +00001210void* VG_(arena_realloc) ( ArenaId aid, void* ptr,
1211 Int req_alignB, Int req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001212{
1213 Arena* a;
1214 Int old_bszW, old_pszW, old_pszB, i;
1215 UChar *p_old, *p_new;
1216 UInt* ch;
1217
njn25e49d8e72002-09-23 09:36:25 +00001218 VGP_PUSHCC(VgpMalloc);
1219
sewardjde4a1d02002-03-22 01:27:54 +00001220 ensure_mm_init();
1221 a = arenaId_to_ArenaP(aid);
1222
1223 vg_assert(req_pszB >= 0);
1224 vg_assert(req_pszB < 0x7FFFFFF0);
1225
1226 ch = payload_to_first(a, ptr);
1227 vg_assert(blockSane(a, ch));
1228
1229 old_bszW = get_bszW_lo(ch);
1230 vg_assert(is_inuse_bszW(old_bszW));
1231 old_bszW = mk_plain_bszW(old_bszW);
1232 old_pszW = bszW_to_pszW(a, old_bszW);
1233 old_pszB = old_pszW * VKI_BYTES_PER_WORD;
1234
njn25e49d8e72002-09-23 09:36:25 +00001235 if (req_pszB <= old_pszB) {
1236 VGP_POPCC(VgpMalloc);
1237 return ptr;
1238 }
sewardjde4a1d02002-03-22 01:27:54 +00001239
njn25e49d8e72002-09-23 09:36:25 +00001240 if (req_alignB == 4)
1241 p_new = VG_(arena_malloc) ( aid, req_pszB );
1242 else
1243 p_new = VG_(arena_malloc_aligned) ( aid, req_alignB, req_pszB );
1244
sewardjde4a1d02002-03-22 01:27:54 +00001245 p_old = (UChar*)ptr;
1246 for (i = 0; i < old_pszB; i++)
1247 p_new[i] = p_old[i];
1248
njn25e49d8e72002-09-23 09:36:25 +00001249 VG_(arena_free)(aid, p_old);
1250
1251 VGP_POPCC(VgpMalloc);
sewardjde4a1d02002-03-22 01:27:54 +00001252 return p_new;
1253}
1254
1255
1256/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001257/*--- Skin-visible functions. ---*/
1258/*------------------------------------------------------------*/
1259
1260/* All just wrappers to avoid exposing arenas to skins */
1261
1262void* VG_(malloc) ( Int nbytes )
1263{
1264 return VG_(arena_malloc) ( VG_AR_SKIN, nbytes );
1265}
1266
1267void VG_(free) ( void* ptr )
1268{
1269 VG_(arena_free) ( VG_AR_SKIN, ptr );
1270}
1271
1272void* VG_(calloc) ( Int nmemb, Int nbytes )
1273{
1274 return VG_(arena_calloc) ( VG_AR_SKIN, nmemb, nbytes );
1275}
1276
1277void* VG_(realloc) ( void* ptr, Int size )
1278{
1279 return VG_(arena_realloc) ( VG_AR_SKIN, ptr, /*alignment*/4, size );
1280}
1281
1282void* VG_(malloc_aligned) ( Int req_alignB, Int req_pszB )
1283{
1284 return VG_(arena_malloc_aligned) ( VG_AR_SKIN, req_alignB, req_pszB );
1285}
1286
1287
1288/*------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +00001289/*--- The original test driver machinery. ---*/
1290/*------------------------------------------------------------*/
1291
#if 0

/* Stand-alone stress-test harness for the allocator.  Compiled out
   of the normal build (#if 0); kept as a reference driver.  Choose
   the heavy or light parameter set below. */
#if 1
#define N_TEST_TRANSACTIONS 100000000
#define N_TEST_ARR 200000
#define M_TEST_MALLOC 1000
#else
#define N_TEST_TRANSACTIONS 500000
#define N_TEST_ARR 30000
#define M_TEST_MALLOC 500
#endif


/* Slots holding currently-live test allocations; NULL == empty. */
void* test_arr[N_TEST_ARR];

int main ( int argc, char** argv )
{
   Int i, j, k, nbytes, qq;
   unsigned char* chp;
   Arena* a = &arena[VG_AR_CORE];
   srandom(1);
   for (i = 0; i < N_TEST_ARR; i++)
      test_arr[i] = NULL;

   /* Random mix of mallocs, memaligns and frees; run the arena
      sanity checker periodically. */
   for (i = 0; i < N_TEST_TRANSACTIONS; i++) {
      if (i % 50000 == 0) mallocSanityCheck(a);
      j = random() % N_TEST_ARR;
      if (test_arr[j]) {
         vg_free(a, test_arr[j]);
         test_arr[j] = NULL;
      } else {
         nbytes = 1 + random() % M_TEST_MALLOC;
         qq = random()%64;
         if (qq == 32)
            nbytes *= 17;      /* occasionally a much larger block */
         else if (qq == 33)
            nbytes = 0;        /* and occasionally a zero-sized one */
         test_arr[j]
            = (i % 17) == 0
                 ? vg_memalign(a, nbytes, 1<< (3+(random()%10)))
                 : vg_malloc( a, nbytes );
         chp = test_arr[j];
         /* Scribble over the payload to expose block-overlap bugs. */
         for (k = 0; k < nbytes; k++)
            chp[k] = (unsigned char)(k + 99);
      }
   }

   /* Free everything still live, then a final sanity check. */
   for (i = 0; i < N_TEST_ARR; i++) {
      if (test_arr[i]) {
         vg_free(a, test_arr[i]);
         test_arr[i] = NULL;
      }
   }
   mallocSanityCheck(a);

   fprintf(stderr, "ALL DONE\n");

   show_arena_stats(a);
   fprintf(stderr, "%d max useful, %d bytes mmap'd (%4.1f%%), %d useful\n",
           a->bytes_on_loan_max,
           a->bytes_mmaped,
           100.0 * (double)a->bytes_on_loan_max / (double)a->bytes_mmaped,
           a->bytes_on_loan );

   return 0;
}
#endif /* 0 */
1360
1361
1362/*--------------------------------------------------------------------*/
1363/*--- end vg_malloc2.c ---*/
1364/*--------------------------------------------------------------------*/