/*--------------------------------------------------------------------*/
/*--- An implementation of malloc/free which doesn't use sbrk.     ---*/
/*---                                                 vg_malloc2.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, an extensible x86 protected-mode
   emulator for monitoring program execution on x86-Unixes.

   Copyright (C) 2000-2004 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "core.h"

//#define DEBUG_MALLOC      // turn on heavyweight debugging machinery
//#define VERBOSE_MALLOC    // make verbose, esp. in debugging machinery

/*------------------------------------------------------------*/
/*--- Main types                                           ---*/
/*------------------------------------------------------------*/

#define VG_N_MALLOC_LISTS 16    // do not change this

// On 64-bit systems size_t is 64-bits, so bigger than this is possible.
// We can worry about that when it happens...
#define MAX_PSZB 0x7ffffff0

typedef UChar UByte;

/* Block layout:

     this block total szB     (sizeof(Int) bytes)
     freelist previous ptr    (sizeof(void*) bytes)
     red zone bytes           (depends on .rz_szB field of Arena)
     (payload bytes)
     red zone bytes           (depends on .rz_szB field of Arena)
     freelist next ptr        (sizeof(void*) bytes)
     this block total szB     (sizeof(Int) bytes)

   Total size in bytes (bszB) and payload size in bytes (pszB)
   are related by:

      bszB == pszB + 2*sizeof(Int) + 2*sizeof(void*) + 2*a->rz_szB

   Furthermore, both size fields in the block are negative if it is
   not in use, and positive if it is in use.  A block size of zero
   is not possible, because a block always has at least two Ints and two
   pointers of overhead.

   Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned.  This is
   achieved by ensuring that Superblocks are VG_MIN_MALLOC_SZB-aligned
   (see newSuperblock() for how), and that the lengths of the following
   things are a multiple of VG_MIN_MALLOC_SZB:
   - Superblock admin section lengths (due to elastic padding)
   - Block admin section (low and high) lengths (due to elastic redzones)
   - Block payload lengths (due to req_pszB rounding up)
*/
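
/* A worked instance of the size formula above -- a sketch only, under
   the assumption of a 32-bit build where sizeof(Int) == sizeof(void*)
   == 4 and an arena whose rz_szB has been rounded to 4:

      bszB == pszB + 2*4 + 2*4 + 2*4 == pszB + 24

   So a request that rounds up to a 16-byte payload occupies a 40-byte
   block, recorded as +40 in both size fields while in use and as -40
   once freed. */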
typedef
   struct {
      // No fields are actually used in this struct, because a Block has
      // loads of variable sized fields and so can't be accessed
      // meaningfully with normal fields.  So we use access functions all
      // the time.  This struct gives us a type to use, though.  Also, we
      // make sizeof(Block) 1 byte so that we can do arithmetic with the
      // Block* type in increments of 1!
      UByte dummy;
   }
   Block;

// A superblock.  'padding' is never used, it just ensures that if the
// entire Superblock is aligned to VG_MIN_MALLOC_SZB, then payload_bytes[]
// will be too.  It can add small amounts of padding unnecessarily -- eg.
// 8 bytes on 32-bit machines with an 8-byte VG_MIN_MALLOC_SZB -- because
// it's too hard to make a constant expression that works perfectly in all
// cases.
// payload_bytes[] is made a single big Block when the Superblock is
// created, and then can be split and the splittings remerged, but Blocks
// always cover its entire length -- there are never any unused bytes at
// the end, for example.
typedef
   struct _Superblock {
      struct _Superblock* next;
      Int   n_payload_bytes;
      UByte padding[ VG_MIN_MALLOC_SZB -
                     ((sizeof(void*) + sizeof(Int)) % VG_MIN_MALLOC_SZB) ];
      UByte payload_bytes[0];
   }
   Superblock;
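
/* A sketch of the padding arithmetic above, assuming 4-byte pointers
   and Ints with VG_MIN_MALLOC_SZB == 8: (sizeof(void*) + sizeof(Int))
   % 8 == 0, so padding[] gets length 8 - 0 == 8 -- the
   unnecessary-but-harmless 8 bytes mentioned in the comment.  Were the
   admin fields 12 bytes, the padding would be 8 - (12 % 8) == 4, again
   bringing the header up to an 8-byte boundary. */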

// An arena.  'freelist' is a circular, doubly-linked list.  'rz_szB' is
// elastic, in that it can be bigger than asked-for to ensure alignment.
typedef
   struct {
      Char*       name;
      Bool        clientmem;        // Allocates in the client address space?
      Int         rz_szB;           // Red zone size in bytes
      Int         min_sblock_szB;   // Minimum superblock size in bytes
      Block*      freelist[VG_N_MALLOC_LISTS];
      Superblock* sblocks;
      // Stats only.
      UInt bytes_on_loan;
      UInt bytes_mmaped;
      UInt bytes_on_loan_max;
   }
   Arena;


/*------------------------------------------------------------*/
/*--- Low-level functions for working with Blocks.         ---*/
/*------------------------------------------------------------*/

// Mark a bszB as in-use, and not in-use.
static __inline__
Int mk_inuse_bszB ( Int bszB )
{
   vg_assert(bszB != 0);
   return (bszB < 0) ? -bszB : bszB;
}
static __inline__
Int mk_free_bszB ( Int bszB )
{
   vg_assert(bszB != 0);
   return (bszB < 0) ? bszB : -bszB;
}

// Remove the in-use/not-in-use attribute from a bszB, leaving just
// the size.
static __inline__
Int mk_plain_bszB ( Int bszB )
{
   vg_assert(bszB != 0);
   return (bszB < 0) ? -bszB : bszB;
}

// Does this bszB have the in-use attribute?
static __inline__
Bool is_inuse_bszB ( Int bszB )
{
   vg_assert(bszB != 0);
   return (bszB < 0) ? False : True;
}


// Set and get the lower size field of a block.
static __inline__
void set_bszB_lo ( Block* b, Int bszB )
{
   *(Int*)&b[0] = bszB;
}
static __inline__
Int get_bszB_lo ( Block* b )
{
   return *(Int*)&b[0];
}

// Get the address of the last byte in a block
static __inline__
UByte* last_byte ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return &b2[mk_plain_bszB(get_bszB_lo(b)) - 1];
}

// Set and get the upper size field of a block.
static __inline__
void set_bszB_hi ( Block* b, Int bszB )
{
   UByte* b2 = (UByte*)b;
   UByte* lb = last_byte(b);
   vg_assert(lb == &b2[mk_plain_bszB(bszB) - 1]);
   *(Int*)&lb[-sizeof(Int) + 1] = bszB;
}
static __inline__
Int get_bszB_hi ( Block* b )
{
   UByte* lb = last_byte(b);
   return *(Int*)&lb[-sizeof(Int) + 1];
}
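
/* To make the negative indexing above concrete -- a hypothetical block
   b at address 0x1000 with bszB == 40 and 4-byte Ints: last_byte(b) is
   0x1027, and the upper size field occupies the final four bytes
   0x1024 .. 0x1027, ie. it starts at &lb[-sizeof(Int) + 1] == &lb[-3]. */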


// Given the addr of a block, return the addr of its payload.
static __inline__
UByte* get_block_payload ( Arena* a, Block* b )
{
   UByte* b2 = (UByte*)b;
   return & b2[sizeof(Int) + sizeof(void*) + a->rz_szB];
}
// Given the addr of a block's payload, return the addr of the block itself.
static __inline__
Block* get_payload_block ( Arena* a, UByte* payload )
{
   return (Block*)&payload[-sizeof(Int) - sizeof(void*) - a->rz_szB];
}


// Set and get the next and previous link fields of a block.
static __inline__
void set_prev_b ( Block* b, Block* prev_p )
{
   UByte* b2 = (UByte*)b;
   *(Block**)&b2[sizeof(Int)] = prev_p;
}
static __inline__
void set_next_b ( Block* b, Block* next_p )
{
   UByte* lb = last_byte(b);
   *(Block**)&lb[-sizeof(Int) - sizeof(void*) + 1] = next_p;
}
static __inline__
Block* get_prev_b ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return *(Block**)&b2[sizeof(Int)];
}
static __inline__
Block* get_next_b ( Block* b )
{
   UByte* lb = last_byte(b);
   return *(Block**)&lb[-sizeof(Int) - sizeof(void*) + 1];
}


// Get the block immediately preceding this one in the Superblock.
static __inline__
Block* get_predecessor_block ( Block* b )
{
   UByte* b2   = (UByte*)b;
   Int    bszB = mk_plain_bszB( (*(Int*)&b2[-sizeof(Int)]) );
   return (Block*)&b2[-bszB];
}

// Read and write the lower and upper red-zone bytes of a block.
static __inline__
void set_rz_lo_byte ( Arena* a, Block* b, Int rz_byteno, UByte v )
{
   UByte* b2 = (UByte*)b;
   b2[sizeof(Int) + sizeof(void*) + rz_byteno] = v;
}
static __inline__
void set_rz_hi_byte ( Arena* a, Block* b, Int rz_byteno, UByte v )
{
   UByte* lb = last_byte(b);
   lb[-sizeof(Int) - sizeof(void*) - rz_byteno] = v;
}
static __inline__
UByte get_rz_lo_byte ( Arena* a, Block* b, Int rz_byteno )
{
   UByte* b2 = (UByte*)b;
   return b2[sizeof(Int) + sizeof(void*) + rz_byteno];
}
static __inline__
UByte get_rz_hi_byte ( Arena* a, Block* b, Int rz_byteno )
{
   UByte* lb = last_byte(b);
   return lb[-sizeof(Int) - sizeof(void*) - rz_byteno];
}


/* Return the lower, upper and total overhead in bytes for a block.
   These are determined purely by which arena the block lives in.  */
static __inline__
Int overhead_szB_lo ( Arena* a )
{
   return sizeof(Int) + sizeof(void*) + a->rz_szB;
}
static __inline__
Int overhead_szB_hi ( Arena* a )
{
   return sizeof(void*) + sizeof(Int) + a->rz_szB;
}
static __inline__
Int overhead_szB ( Arena* a )
{
   return overhead_szB_lo(a) + overhead_szB_hi(a);
}

// Return the minimum bszB for a block in this arena.  Blocks can have
// zero-length payloads, so it's just the size of the admin bytes.
static __inline__
Int min_useful_bszB ( Arena* a )
{
   return overhead_szB(a);
}

// Convert payload size <--> block size (both in bytes).
static __inline__
Int pszB_to_bszB ( Arena* a, Int pszB )
{
   vg_assert(pszB >= 0);
   return pszB + overhead_szB(a);
}
static __inline__
Int bszB_to_pszB ( Arena* a, Int bszB )
{
   Int pszB = bszB - overhead_szB(a);
   vg_assert(pszB >= 0);
   return pszB;
}


/*------------------------------------------------------------*/
/*--- Arena management                                     ---*/
/*------------------------------------------------------------*/

#define CORE_ARENA_MIN_SZB 1048576

// The arena structures themselves.
static Arena vg_arena[VG_N_ARENAS];

// Functions external to this module identify arenas using ArenaIds,
// not Arena*s.  This fn converts the former to the latter.
static Arena* arenaId_to_ArenaP ( ArenaId arena )
{
   vg_assert(arena >= 0 && arena < VG_N_ARENAS);
   return & vg_arena[arena];
}

// Initialise an arena.  rz_szB is the minimum redzone size;  it might be
// made bigger to ensure that VG_MIN_MALLOC_ALIGNMENT is observed.
static
void arena_init ( ArenaId aid, Char* name, Int rz_szB, Int min_sblock_szB )
{
   Int    i;
   Arena* a = arenaId_to_ArenaP(aid);

   vg_assert(rz_szB >= 0);
   vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
   a->name      = name;
   a->clientmem = ( VG_AR_CLIENT == aid ? True : False );

   // The size of the low and high admin sections in a block must be a
   // multiple of VG_MIN_MALLOC_ALIGNMENT.  So we round up the asked-for
   // redzone size if necessary to achieve this.
   a->rz_szB = rz_szB;
   while (0 != overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++;
   vg_assert(overhead_szB_lo(a) == overhead_szB_hi(a));

   a->min_sblock_szB = min_sblock_szB;
   for (i = 0; i < VG_N_MALLOC_LISTS; i++) a->freelist[i] = NULL;
   a->sblocks           = NULL;
   a->bytes_on_loan     = 0;
   a->bytes_mmaped      = 0;
   a->bytes_on_loan_max = 0;
}
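
/* How the rounding loop above plays out -- a sketch assuming 4-byte
   Ints and pointers with VG_MIN_MALLOC_SZB == 8: a requested rz_szB of
   4 gives overhead_szB_lo == 4+4+4 == 12, not a multiple of 8, so
   rz_szB is bumped to 8 and the low overhead becomes 16.  With 8-byte
   pointers, 4+8+4 == 16 is already a multiple of 8, so rz_szB == 4
   stays as asked -- the "elastic" behaviour described above. */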

/* Print vital stats for an arena. */
void VG_(print_all_arena_stats) ( void )
{
   Int i;
   for (i = 0; i < VG_N_ARENAS; i++) {
      Arena* a = arenaId_to_ArenaP(i);
      VG_(message)(Vg_DebugMsg,
         "AR %8s: %8d mmap'd, %8d/%8d max/curr",
         a->name, a->bytes_mmaped, a->bytes_on_loan_max, a->bytes_on_loan
      );
   }
}

/* This library is self-initialising, as it makes this more self-contained,
   less coupled with the outside world.  Hence VG_(arena_malloc)() and
   VG_(arena_free)() below always call ensure_mm_init() to ensure things are
   correctly initialised.  */
static
void ensure_mm_init ( void )
{
   static Int  client_rz_szB;
   static Bool init_done = False;

   if (init_done) {
      // Make sure the client arena's redzone size never changes.  Could
      // happen if VG_(arena_malloc) was called too early, ie. before the
      // tool was loaded.
      vg_assert(client_rz_szB == VG_(vg_malloc_redzone_szB));
      return;
   }

   /* No particular reason for this figure, it's just smallish */
   sk_assert(VG_(vg_malloc_redzone_szB) < 128);
   sk_assert(VG_(vg_malloc_redzone_szB) >= 0);
   client_rz_szB = VG_(vg_malloc_redzone_szB);

   /* Use checked red zones (of various sizes) for our internal stuff,
      and an unchecked zone of arbitrary size for the client.  Of
      course the client's red zone can be checked by the tool, eg.
      by using addressability maps, but not by the mechanism implemented
      here, which merely checks at the time of freeing that the red
      zone bytes are unchanged.

      Nb: redzone sizes are *minimums*; they could be made bigger to ensure
      alignment.  Eg. on 32-bit machines, 4 becomes 8, and 12 becomes 16;
      but on 64-bit machines 4 stays as 4, and 12 stays as 12 --- the extra
      4 bytes in both are accounted for by the larger prev/next ptr.
   */
   arena_init ( VG_AR_CORE,      "core",     4,              CORE_ARENA_MIN_SZB );
   arena_init ( VG_AR_TOOL,      "tool",     4,              1048576 );
   arena_init ( VG_AR_SYMTAB,    "symtab",   4,              1048576 );
   arena_init ( VG_AR_JITTER,    "JITter",   4,              32768 );
   arena_init ( VG_AR_CLIENT,    "client",   client_rz_szB,  1048576 );
   arena_init ( VG_AR_DEMANGLE,  "demangle", 12/*paranoid*/, 65536 );
   arena_init ( VG_AR_EXECTXT,   "exectxt",  4,              65536 );
   arena_init ( VG_AR_ERRORS,    "errors",   4,              65536 );
   arena_init ( VG_AR_TRANSIENT, "transien", 4,              65536 );

   init_done = True;
#  ifdef DEBUG_MALLOC
   VG_(sanity_check_malloc_all)();
#  endif
}


/*------------------------------------------------------------*/
/*--- Superblock management                                ---*/
/*------------------------------------------------------------*/

// Align ptr p upwards to an align-sized boundary.
static
void* align_upwards ( void* p, Int align )
{
   Addr a = (Addr)p;
   if ((a % align) == 0) return (void*)a;
   return (void*)(a - (a % align) + align);
}
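
/* Usage sketch, with purely illustrative addresses:
   align_upwards((void*)0x1001, 8) yields (void*)0x1008, while an
   already-aligned (void*)0x1008 is returned unchanged. */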

// If not enough memory available, either aborts (for non-client memory)
// or returns 0 (for client memory).
static
Superblock* newSuperblock ( Arena* a, Int cszB )
{
   // The extra VG_MIN_MALLOC_SZB bytes are for possible alignment up.
   static UByte bootstrap_superblock[CORE_ARENA_MIN_SZB+VG_MIN_MALLOC_SZB];
   static Bool  called_before = False;
   Superblock*  sb;

   // Take into account admin bytes in the Superblock.
   cszB += sizeof(Superblock);

   if (cszB < a->min_sblock_szB) cszB = a->min_sblock_szB;
   while ((cszB % VKI_PAGE_SIZE) > 0) cszB++;

   if (!called_before) {
      // First time we're called -- use the special static bootstrap
      // superblock (see comment at top of main() for details).
      called_before = True;
      vg_assert(a == arenaId_to_ArenaP(VG_AR_CORE));
      vg_assert(CORE_ARENA_MIN_SZB >= cszB);
      // Ensure sb is suitably aligned.
      sb = (Superblock*)align_upwards( bootstrap_superblock,
                                       VG_MIN_MALLOC_SZB );
   } else if (a->clientmem) {
      // client allocation -- return 0 to client if it fails
      sb = (Superblock *)
           VG_(client_alloc)(0, cszB,
                             VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC, 0);
      if (NULL == sb)
         return 0;
   } else {
      // non-client allocation -- aborts if it fails
      sb = VG_(get_memory_from_mmap) ( cszB, "newSuperblock" );
   }
   vg_assert(NULL != sb);
   vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
   sb->n_payload_bytes = cszB - sizeof(Superblock);
   a->bytes_mmaped += cszB;
   if (0)
      VG_(message)(Vg_DebugMsg, "newSuperblock, %d payload bytes",
                                sb->n_payload_bytes);
   return sb;
}

// Find the superblock containing the given chunk.
static
Superblock* findSb ( Arena* a, Block* b )
{
   Superblock* sb;
   for (sb = a->sblocks; sb; sb = sb->next)
      if ((Block*)&sb->payload_bytes[0] <= b
          && b < (Block*)&sb->payload_bytes[sb->n_payload_bytes])
         return sb;
   VG_(printf)("findSb: can't find pointer %p in arena `%s'\n", b, a->name );
   VG_(core_panic)("findSb: VG_(arena_free)() in wrong arena?");
   return NULL; /*NOTREACHED*/
}


/*------------------------------------------------------------*/
/*--- Command line options                                 ---*/
/*------------------------------------------------------------*/

/* Round malloc sizes up to a multiple of VG_SLOPPY_MALLOC_SZB bytes?
   default: NO
   Nb: the allocator always rounds blocks up to a multiple of
   VG_MIN_MALLOC_SZB.  VG_(clo_sloppy_malloc) is relevant eg. for
   Memcheck, which will be byte-precise with addressability maps on its
   malloc allocations unless --sloppy-malloc=yes.  */
Bool VG_(clo_sloppy_malloc) = False;

/* DEBUG: print malloc details?  default: NO */
Bool VG_(clo_trace_malloc)  = False;

/* Minimum alignment in functions that don't specify alignment explicitly.
   default: VG_MIN_MALLOC_SZB. */
Int  VG_(clo_alignment) = VG_MIN_MALLOC_SZB;


Bool VG_(replacement_malloc_process_cmd_line_option)(Char* arg)
{
   if (VG_CLO_STREQN(12, arg, "--alignment=")) {
      VG_(clo_alignment) = (Int)VG_(atoll)(&arg[12]);

      if (VG_(clo_alignment) < VG_MIN_MALLOC_SZB
          || VG_(clo_alignment) > 4096
          || VG_(log2)( VG_(clo_alignment) ) == -1 /* not a power of 2 */) {
         VG_(message)(Vg_UserMsg, "");
         VG_(message)(Vg_UserMsg,
            "Invalid --alignment= setting.  "
            "Should be a power of 2, >= %d, <= 4096.", VG_MIN_MALLOC_SZB);
         VG_(bad_option)("--alignment");
      }
   }

   else VG_BOOL_CLO("--sloppy-malloc", VG_(clo_sloppy_malloc))
   else VG_BOOL_CLO("--trace-malloc",  VG_(clo_trace_malloc))
   else
      return False;

   return True;
}

void VG_(replacement_malloc_print_usage)(void)
{
   VG_(printf)(
"    --sloppy-malloc=no|yes    round malloc sizes to multiple of %d? [no]\n"
"    --alignment=<number>      set minimum alignment of allocations [%d]\n",
   VG_SLOPPY_MALLOC_SZB, VG_MIN_MALLOC_SZB
   );
}

void VG_(replacement_malloc_print_debug_usage)(void)
{
   VG_(printf)(
"    --trace-malloc=no|yes     show client malloc details? [no]\n"
   );
}


/*------------------------------------------------------------*/
/*--- Functions for working with freelists.                ---*/
/*------------------------------------------------------------*/

// Nb: Determination of which freelist a block lives on is based on the
// payload size, not block size.

// Convert a payload size in bytes to a freelist number.
static
Int pszB_to_listNo ( Int pszB )
{
   vg_assert(pszB >= 0);
   vg_assert(0 == pszB % VG_MIN_MALLOC_SZB);
   pszB /= VG_MIN_MALLOC_SZB;
   if (pszB <= 2)   return 0;
   if (pszB <= 3)   return 1;
   if (pszB <= 4)   return 2;
   if (pszB <= 5)   return 3;
   if (pszB <= 6)   return 4;
   if (pszB <= 7)   return 5;
   if (pszB <= 8)   return 6;
   if (pszB <= 9)   return 7;
   if (pszB <= 10)  return 8;
   if (pszB <= 11)  return 9;
   if (pszB <= 12)  return 10;
   if (pszB <= 16)  return 11;
   if (pszB <= 32)  return 12;
   if (pszB <= 64)  return 13;
   if (pszB <= 128) return 14;
   return 15;
}
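
/* A sketch of the bucketing above, assuming VG_MIN_MALLOC_SZB == 8:
   pszB 16 scales to 2, so lands on list 0; pszB 72 scales to 9, list 7;
   pszB 96 scales to 12, list 10; and anything over 128*8 == 1024 bytes
   falls through to the catch-all list 15. */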

// What is the minimum payload size for a given list?
static
Int listNo_to_pszB_min ( Int listNo )
{
   Int pszB = 0;
   vg_assert(listNo >= 0 && listNo <= VG_N_MALLOC_LISTS);
   while (pszB_to_listNo(pszB) < listNo) pszB += VG_MIN_MALLOC_SZB;
   return pszB;
}

// What is the maximum payload size for a given list?
static
Int listNo_to_pszB_max ( Int listNo )
{
   vg_assert(listNo >= 0 && listNo <= VG_N_MALLOC_LISTS);
   if (listNo == VG_N_MALLOC_LISTS-1) {
      return MAX_PSZB;
   } else {
      return listNo_to_pszB_min(listNo+1) - 1;
   }
}


/* A nasty hack to try and reduce fragmentation.  Try and replace
   a->freelist[lno] with another block on the same list but with a
   lower address, with the idea of attempting to recycle the same
   blocks rather than cruise through the address space. */
static
void swizzle ( Arena* a, Int lno )
{
   Block* p_best;
   Block* pp;
   Block* pn;
   Int    i;

   p_best = a->freelist[lno];
   if (p_best == NULL) return;

   pn = pp = p_best;
   for (i = 0; i < 20; i++) {
      pn = get_next_b(pn);
      pp = get_prev_b(pp);
      if (pn < p_best) p_best = pn;
      if (pp < p_best) p_best = pp;
   }
   if (p_best < a->freelist[lno]) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)("retreat by %d\n", a->freelist[lno] - p_best);
#     endif
      a->freelist[lno] = p_best;
   }
}


/*------------------------------------------------------------*/
/*--- Sanity-check/debugging machinery.                    ---*/
/*------------------------------------------------------------*/

#define VG_REDZONE_LO_MASK 0x31
#define VG_REDZONE_HI_MASK 0x7c

// Do some crude sanity checks on a chunk.
static
Bool blockSane ( Arena* a, Block* b )
{
#  define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
   Int i;
   if (get_bszB_lo(b) != get_bszB_hi(b))
      {BLEAT("sizes");return False;}
   if (!a->clientmem && is_inuse_bszB(get_bszB_lo(b))) {
      for (i = 0; i < a->rz_szB; i++) {
         if (get_rz_lo_byte(a, b, i) !=
            (UByte)(((Addr)b&0xff) ^ VG_REDZONE_LO_MASK))
               {BLEAT("redzone-lo");return False;}
         if (get_rz_hi_byte(a, b, i) !=
            (UByte)(((Addr)b&0xff) ^ VG_REDZONE_HI_MASK))
               {BLEAT("redzone-hi");return False;}
      }
   }
   return True;
#  undef BLEAT
}
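
/* The expected redzone fill, concretely: for a hypothetical block at an
   address whose low byte is 0x40, every lo redzone byte must be
   0x40 ^ 0x31 == 0x71 and every hi redzone byte 0x40 ^ 0x7c == 0x3c.
   mkInuseBlock() below writes exactly this pattern, so a mismatch here
   means someone scribbled outside the payload. */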

// Print superblocks (only for debugging).
static
void ppSuperblocks ( Arena* a )
{
   Int i, b_bszB, blockno;
   Block* b;
   Superblock* sb = a->sblocks;
   blockno = 1;

   while (sb) {
      VG_(printf)( "\n" );
      VG_(printf)( "superblock %d at %p, sb->n_pl_bs = %d, next = %p\n",
                   blockno++, sb, sb->n_payload_bytes, sb->next );
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         b      = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_lo(b);
         VG_(printf)( "   block at %d, bszB %d: ", i, mk_plain_bszB(b_bszB) );
         VG_(printf)( "%s, ", is_inuse_bszB(b_bszB) ? "inuse" : "free");
         VG_(printf)( "%s\n", blockSane(a, b) ? "ok" : "BAD" );
      }
      vg_assert(i == sb->n_payload_bytes);   // no overshoot at end of Sb
      sb = sb->next;
   }
   VG_(printf)( "end of superblocks\n\n" );
}

// Sanity check both the superblocks and the chains.
static void sanity_check_malloc_arena ( ArenaId aid )
{
   Int         i, superblockctr, b_bszB, b_pszB, blockctr_sb, blockctr_li;
   Int         blockctr_sb_free, listno, list_min_pszB, list_max_pszB;
   Superblock* sb;
   Bool        thisFree, lastWasFree;
   Block*      b;
   Block*      b_prev;
   UInt        arena_bytes_on_loan;
   Arena*      a;

#  define BOMB VG_(core_panic)("sanity_check_malloc_arena")

   a = arenaId_to_ArenaP(aid);

   // First, traverse all the superblocks, inspecting the Blocks in each.
   superblockctr = blockctr_sb = blockctr_sb_free = 0;
   arena_bytes_on_loan = 0;
   sb = a->sblocks;
   while (sb) {
      lastWasFree = False;
      superblockctr++;
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         blockctr_sb++;
         b      = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_lo(b);
         if (!blockSane(a, b)) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d (bszB %d): "
                        " BAD\n", sb, i, b_bszB );
            BOMB;
         }
         thisFree = !is_inuse_bszB(b_bszB);
         if (thisFree && lastWasFree) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d (bszB %d): "
                        "UNMERGED FREES\n",
                        sb, i, b_bszB );
            BOMB;
         }
         if (thisFree) blockctr_sb_free++;
         if (!thisFree)
            arena_bytes_on_loan += bszB_to_pszB(a, b_bszB);
         lastWasFree = thisFree;
      }
      if (i > sb->n_payload_bytes) {
         VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
                      "overshoots end\n", sb);
         BOMB;
      }
      sb = sb->next;
   }

   if (arena_bytes_on_loan != a->bytes_on_loan) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: a->bytes_on_loan %d, "
                   "arena_bytes_on_loan %d: "
                   "MISMATCH\n", a->bytes_on_loan, arena_bytes_on_loan);
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   /* Second, traverse each list, checking that the back pointers make
      sense, counting blocks encountered, and checking that each block
      is an appropriate size for this list. */
   blockctr_li = 0;
   for (listno = 0; listno < VG_N_MALLOC_LISTS; listno++) {
      list_min_pszB = listNo_to_pszB_min(listno);
      list_max_pszB = listNo_to_pszB_max(listno);
      b = a->freelist[listno];
      if (b == NULL) continue;
      while (True) {
         b_prev = b;
         b = get_next_b(b);
         if (get_prev_b(b) != b_prev) {
            VG_(printf)( "sanity_check_malloc_arena: list %d at %p: "
                         "BAD LINKAGE\n",
                         listno, b );
            BOMB;
         }
         b_pszB = bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(b)));
         if (b_pszB < list_min_pszB || b_pszB > list_max_pszB) {
            VG_(printf)(
               "sanity_check_malloc_arena: list %d at %p: "
               "WRONG CHAIN SIZE %dB (%dB, %dB)\n",
               listno, b, b_pszB, list_min_pszB, list_max_pszB );
            BOMB;
         }
         blockctr_li++;
         if (b == a->freelist[listno]) break;
      }
   }

   if (blockctr_sb_free != blockctr_li) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: BLOCK COUNT MISMATCH "
                   "(via sbs %d, via lists %d)\n",
                   blockctr_sb_free, blockctr_li );
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   if (VG_(clo_verbosity) > 2)
      VG_(message)(Vg_DebugMsg,
                   "AR %8s: %2d sbs, %5d bs, %2d/%-2d free bs, "
                   "%7d mmap, %7d loan",
                   a->name,
                   superblockctr,
                   blockctr_sb, blockctr_sb_free, blockctr_li,
                   a->bytes_mmaped, a->bytes_on_loan);
#  undef BOMB
}


void VG_(sanity_check_malloc_all) ( void )
{
   Int i;
   for (i = 0; i < VG_N_ARENAS; i++)
      sanity_check_malloc_arena ( i );
}

/* Really, this isn't the right place for this.  Nevertheless: find
   out if an arena is empty -- currently has no bytes on loan.  This
   is useful for checking for memory leaks (of valgrind, not the
   client.) */
Bool VG_(is_empty_arena) ( ArenaId aid )
{
   Arena*      a;
   Superblock* sb;
   Block*      b;
   Int         b_bszB;

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);
   for (sb = a->sblocks; sb != NULL; sb = sb->next) {
      // If the superblock is empty, it should contain a single free
      // block, of the right size.
      b = (Block*)&sb->payload_bytes[0];
      b_bszB = get_bszB_lo(b);
      if (is_inuse_bszB(b_bszB)) return False;
      if (mk_plain_bszB(b_bszB) != sb->n_payload_bytes) return False;
      // If we reach here, this block is not in use and is of the right
      // size, so keep going around the loop...
   }
   return True;
}


/*------------------------------------------------------------*/
/*--- Creating and deleting blocks.                        ---*/
/*------------------------------------------------------------*/

// Mark the bytes at b .. b+bszB-1 as not in use, and add them to the
// relevant free list.

static
void mkFreeBlock ( Arena* a, Block* b, Int bszB, Int b_lno )
{
   Int pszB = bszB_to_pszB(a, bszB);
   vg_assert(pszB >= 0);
   vg_assert(b_lno == pszB_to_listNo(pszB));
   // Set the size fields and indicate not-in-use.
   set_bszB_lo(b, mk_free_bszB(bszB));
   set_bszB_hi(b, mk_free_bszB(bszB));

   // Add to the relevant list.
   if (a->freelist[b_lno] == NULL) {
      set_prev_b(b, b);
      set_next_b(b, b);
      a->freelist[b_lno] = b;
   } else {
      Block* b_prev = get_prev_b(a->freelist[b_lno]);
      Block* b_next = a->freelist[b_lno];
      set_next_b(b_prev, b);
      set_prev_b(b_next, b);
      set_next_b(b, b_next);
      set_prev_b(b, b_prev);
   }
#  ifdef DEBUG_MALLOC
   (void)blockSane(a,b);
#  endif
}

// Mark the bytes at b .. b+bszB-1 as in use, and set up the block
// appropriately.
static
void mkInuseBlock ( Arena* a, Block* b, UInt bszB )
{
   Int i;
   vg_assert(bszB >= min_useful_bszB(a));
   set_bszB_lo(b, mk_inuse_bszB(bszB));
   set_bszB_hi(b, mk_inuse_bszB(bszB));
   set_prev_b(b, NULL);    // Take off freelist
   set_next_b(b, NULL);    // ditto
   if (!a->clientmem) {
      for (i = 0; i < a->rz_szB; i++) {
         set_rz_lo_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ VG_REDZONE_LO_MASK));
         set_rz_hi_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ VG_REDZONE_HI_MASK));
      }
   }
#  ifdef DEBUG_MALLOC
   (void)blockSane(a,b);
#  endif
}

// Remove a block from a given list.  Does no sanity checking.
static
void unlinkBlock ( Arena* a, Block* b, Int listno )
{
   vg_assert(listno >= 0 && listno < VG_N_MALLOC_LISTS);
   if (get_prev_b(b) == b) {
      // Only one element in the list; treat it specially.
      vg_assert(get_next_b(b) == b);
      a->freelist[listno] = NULL;
   } else {
      Block* b_prev = get_prev_b(b);
      Block* b_next = get_next_b(b);
      a->freelist[listno] = b_prev;
      set_next_b(b_prev, b_next);
      set_prev_b(b_next, b_prev);
      swizzle ( a, listno );
   }
   set_prev_b(b, NULL);
   set_next_b(b, NULL);
}
/*------------------------------------------------------------*/
/*--- Core-visible functions.                              ---*/
/*------------------------------------------------------------*/

// Align the request size.
static __inline__
Int align_req_pszB ( Int req_pszB )
{
   Int n = VG_MIN_MALLOC_SZB-1;
   return ((req_pszB + n) & (~n));
}
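
/* The rounding trick above, spelt out -- assuming VG_MIN_MALLOC_SZB ==
   8, so n == 7: req_pszB 13 becomes (13+7) & ~7 == 20 & ~7 == 16, and
   an exact multiple such as 24 is left unchanged.  This relies on
   VG_MIN_MALLOC_SZB being a power of 2. */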

void* VG_(arena_malloc) ( ArenaId aid, Int req_pszB )
{
   Int         req_bszB, frag_bszB, b_bszB, lno;
   Superblock* new_sb;
   Block*      b = NULL;
   Arena*      a;
   void*       v;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   vg_assert(0 <= req_pszB && req_pszB < MAX_PSZB);
   req_pszB = align_req_pszB(req_pszB);
   req_bszB = pszB_to_bszB(a, req_pszB);

   // Scan through all the big-enough freelists for a block.
   for (lno = pszB_to_listNo(req_pszB); lno < VG_N_MALLOC_LISTS; lno++) {
      b = a->freelist[lno];
      if (NULL == b) continue;   // If this list is empty, try the next one.
      while (True) {
         b_bszB = mk_plain_bszB(get_bszB_lo(b));
         if (b_bszB >= req_bszB) goto obtained_block;   // success!
         b = get_next_b(b);
         if (b == a->freelist[lno]) break;   // traversed entire freelist
      }
   }

   // If we reach here, no suitable block found, allocate a new superblock
   vg_assert(lno == VG_N_MALLOC_LISTS);
   new_sb = newSuperblock(a, req_bszB);
   if (NULL == new_sb) {
      // Should only fail for the client arena; otherwise we should have
      // aborted already.
      vg_assert(VG_AR_CLIENT == aid);
      VGP_POPCC(VgpMalloc);    // balance the VGP_PUSHCC on entry
      return NULL;
   }
   new_sb->next = a->sblocks;
   a->sblocks = new_sb;
   b = (Block*)&new_sb->payload_bytes[0];
   lno = pszB_to_listNo(bszB_to_pszB(a, new_sb->n_payload_bytes));
   mkFreeBlock ( a, b, new_sb->n_payload_bytes, lno);
   // fall through

  obtained_block:
   // Ok, we can allocate from b, which lives in list lno.
   vg_assert(b != NULL);
   vg_assert(lno >= 0 && lno < VG_N_MALLOC_LISTS);
   vg_assert(a->freelist[lno] != NULL);
   b_bszB = mk_plain_bszB(get_bszB_lo(b));
   // req_bszB is the size of the block we are after.  b_bszB is the
   // size of what we've actually got.
   vg_assert(b_bszB >= req_bszB);

   // Could we split this block and still get a useful fragment?
   frag_bszB = b_bszB - req_bszB;
   if (frag_bszB >= min_useful_bszB(a)) {
      // Yes, split block in two, put the fragment on the appropriate free
      // list, and update b_bszB accordingly.
      // printf( "split %dB into %dB and %dB\n", b_bszB, req_bszB, frag_bszB );
      unlinkBlock(a, b, lno);
      mkInuseBlock(a, b, req_bszB);
      mkFreeBlock(a, &b[req_bszB], frag_bszB,
                     pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
      b_bszB = mk_plain_bszB(get_bszB_lo(b));
   } else {
      // No, mark as in use and use as-is.
      unlinkBlock(a, b, lno);
      mkInuseBlock(a, b, b_bszB);
   }

   // Update stats
   a->bytes_on_loan += bszB_to_pszB(a, b_bszB);
   if (a->bytes_on_loan > a->bytes_on_loan_max)
      a->bytes_on_loan_max = a->bytes_on_loan;

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   VGP_POPCC(VgpMalloc);
   v = get_block_payload(a, b);
   vg_assert( (((Addr)v) & (VG_MIN_MALLOC_SZB-1)) == 0 );
   return v;
}


void VG_(arena_free) ( ArenaId aid, void* ptr )
{
   Superblock* sb;
   UByte*      sb_start;
   UByte*      sb_end;
   Block*      other;
   Block*      b;
   Int         b_bszB, b_pszB, other_bszB, b_listno;
   Arena*      a;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   if (ptr == NULL) {
      VGP_POPCC(VgpMalloc);
      return;
   }

   b = get_payload_block(a, ptr);

#  ifdef DEBUG_MALLOC
   vg_assert(blockSane(a, b));
#  endif

   a->bytes_on_loan -= bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(b)));

   sb       = findSb( a, b );
   sb_start = &sb->payload_bytes[0];
   sb_end   = &sb->payload_bytes[sb->n_payload_bytes - 1];

   // Put this chunk back on a list somewhere.
   b_bszB   = get_bszB_lo(b);
   b_pszB   = bszB_to_pszB(a, b_bszB);
   b_listno = pszB_to_listNo(b_pszB);
   mkFreeBlock( a, b, b_bszB, b_listno );

   // See if this block can be merged with its successor.
   // First test if we're far enough before the superblock's end to possibly
   // have a successor.
   other = b + b_bszB;
   if (other+min_useful_bszB(a)-1 <= (Block*)sb_end) {
      // Ok, we have a successor, merge if it's not in use.
      other_bszB = get_bszB_lo(other);
      if (!is_inuse_bszB(other_bszB)) {
         // VG_(printf)( "merge-successor\n");
         other_bszB = mk_plain_bszB(other_bszB);
#        ifdef DEBUG_MALLOC
         vg_assert(blockSane(a, other));
#        endif
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other, pszB_to_listNo(bszB_to_pszB(a,other_bszB)) );
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
      }
   } else {
      // Not enough space for successor: check that b is the last block
      // ie. there are no unused bytes at the end of the Superblock.
      vg_assert(other-1 == (Block*)sb_end);
   }

   // Then see if this block can be merged with its predecessor.
   // First test if we're far enough after the superblock's start to possibly
   // have a predecessor.
   if (b >= (Block*)sb_start + min_useful_bszB(a)) {
      // Ok, we have a predecessor, merge if it's not in use.
      other = get_predecessor_block( b );
      other_bszB = get_bszB_lo(other);
      if (!is_inuse_bszB(other_bszB)) {
         // VG_(printf)( "merge-predecessor\n");
         other_bszB = mk_plain_bszB(other_bszB);
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other, pszB_to_listNo(bszB_to_pszB(a, other_bszB)) );
         b = other;
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
      }
   } else {
      // Not enough space for predecessor: check that b is the first block,
      // ie. there are no unused bytes at the start of the Superblock.
      vg_assert((Block*)sb_start == b);
   }

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   VGP_POPCC(VgpMalloc);
}


/*
   The idea for malloc_aligned() is to allocate a big block, base, and
   then split it into two parts: frag, which is returned to the
   free pool, and align, which is the bit we're really after.  Here's
   a picture.  L and H denote the block lower and upper overheads, in
   bytes.  The details are gruesome.  Note it is slightly complicated
   because the initial request to generate base may return a bigger
   block than we asked for, so it is important to distinguish the base
   request size and the base actual size.

   frag_b                   align_b
   |                        |
   |    frag_p              |    align_p
   |    |                   |    |
   v    v                   v    v

   +---+                +---+---+               +---+
   | L |----------------| H | L |---------------| H |
   +---+                +---+---+               +---+

   ^    ^                        ^
   |    |                        :
   |    base_p                   this addr must be aligned
   |
   base_b

   .    .               .   .   .               .    .
   <------ frag_bszB ------->   .               .    .
   .    <------------- base_pszB_act ----------->    .
   .    .               .   .   .               .    .

*/
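
/* A numeric sketch of the above, under illustrative assumptions only
   (overhead_szB_lo == overhead_szB_hi == 16, so min_useful_bszB == 32):
   for req_pszB == 104 and req_alignB == 256 we request
   base_pszB_req == 104 + 32 + 256 == 392 payload bytes, then pick
   align_p as the first 256-aligned address at or beyond
   base_b + 2*overhead_szB_lo + overhead_szB_hi, which always leaves
   room for a minimal fragment block (frag) below it.  Actual overheads
   depend on the arena's redzone size. */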
void* VG_(arena_malloc_aligned) ( ArenaId aid, Int req_alignB, Int req_pszB )
{
   Int    base_pszB_req, base_pszB_act, frag_bszB;
   Block  *base_b, *align_b;
   UByte  *base_p, *align_p;
   UInt   saved_bytes_on_loan;
   Arena* a;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   vg_assert(0 <= req_pszB && req_pszB < MAX_PSZB);

   // Check that the requested alignment seems reasonable; that is, is
   // a power of 2.
   if (req_alignB < VG_MIN_MALLOC_SZB
       || req_alignB > 1048576
       || VG_(log2)( req_alignB ) == -1 /* not a power of 2 */) {
      VG_(printf)("VG_(arena_malloc_aligned)(%p, %d, %d)\nbad alignment",
                  a, req_alignB, req_pszB );
      VG_(core_panic)("VG_(arena_malloc_aligned)");
      /*NOTREACHED*/
   }
   // Paranoid
   vg_assert(req_alignB % VG_MIN_MALLOC_SZB == 0);

   /* Required payload size for the aligned chunk. */
   req_pszB = align_req_pszB(req_pszB);

   /* Payload size to request for the big block that we will split up. */
   base_pszB_req = req_pszB + min_useful_bszB(a) + req_alignB;

   /* Payload ptr for the block we are going to split.  Note this
      changes a->bytes_on_loan; we save and restore it ourselves. */
   saved_bytes_on_loan = a->bytes_on_loan;
   base_p = VG_(arena_malloc) ( aid, base_pszB_req );
   a->bytes_on_loan = saved_bytes_on_loan;

   /* Block ptr for the block we are going to split. */
   base_b = get_payload_block ( a, base_p );

   /* Pointer to the payload of the aligned block we are going to
      return.  This has to be suitably aligned. */
   align_p = align_upwards ( base_b + 2 * overhead_szB_lo(a)
                                    + overhead_szB_hi(a),
                             req_alignB );
   align_b = get_payload_block(a, align_p);

   /* The block size of the fragment we will create.  This must be big
      enough to actually create a fragment. */
   frag_bszB = align_b - base_b;

   vg_assert(frag_bszB >= min_useful_bszB(a));

   /* The actual payload size of the block we are going to split. */
   base_pszB_act = bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(base_b)));

   /* Create the fragment block, and put it back on the relevant free list. */
   mkFreeBlock ( a, base_b, frag_bszB,
                 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)) );

   /* Create the aligned block. */
   mkInuseBlock ( a, align_b,
                  base_p + base_pszB_act
                         + overhead_szB_hi(a) - (UByte*)align_b );

   /* Final sanity checks. */
   vg_assert( is_inuse_bszB(get_bszB_lo(get_payload_block(a, align_p))) );

   vg_assert(req_pszB
             <=
             bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(
                                get_payload_block(a, align_p))))
            );

   a->bytes_on_loan
      += bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(
                             get_payload_block(a, align_p))));
   if (a->bytes_on_loan > a->bytes_on_loan_max)
      a->bytes_on_loan_max = a->bytes_on_loan;

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   VGP_POPCC(VgpMalloc);

   vg_assert( (((Addr)align_p) % req_alignB) == 0 );
   return align_p;
}


Int VG_(arena_payload_szB) ( ArenaId aid, void* ptr )
{
   Arena* a = arenaId_to_ArenaP(aid);
   Block* b = get_payload_block(a, ptr);
   return bszB_to_pszB(a, get_bszB_lo(b));
}


/*------------------------------------------------------------*/
/*--- Services layered on top of malloc/free.              ---*/
/*------------------------------------------------------------*/

void* VG_(arena_calloc) ( ArenaId aid, Int alignB, Int nmemb, Int nbytes )
{
   Int    i, size;
   UChar* p;

   VGP_PUSHCC(VgpMalloc);

   size = nmemb * nbytes;
   vg_assert(size >= 0);

   if (alignB == VG_MIN_MALLOC_SZB)
      p = VG_(arena_malloc) ( aid, size );
   else
      p = VG_(arena_malloc_aligned) ( aid, alignB, size );

   for (i = 0; i < size; i++) p[i] = 0;

   VGP_POPCC(VgpMalloc);

   return p;
}


void* VG_(arena_realloc) ( ArenaId aid, void* ptr,
                           Int req_alignB, Int req_pszB )
{
   Arena* a;
   Int    old_bszB, old_pszB, i;
   UChar  *p_old, *p_new;
   Block* b;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   vg_assert(0 <= req_pszB && req_pszB < MAX_PSZB);

   b = get_payload_block(a, ptr);
   vg_assert(blockSane(a, b));

   old_bszB = get_bszB_lo(b);
   vg_assert(is_inuse_bszB(old_bszB));
   old_bszB = mk_plain_bszB(old_bszB);
   old_pszB = bszB_to_pszB(a, old_bszB);

   if (req_pszB <= old_pszB) {
      VGP_POPCC(VgpMalloc);
      return ptr;
   }

   if (req_alignB == VG_MIN_MALLOC_SZB)
      p_new = VG_(arena_malloc) ( aid, req_pszB );
   else {
      p_new = VG_(arena_malloc_aligned) ( aid, req_alignB, req_pszB );
   }

   p_old = (UChar*)ptr;
   for (i = 0; i < old_pszB; i++)
      p_new[i] = p_old[i];

   VG_(arena_free)(aid, p_old);

   VGP_POPCC(VgpMalloc);
   return p_new;
}


/*------------------------------------------------------------*/
/*--- Tool-visible functions.                              ---*/
/*------------------------------------------------------------*/

// All just wrappers to avoid exposing arenas to tools.

void* VG_(malloc) ( Int nbytes )
{
   return VG_(arena_malloc) ( VG_AR_TOOL, nbytes );
}

void VG_(free) ( void* ptr )
{
   VG_(arena_free) ( VG_AR_TOOL, ptr );
}

void* VG_(calloc) ( Int nmemb, Int nbytes )
{
   return VG_(arena_calloc) ( VG_AR_TOOL, VG_MIN_MALLOC_SZB, nmemb, nbytes );
}

void* VG_(realloc) ( void* ptr, Int size )
{
   return VG_(arena_realloc) ( VG_AR_TOOL, ptr, VG_MIN_MALLOC_SZB, size );
}

void* VG_(malloc_aligned) ( Int req_alignB, Int req_pszB )
{
   return VG_(arena_malloc_aligned) ( VG_AR_TOOL, req_alignB, req_pszB );
}


void* VG_(cli_malloc) ( UInt align, Int nbytes )
{
   // 'align' should be valid by now.  VG_(arena_malloc_aligned)() will
   // abort if it's not.
   if (VG_MIN_MALLOC_SZB == align)
      return VG_(arena_malloc) ( VG_AR_CLIENT, nbytes );
   else
      return VG_(arena_malloc_aligned) ( VG_AR_CLIENT, align, nbytes );
}

void VG_(cli_free) ( void* p )
{
   VG_(arena_free) ( VG_AR_CLIENT, p );
}


Bool VG_(addr_is_in_block)( Addr a, Addr start, UInt size )
{
   return (start - VG_(vg_malloc_redzone_szB) <= a
           && a < start + size + VG_(vg_malloc_redzone_szB));
}
/*------------------------------------------------------------*/
/*--- The original test driver machinery.                  ---*/
/*------------------------------------------------------------*/

#if 0

#if 1
#define N_TEST_TRANSACTIONS 100000000
#define N_TEST_ARR 200000
#define M_TEST_MALLOC 1000
#else
#define N_TEST_TRANSACTIONS 500000
#define N_TEST_ARR 30000
#define M_TEST_MALLOC 500
#endif


void* test_arr[N_TEST_ARR];

int main ( int argc, char** argv )
{
   Int i, j, k, nbytes, qq;
   unsigned char* chp;
   Arena* a = &arena[VG_AR_CORE];
   srandom(1);
   for (i = 0; i < N_TEST_ARR; i++)
      test_arr[i] = NULL;

   for (i = 0; i < N_TEST_TRANSACTIONS; i++) {
      if (i % 50000 == 0) mallocSanityCheck(a);
      j = random() % N_TEST_ARR;
      if (test_arr[j]) {
         vg_free(a, test_arr[j]);
         test_arr[j] = NULL;
      } else {
         nbytes = 1 + random() % M_TEST_MALLOC;
         qq = random()%64;
         if (qq == 32)
            nbytes *= 17;
         else if (qq == 33)
            nbytes = 0;
         test_arr[j]
            = (i % 17) == 0
                 ? vg_memalign(a, nbytes, 1<< (3+(random()%10)))
                 : vg_malloc( a, nbytes );
         chp = test_arr[j];
         for (k = 0; k < nbytes; k++)
            chp[k] = (unsigned char)(k + 99);
      }
   }


   for (i = 0; i < N_TEST_ARR; i++) {
      if (test_arr[i]) {
         vg_free(a, test_arr[i]);
         test_arr[i] = NULL;
      }
   }
   mallocSanityCheck(a);

   fprintf(stderr, "ALL DONE\n");

   show_arena_stats(a);
   fprintf(stderr, "%d max useful, %d bytes mmap'd (%4.1f%%), %d useful\n",
           a->bytes_on_loan_max,
           a->bytes_mmaped,
           100.0 * (double)a->bytes_on_loan_max / (double)a->bytes_mmaped,
           a->bytes_on_loan );

   return 0;
}
#endif /* 0 */


/*--------------------------------------------------------------------*/
/*--- end                                             vg_malloc2.c ---*/
/*--------------------------------------------------------------------*/