
/*--------------------------------------------------------------------*/
/*--- An implementation of malloc/free which doesn't use sbrk.     ---*/
/*---                                                 vg_malloc2.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, an extensible x86 protected-mode
   emulator for monitoring program execution on x86-Unixes.

   Copyright (C) 2000-2004 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/


#include "core.h"

//#define DEBUG_MALLOC      // turn on heavyweight debugging machinery
//#define VERBOSE_MALLOC    // make verbose, esp. in debugging machinery

/*------------------------------------------------------------*/
/*--- Main types                                           ---*/
/*------------------------------------------------------------*/

#define VG_N_MALLOC_LISTS 16    // do not change this

// The amount you can ask for is limited only by sizeof(SizeT)...
#define MAX_PSZB (~((SizeT)0x0))

typedef UChar UByte;

/* Block layout:

      this block total szB     (sizeof(SizeT) bytes)
      freelist previous ptr    (sizeof(void*) bytes)
      red zone bytes           (depends on .rz_szB field of Arena)
      (payload bytes)
      red zone bytes           (depends on .rz_szB field of Arena)
      freelist next ptr        (sizeof(void*) bytes)
      this block total szB     (sizeof(SizeT) bytes)

   Total size in bytes (bszB) and payload size in bytes (pszB)
   are related by:

      bszB == pszB + 2*sizeof(SizeT) + 2*sizeof(void*) + 2*a->rz_szB

   Furthermore, both size fields in the block have their least-significant
   bit set if the block is not in use, and unset if it is in use.
   (The bottom 3 or so bits are always free for this because of alignment.)
   A block size of zero is not possible, because a block always has at
   least two SizeTs and two pointers of overhead.

   Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned.  This is
   achieved by ensuring that Superblocks are VG_MIN_MALLOC_SZB-aligned
   (see newSuperblock() for how), and that the lengths of the following
   things are a multiple of VG_MIN_MALLOC_SZB:
   - Superblock admin section lengths (due to elastic padding)
   - Block admin section (low and high) lengths (due to elastic redzones)
   - Block payload lengths (due to req_pszB rounding up)
*/
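
/* Worked example (figures illustrative only): on a 32-bit machine with
   sizeof(SizeT) == sizeof(void*) == 4 and a->rz_szB == 4, a 16-byte
   payload gives
      bszB == 16 + 2*4 + 2*4 + 2*4 == 40 bytes.
   If that block is then freed, both size fields become 40|1 == 41; the
   flag bit never corrupts the size because 40 is a multiple of the
   8-byte alignment. */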
typedef
   struct {
      // No fields are actually used in this struct, because a Block has
      // loads of variable sized fields and so can't be accessed
      // meaningfully with normal fields.  So we use access functions all
      // the time.  This struct gives us a type to use, though.  Also, we
      // make sizeof(Block) 1 byte so that we can do arithmetic with the
      // Block* type in increments of 1!
      UByte dummy;
   }
   Block;

// A superblock.  'padding' is never used, it just ensures that if the
// entire Superblock is aligned to VG_MIN_MALLOC_SZB, then payload_bytes[]
// will be too.  It can add small amounts of padding unnecessarily -- eg.
// 8-bytes on 32-bit machines with an 8-byte VG_MIN_MALLOC_SZB -- because
// it's too hard to make a constant expression that works perfectly in all
// cases.
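// For example (illustrative, assuming an 8-byte VG_MIN_MALLOC_SZB and
// 4-byte pointers/SizeTs): the two admin fields occupy 8 bytes, so
// (8 % 8) == 0 and the padding expression below yields padding[8] --
// the 8 wasted but harmless bytes mentioned above.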
// payload_bytes[] is made a single big Block when the Superblock is
// created, and then can be split and the splittings remerged, but Blocks
// always cover its entire length -- there are never any unused bytes at
// the end, for example.
typedef
   struct _Superblock {
      struct _Superblock* next;
      SizeT n_payload_bytes;
      UByte padding[ VG_MIN_MALLOC_SZB -
                        ((sizeof(struct _Superblock*) + sizeof(SizeT)) %
                         VG_MIN_MALLOC_SZB) ];
      UByte payload_bytes[0];
   }
   Superblock;

// An arena.  'freelist' is a circular, doubly-linked list.  'rz_szB' is
// elastic, in that it can be bigger than asked-for to ensure alignment.
typedef
   struct {
      Char*       name;
      Bool        clientmem;        // Allocates in the client address space?
      SizeT       rz_szB;           // Red zone size in bytes
      SizeT       min_sblock_szB;   // Minimum superblock size in bytes
      Block*      freelist[VG_N_MALLOC_LISTS];
      Superblock* sblocks;
      // Stats only.
      SizeT bytes_on_loan;
      SizeT bytes_mmaped;
      SizeT bytes_on_loan_max;
   }
   Arena;


/*------------------------------------------------------------*/
/*--- Low-level functions for working with Blocks.         ---*/
/*------------------------------------------------------------*/

#define SIZE_T_0x1 ((SizeT)0x1)

// Mark a bszB as in-use, and not in-use.
static __inline__
SizeT mk_inuse_bszB ( SizeT bszB )
{
   vg_assert(bszB != 0);
   return bszB & (~SIZE_T_0x1);
}
static __inline__
SizeT mk_free_bszB ( SizeT bszB )
{
   vg_assert(bszB != 0);
   return bszB | SIZE_T_0x1;
}

// Remove the in-use/not-in-use attribute from a bszB, leaving just
// the size.
static __inline__
SizeT mk_plain_bszB ( SizeT bszB )
{
   vg_assert(bszB != 0);
   return bszB & (~SIZE_T_0x1);
}

// Does this bszB have the in-use attribute?
static __inline__
Bool is_inuse_bszB ( SizeT bszB )
{
   vg_assert(bszB != 0);
   return (0 != (bszB & SIZE_T_0x1)) ? False : True;
}
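// Example of the encoding (illustrative sizes): an in-use block of 40
// bytes stores 40 (0x28) in both size fields; once freed they hold
// mk_free_bszB(40) == 41 (0x29), so is_inuse_bszB(41) == False and
// mk_plain_bszB(41) == 40 recovers the true size.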


// Set and get the lower size field of a block.
static __inline__
void set_bszB_lo ( Block* b, SizeT bszB )
{
   *(SizeT*)&b[0] = bszB;
}
static __inline__
SizeT get_bszB_lo ( Block* b )
{
   return *(SizeT*)&b[0];
}

// Get the address of the last byte in a block
static __inline__
UByte* last_byte ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return &b2[mk_plain_bszB(get_bszB_lo(b)) - 1];
}

// Set and get the upper size field of a block.
static __inline__
void set_bszB_hi ( Block* b, SizeT bszB )
{
   UByte* b2 = (UByte*)b;
   UByte* lb = last_byte(b);
   vg_assert(lb == &b2[mk_plain_bszB(bszB) - 1]);
   *(SizeT*)&lb[-sizeof(SizeT) + 1] = bszB;
}
static __inline__
SizeT get_bszB_hi ( Block* b )
{
   UByte* lb = last_byte(b);
   return *(SizeT*)&lb[-sizeof(SizeT) + 1];
}


// Return the lower, upper and total overhead in bytes for a block.
// These are determined purely by which arena the block lives in.
static __inline__
SizeT overhead_szB_lo ( Arena* a )
{
   return sizeof(SizeT) + sizeof(void*) + a->rz_szB;
}
static __inline__
SizeT overhead_szB_hi ( Arena* a )
{
   return a->rz_szB + sizeof(void*) + sizeof(SizeT);
}
static __inline__
SizeT overhead_szB ( Arena* a )
{
   return overhead_szB_lo(a) + overhead_szB_hi(a);
}
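// Illustrative numbers again: with 4-byte SizeTs/pointers and
// rz_szB == 4, overhead_szB_lo(a) == overhead_szB_hi(a) == 12, so a
// block carries 24 bytes of admin in total (cf. the layout diagram
// above).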

// Given the addr of a block, return the addr of its payload.
static __inline__
UByte* get_block_payload ( Arena* a, Block* b )
{
   UByte* b2 = (UByte*)b;
   return & b2[ overhead_szB_lo(a) ];
}
// Given the addr of a block's payload, return the addr of the block itself.
static __inline__
Block* get_payload_block ( Arena* a, UByte* payload )
{
   return (Block*)&payload[ -overhead_szB_lo(a) ];
}


// Set and get the next and previous link fields of a block.
static __inline__
void set_prev_b ( Block* b, Block* prev_p )
{
   UByte* b2 = (UByte*)b;
   *(Block**)&b2[sizeof(SizeT)] = prev_p;
}
static __inline__
void set_next_b ( Block* b, Block* next_p )
{
   UByte* lb = last_byte(b);
   *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1] = next_p;
}
static __inline__
Block* get_prev_b ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return *(Block**)&b2[sizeof(SizeT)];
}
static __inline__
Block* get_next_b ( Block* b )
{
   UByte* lb = last_byte(b);
   return *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1];
}
263
264// Get the block immediately preceding this one in the Superblock.
265static __inline__
266Block* get_predecessor_block ( Block* b )
267{
268 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000269 SizeT bszB = mk_plain_bszB( (*(SizeT*)&b2[-sizeof(SizeT)]) );
nethercote2d5b8162004-08-11 09:40:52 +0000270 return (Block*)&b2[-bszB];
271}
272
273// Read and write the lower and upper red-zone bytes of a block.
274static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000275void set_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
nethercote2d5b8162004-08-11 09:40:52 +0000276{
277 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000278 b2[sizeof(SizeT) + sizeof(void*) + rz_byteno] = v;
nethercote2d5b8162004-08-11 09:40:52 +0000279}
280static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000281void set_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
nethercote2d5b8162004-08-11 09:40:52 +0000282{
283 UByte* lb = last_byte(b);
nethercote7ac7f7b2004-11-02 12:36:02 +0000284 lb[-sizeof(SizeT) - sizeof(void*) - rz_byteno] = v;
nethercote2d5b8162004-08-11 09:40:52 +0000285}
286static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000287UByte get_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno )
nethercote2d5b8162004-08-11 09:40:52 +0000288{
289 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000290 return b2[sizeof(SizeT) + sizeof(void*) + rz_byteno];
nethercote2d5b8162004-08-11 09:40:52 +0000291}
292static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000293UByte get_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno )
nethercote2d5b8162004-08-11 09:40:52 +0000294{
295 UByte* lb = last_byte(b);
nethercote7ac7f7b2004-11-02 12:36:02 +0000296 return lb[-sizeof(SizeT) - sizeof(void*) - rz_byteno];
nethercote2d5b8162004-08-11 09:40:52 +0000297}
298
299
nethercote2d5b8162004-08-11 09:40:52 +0000300// Return the minimum bszB for a block in this arena. Can have zero-length
301// payloads, so it's the size of the admin bytes.
302static __inline__
njn0e742df2004-11-30 13:26:29 +0000303SizeT min_useful_bszB ( Arena* a )
nethercote2d5b8162004-08-11 09:40:52 +0000304{
305 return overhead_szB(a);
306}
307
308// Convert payload size <--> block size (both in bytes).
309static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000310SizeT pszB_to_bszB ( Arena* a, SizeT pszB )
nethercote2d5b8162004-08-11 09:40:52 +0000311{
nethercote2d5b8162004-08-11 09:40:52 +0000312 return pszB + overhead_szB(a);
313}
314static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000315SizeT bszB_to_pszB ( Arena* a, SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000316{
nethercote7ac7f7b2004-11-02 12:36:02 +0000317 vg_assert(bszB >= overhead_szB(a));
318 return bszB - overhead_szB(a);
nethercote2d5b8162004-08-11 09:40:52 +0000319}
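// Continuing the illustrative 24-admin-byte example:
// pszB_to_bszB(a, 16) == 40 and bszB_to_pszB(a, 40) == 16; the two are
// exact inverses whenever bszB >= overhead_szB(a).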


/*------------------------------------------------------------*/
/*--- Arena management                                     ---*/
/*------------------------------------------------------------*/

#define CORE_ARENA_MIN_SZB 1048576

// The arena structures themselves.
static Arena vg_arena[VG_N_ARENAS];

// Functions external to this module identify arenas using ArenaIds,
// not Arena*s.  This fn converts the former to the latter.
static Arena* arenaId_to_ArenaP ( ArenaId arena )
{
   vg_assert(arena >= 0 && arena < VG_N_ARENAS);
   return & vg_arena[arena];
}

// Initialise an arena.  rz_szB is the minimum redzone size;  it might be
// made bigger to ensure that VG_MIN_MALLOC_ALIGNMENT is observed.
static
void arena_init ( ArenaId aid, Char* name, SizeT rz_szB, SizeT min_sblock_szB )
{
   SizeT i;
   Arena* a = arenaId_to_ArenaP(aid);

   vg_assert(rz_szB < 128);   // ensure reasonable size
   vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
   a->name      = name;
   a->clientmem = ( VG_AR_CLIENT == aid ? True : False );

   // The size of the low and high admin sections in a block must be a
   // multiple of VG_MIN_MALLOC_ALIGNMENT.  So we round up the asked-for
   // redzone size if necessary to achieve this.
   a->rz_szB = rz_szB;
   while (0 != overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++;
   vg_assert(overhead_szB_lo(a) == overhead_szB_hi(a));

   a->min_sblock_szB = min_sblock_szB;
   for (i = 0; i < VG_N_MALLOC_LISTS; i++) a->freelist[i] = NULL;
   a->sblocks           = NULL;
   a->bytes_on_loan     = 0;
   a->bytes_mmaped      = 0;
   a->bytes_on_loan_max = 0;
}

/* Print vital stats for an arena. */
void VG_(print_all_arena_stats) ( void )
{
   UInt i;
   for (i = 0; i < VG_N_ARENAS; i++) {
      Arena* a = arenaId_to_ArenaP(i);
      VG_(message)(Vg_DebugMsg,
         "AR %8s: %8d mmap'd, %8d/%8d max/curr",
         a->name, a->bytes_mmaped, a->bytes_on_loan_max, a->bytes_on_loan
      );
   }
}
/* This library is self-initialising, which makes it more self-contained
   and less coupled to the outside world.  Hence VG_(arena_malloc)() and
   VG_(arena_free)() below always call ensure_mm_init() to ensure things
   are correctly initialised. */
static
void ensure_mm_init ( void )
{
   static SizeT client_rz_szB;
   static Bool  init_done = False;

   if (init_done) {
      // Make sure the client arena's redzone size never changes.  Could
      // happen if VG_(arena_malloc) was called too early, ie. before the
      // tool was loaded.
      vg_assert(client_rz_szB == VG_(vg_malloc_redzone_szB));
      return;
   }

   /* No particular reason for this figure, it's just smallish */
   tl_assert(VG_(vg_malloc_redzone_szB) < 128);
   client_rz_szB = VG_(vg_malloc_redzone_szB);

   /* Use checked red zones (of various sizes) for our internal stuff,
      and an unchecked zone of arbitrary size for the client.  Of
      course the client's red zone can be checked by the tool, eg.
      by using addressability maps, but not by the mechanism implemented
      here, which merely checks at the time of freeing that the red
      zone bytes are unchanged.

      Nb: redzone sizes are *minimums*;  they could be made bigger to ensure
      alignment.  Eg. on 32-bit machines, 4 becomes 8, and 12 becomes 16;
      but on 64-bit machines 4 stays as 4, and 12 stays as 12 --- the extra
      4 bytes in both are accounted for by the larger prev/next ptr.
   */
   arena_init ( VG_AR_CORE,      "core",     4,              CORE_ARENA_MIN_SZB );
   arena_init ( VG_AR_TOOL,      "tool",     4,              1048576 );
   arena_init ( VG_AR_SYMTAB,    "symtab",   4,              1048576 );
   arena_init ( VG_AR_JITTER,    "JITter",   4,              32768 );
   arena_init ( VG_AR_CLIENT,    "client",   client_rz_szB,  1048576 );
   arena_init ( VG_AR_DEMANGLE,  "demangle", 12/*paranoid*/, 65536 );
   arena_init ( VG_AR_EXECTXT,   "exectxt",  4,              65536 );
   arena_init ( VG_AR_ERRORS,    "errors",   4,              65536 );
   arena_init ( VG_AR_TRANSIENT, "transien", 4,              65536 );

   init_done = True;
#  ifdef DEBUG_MALLOC
   VG_(sanity_check_malloc_all)();
#  endif
}


/*------------------------------------------------------------*/
/*--- Superblock management                                ---*/
/*------------------------------------------------------------*/

// Align ptr p upwards to an align-sized boundary.
static
void* align_upwards ( void* p, SizeT align )
{
   Addr a = (Addr)p;
   if ((a % align) == 0) return (void*)a;
   return (void*)(a - (a % align) + align);
}
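// For instance, align_upwards((void*)0x1003, 8) yields (void*)0x1008,
// while an already-aligned 0x1008 comes back unchanged -- the round-up
// only happens when (a % align) != 0.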

// If not enough memory available, either aborts (for non-client memory)
// or returns 0 (for client memory).
static
Superblock* newSuperblock ( Arena* a, SizeT cszB )
{
   // The extra VG_MIN_MALLOC_SZB bytes are for possible alignment up.
   static UByte bootstrap_superblock[CORE_ARENA_MIN_SZB+VG_MIN_MALLOC_SZB];
   static Bool  called_before = False;
   Superblock* sb;

   // Take into account admin bytes in the Superblock.
   cszB += sizeof(Superblock);

   if (cszB < a->min_sblock_szB) cszB = a->min_sblock_szB;
   while ((cszB % VKI_PAGE_SIZE) > 0) cszB++;

   if (!called_before) {
      // First time we're called -- use the special static bootstrap
      // superblock (see comment at top of main() for details).
      called_before = True;
      vg_assert(a == arenaId_to_ArenaP(VG_AR_CORE));
      vg_assert(CORE_ARENA_MIN_SZB >= cszB);
      // Ensure sb is suitably aligned.
      sb = (Superblock*)align_upwards( bootstrap_superblock,
                                       VG_MIN_MALLOC_SZB );
   } else if (a->clientmem) {
      // client allocation -- return 0 to client if it fails
      sb = (Superblock *)
           VG_(client_alloc)(0, cszB,
                             VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC, 0);
      if (NULL == sb)
         return 0;
   } else {
      // non-client allocation -- aborts if it fails
      sb = VG_(get_memory_from_mmap) ( cszB, "newSuperblock" );
   }
   vg_assert(NULL != sb);
   vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
   sb->n_payload_bytes = cszB - sizeof(Superblock);
   a->bytes_mmaped += cszB;
   if (0)
      VG_(message)(Vg_DebugMsg, "newSuperblock, %d payload bytes",
                                sb->n_payload_bytes);
   return sb;
}

// Find the superblock containing the given chunk.
static
Superblock* findSb ( Arena* a, Block* b )
{
   Superblock* sb;
   for (sb = a->sblocks; sb; sb = sb->next)
      if ((Block*)&sb->payload_bytes[0] <= b
          && b < (Block*)&sb->payload_bytes[sb->n_payload_bytes])
         return sb;
   VG_(printf)("findSb: can't find pointer %p in arena `%s'\n", b, a->name );
   VG_(core_panic)("findSb: VG_(arena_free)() in wrong arena?");
   return NULL; /*NOTREACHED*/
}


/*------------------------------------------------------------*/
/*--- Command line options                                 ---*/
/*------------------------------------------------------------*/

/* Round malloc sizes up to a multiple of VG_SLOPPY_MALLOC_SZB bytes?
   default: NO
   Nb: the allocator always rounds blocks up to a multiple of
   VG_MIN_MALLOC_SZB.  VG_(clo_sloppy_malloc) is relevant eg. for
   Memcheck, which will be byte-precise with addressability maps on its
   malloc allocations unless --sloppy-malloc=yes.  */
Bool VG_(clo_sloppy_malloc)  = False;

/* DEBUG: print malloc details?  default: NO */
Bool VG_(clo_trace_malloc)   = False;

/* Minimum alignment in functions that don't specify alignment explicitly.
   default: VG_MIN_MALLOC_SZB. */
UInt VG_(clo_alignment) = VG_MIN_MALLOC_SZB;


Bool VG_(replacement_malloc_process_cmd_line_option)(Char* arg)
{
   if (VG_CLO_STREQN(12, arg, "--alignment=")) {
      VG_(clo_alignment) = (UInt)VG_(atoll)(&arg[12]);

      if (VG_(clo_alignment) < VG_MIN_MALLOC_SZB
          || VG_(clo_alignment) > 4096
          || VG_(log2)( VG_(clo_alignment) ) == -1 /* not a power of 2 */) {
         VG_(message)(Vg_UserMsg, "");
         VG_(message)(Vg_UserMsg,
            "Invalid --alignment= setting.  "
            "Should be a power of 2, >= %d, <= 4096.", VG_MIN_MALLOC_SZB);
         VG_(bad_option)("--alignment");
      }
   }

   else VG_BOOL_CLO("--sloppy-malloc",  VG_(clo_sloppy_malloc))
   else VG_BOOL_CLO("--trace-malloc",   VG_(clo_trace_malloc))
   else
      return False;

   return True;
}

void VG_(replacement_malloc_print_usage)(void)
{
   VG_(printf)(
"    --sloppy-malloc=no|yes    round malloc sizes to multiple of %d? [no]\n"
"    --alignment=<number>      set minimum alignment of allocations [%d]\n",
   VG_SLOPPY_MALLOC_SZB, VG_MIN_MALLOC_SZB
   );
}

void VG_(replacement_malloc_print_debug_usage)(void)
{
   VG_(printf)(
"    --trace-malloc=no|yes     show client malloc details? [no]\n"
   );
}


/*------------------------------------------------------------*/
/*--- Functions for working with freelists.                ---*/
/*------------------------------------------------------------*/

// Nb: Determination of which freelist a block lives on is based on the
// payload size, not block size.

// Convert a payload size in bytes to a freelist number.
static
UInt pszB_to_listNo ( SizeT pszB )
{
   vg_assert(0 == pszB % VG_MIN_MALLOC_SZB);
   pszB /= VG_MIN_MALLOC_SZB;
   if (pszB <= 2)   return 0;
   if (pszB <= 3)   return 1;
   if (pszB <= 4)   return 2;
   if (pszB <= 5)   return 3;
   if (pszB <= 6)   return 4;
   if (pszB <= 7)   return 5;
   if (pszB <= 8)   return 6;
   if (pszB <= 9)   return 7;
   if (pszB <= 10)  return 8;
   if (pszB <= 11)  return 9;
   if (pszB <= 12)  return 10;
   if (pszB <= 16)  return 11;
   if (pszB <= 32)  return 12;
   if (pszB <= 64)  return 13;
   if (pszB <= 128) return 14;
   return 15;
}
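// Example bucketing (assuming VG_MIN_MALLOC_SZB == 8): a 16-byte
// payload scales to 2 and lands on list 0; 24 bytes -> 3 -> list 1;
// 256 bytes -> 32 -> list 12; anything over 128 units (1024 bytes
// here) falls into the catch-all list 15.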

// What is the minimum payload size for a given list?
static
SizeT listNo_to_pszB_min ( UInt listNo )
{
   SizeT pszB = 0;
   vg_assert(listNo <= VG_N_MALLOC_LISTS);
   while (pszB_to_listNo(pszB) < listNo) pszB += VG_MIN_MALLOC_SZB;
   return pszB;
}

// What is the maximum payload size for a given list?
static
SizeT listNo_to_pszB_max ( UInt listNo )
{
   vg_assert(listNo <= VG_N_MALLOC_LISTS);
   if (listNo == VG_N_MALLOC_LISTS-1) {
      return MAX_PSZB;
   } else {
      return listNo_to_pszB_min(listNo+1) - 1;
   }
}


/* A nasty hack to try and reduce fragmentation.  Try and replace
   a->freelist[lno] with another block on the same list but with a
   lower address, with the idea of attempting to recycle the same
   blocks rather than cruise through the address space. */
static
void swizzle ( Arena* a, UInt lno )
{
   Block* p_best;
   Block* pp;
   Block* pn;
   UInt   i;

   p_best = a->freelist[lno];
   if (p_best == NULL) return;

   pn = pp = p_best;
   for (i = 0; i < 20; i++) {
      pn = get_next_b(pn);
      pp = get_prev_b(pp);
      if (pn < p_best) p_best = pn;
      if (pp < p_best) p_best = pp;
   }
   if (p_best < a->freelist[lno]) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)("retreat by %d\n", a->freelist[lno] - p_best);
#     endif
      a->freelist[lno] = p_best;
   }
}


/*------------------------------------------------------------*/
/*--- Sanity-check/debugging machinery.                    ---*/
/*------------------------------------------------------------*/

#define VG_REDZONE_LO_MASK 0x31
#define VG_REDZONE_HI_MASK 0x7c

// Do some crude sanity checks on a Block.
static
Bool blockSane ( Arena* a, Block* b )
{
#  define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
   UInt i;
   if (get_bszB_lo(b) != get_bszB_hi(b))
      {BLEAT("sizes");return False;}
   if (!a->clientmem && is_inuse_bszB(get_bszB_lo(b))) {
      for (i = 0; i < a->rz_szB; i++) {
         if (get_rz_lo_byte(a, b, i) !=
             (UByte)(((Addr)b&0xff) ^ VG_REDZONE_LO_MASK))
            {BLEAT("redzone-lo");return False;}
         if (get_rz_hi_byte(a, b, i) !=
             (UByte)(((Addr)b&0xff) ^ VG_REDZONE_HI_MASK))
            {BLEAT("redzone-hi");return False;}
      }
   }
   return True;
#  undef BLEAT
}

// Print superblocks (only for debugging).
static
void ppSuperblocks ( Arena* a )
{
   UInt i, blockno;
   SizeT b_bszB;
   Block* b;
   Superblock* sb = a->sblocks;
   blockno = 1;

   while (sb) {
      VG_(printf)( "\n" );
      VG_(printf)( "superblock %d at %p, sb->n_pl_bs = %d, next = %p\n",
                   blockno++, sb, sb->n_payload_bytes, sb->next );
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         b      = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_lo(b);
         VG_(printf)( "   block at %d, bszB %d: ", i, mk_plain_bszB(b_bszB) );
         VG_(printf)( "%s, ", is_inuse_bszB(b_bszB) ? "inuse" : "free");
         VG_(printf)( "%s\n", blockSane(a, b) ? "ok" : "BAD" );
      }
      vg_assert(i == sb->n_payload_bytes);   // no overshoot at end of Sb
      sb = sb->next;
   }
   VG_(printf)( "end of superblocks\n\n" );
}

// Sanity check both the superblocks and the chains.
static void sanity_check_malloc_arena ( ArenaId aid )
{
   UInt i, superblockctr, blockctr_sb, blockctr_li;
   UInt blockctr_sb_free, listno;
   SizeT b_bszB, b_pszB, list_min_pszB, list_max_pszB;
   Superblock* sb;
   Bool thisFree, lastWasFree;
   Block* b;
   Block* b_prev;
   SizeT arena_bytes_on_loan;
   Arena* a;

#  define BOMB VG_(core_panic)("sanity_check_malloc_arena")

   a = arenaId_to_ArenaP(aid);

   // First, traverse all the superblocks, inspecting the Blocks in each.
   superblockctr = blockctr_sb = blockctr_sb_free = 0;
   arena_bytes_on_loan = 0;
   sb = a->sblocks;
   while (sb) {
      lastWasFree = False;
      superblockctr++;
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         blockctr_sb++;
         b      = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_lo(b);
         if (!blockSane(a, b)) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d (bszB %d): "
                        " BAD\n", sb, i, b_bszB );
            BOMB;
         }
         thisFree = !is_inuse_bszB(b_bszB);
         if (thisFree && lastWasFree) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d (bszB %d): "
                        "UNMERGED FREES\n",
                        sb, i, b_bszB );
            BOMB;
         }
         if (thisFree) blockctr_sb_free++;
         if (!thisFree)
            arena_bytes_on_loan += bszB_to_pszB(a, b_bszB);
         lastWasFree = thisFree;
      }
      if (i > sb->n_payload_bytes) {
         VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
                      "overshoots end\n", sb);
         BOMB;
      }
      sb = sb->next;
   }

   if (arena_bytes_on_loan != a->bytes_on_loan) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: a->bytes_on_loan %d, "
                   "arena_bytes_on_loan %d: "
                   "MISMATCH\n", a->bytes_on_loan, arena_bytes_on_loan);
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   /* Second, traverse each list, checking that the back pointers make
      sense, counting blocks encountered, and checking that each block
      is an appropriate size for this list. */
   blockctr_li = 0;
   for (listno = 0; listno < VG_N_MALLOC_LISTS; listno++) {
      list_min_pszB = listNo_to_pszB_min(listno);
      list_max_pszB = listNo_to_pszB_max(listno);
      b = a->freelist[listno];
      if (b == NULL) continue;
      while (True) {
         b_prev = b;
         b = get_next_b(b);
         if (get_prev_b(b) != b_prev) {
            VG_(printf)( "sanity_check_malloc_arena: list %d at %p: "
                         "BAD LINKAGE\n",
                         listno, b );
            BOMB;
         }
         b_pszB = bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(b)));
         if (b_pszB < list_min_pszB || b_pszB > list_max_pszB) {
            VG_(printf)(
               "sanity_check_malloc_arena: list %d at %p: "
               "WRONG CHAIN SIZE %dB (%dB, %dB)\n",
               listno, b, b_pszB, list_min_pszB, list_max_pszB );
            BOMB;
         }
         blockctr_li++;
         if (b == a->freelist[listno]) break;
      }
   }

   if (blockctr_sb_free != blockctr_li) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: BLOCK COUNT MISMATCH "
                   "(via sbs %d, via lists %d)\n",
                   blockctr_sb_free, blockctr_li );
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   if (VG_(clo_verbosity) > 2)
      VG_(message)(Vg_DebugMsg,
                   "AR %8s: %2d sbs, %5d bs, %2d/%-2d free bs, "
                   "%7d mmap, %7d loan",
                   a->name,
                   superblockctr,
                   blockctr_sb, blockctr_sb_free, blockctr_li,
                   a->bytes_mmaped, a->bytes_on_loan);
#  undef BOMB
}


void VG_(sanity_check_malloc_all) ( void )
{
   UInt i;
   for (i = 0; i < VG_N_ARENAS; i++)
      sanity_check_malloc_arena ( i );
}

/* Really, this isn't the right place for this.  Nevertheless: find
   out if an arena is empty -- currently has no bytes on loan.  This
   is useful for checking for memory leaks (of Valgrind itself, not
   the client). */
Bool VG_(is_empty_arena) ( ArenaId aid )
{
   Arena* a;
   Superblock* sb;
   Block* b;
   SizeT b_bszB;

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);
   for (sb = a->sblocks; sb != NULL; sb = sb->next) {
      // If the superblock is empty, it should contain a single free
      // block, of the right size.
      b = (Block*)&sb->payload_bytes[0];
      b_bszB = get_bszB_lo(b);
      if (is_inuse_bszB(b_bszB)) return False;
      if (mk_plain_bszB(b_bszB) != sb->n_payload_bytes) return False;
      // If we reach here, this block is not in use and is of the right
      // size, so keep going around the loop...
   }
   return True;
}


/*------------------------------------------------------------*/
/*--- Creating and deleting blocks.                        ---*/
/*------------------------------------------------------------*/

// Mark the bytes at b .. b+bszB-1 as not in use, and add them to the
// relevant free list.

static
void mkFreeBlock ( Arena* a, Block* b, SizeT bszB, UInt b_lno )
{
   SizeT pszB = bszB_to_pszB(a, bszB);
   vg_assert(b_lno == pszB_to_listNo(pszB));
   // Set the size fields and indicate not-in-use.
   set_bszB_lo(b, mk_free_bszB(bszB));
   set_bszB_hi(b, mk_free_bszB(bszB));

   // Add to the relevant list.
   if (a->freelist[b_lno] == NULL) {
      set_prev_b(b, b);
      set_next_b(b, b);
      a->freelist[b_lno] = b;
   } else {
      Block* b_prev = get_prev_b(a->freelist[b_lno]);
      Block* b_next = a->freelist[b_lno];
      set_next_b(b_prev, b);
      set_prev_b(b_next, b);
      set_next_b(b, b_next);
      set_prev_b(b, b_prev);
   }
#  ifdef DEBUG_MALLOC
   (void)blockSane(a,b);
#  endif
}

// Mark the bytes at b .. b+bszB-1 as in use, and set up the block
// appropriately.
static
void mkInuseBlock ( Arena* a, Block* b, SizeT bszB )
{
   UInt i;
   vg_assert(bszB >= min_useful_bszB(a));
   set_bszB_lo(b, mk_inuse_bszB(bszB));
   set_bszB_hi(b, mk_inuse_bszB(bszB));
   set_prev_b(b, NULL);    // Take off freelist
   set_next_b(b, NULL);    // ditto
   if (!a->clientmem) {
      for (i = 0; i < a->rz_szB; i++) {
         set_rz_lo_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ VG_REDZONE_LO_MASK));
         set_rz_hi_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ VG_REDZONE_HI_MASK));
      }
   }
#  ifdef DEBUG_MALLOC
   (void)blockSane(a,b);
#  endif
}

// Remove a block from a given list.  Does no sanity checking.
static
void unlinkBlock ( Arena* a, Block* b, UInt listno )
{
   vg_assert(listno < VG_N_MALLOC_LISTS);
   if (get_prev_b(b) == b) {
      // Only one element in the list; treat it specially.
      vg_assert(get_next_b(b) == b);
      a->freelist[listno] = NULL;
   } else {
      Block* b_prev = get_prev_b(b);
      Block* b_next = get_next_b(b);
      a->freelist[listno] = b_prev;
      set_next_b(b_prev, b_next);
      set_prev_b(b_next, b_prev);
      swizzle ( a, listno );
   }
   set_prev_b(b, NULL);
   set_next_b(b, NULL);
}


/*------------------------------------------------------------*/
/*--- Core-visible functions.                              ---*/
/*------------------------------------------------------------*/

// Align the request size.
static __inline__
SizeT align_req_pszB ( SizeT req_pszB )
{
   SizeT n = VG_MIN_MALLOC_SZB-1;
   return ((req_pszB + n) & (~n));
}
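// The usual round-up trick: with VG_MIN_MALLOC_SZB == 8 (illustrative),
// n == 7, so 13 becomes (13+7) & ~7 == 16 and 16 stays 16.  This only
// works because VG_MIN_MALLOC_SZB is a power of 2.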

void* VG_(arena_malloc) ( ArenaId aid, SizeT req_pszB )
{
   SizeT req_bszB, frag_bszB, b_bszB;
   UInt lno;
   Superblock* new_sb;
   Block* b = NULL;
   Arena* a;
   void* v;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   vg_assert(req_pszB < MAX_PSZB);
   req_pszB = align_req_pszB(req_pszB);
   req_bszB = pszB_to_bszB(a, req_pszB);

   // Scan through all the big-enough freelists for a block.
   for (lno = pszB_to_listNo(req_pszB); lno < VG_N_MALLOC_LISTS; lno++) {
      b = a->freelist[lno];
      if (NULL == b) continue;   // If this list is empty, try the next one.
      while (True) {
         b_bszB = mk_plain_bszB(get_bszB_lo(b));
         if (b_bszB >= req_bszB) goto obtained_block;   // success!
         b = get_next_b(b);
         if (b == a->freelist[lno]) break;   // traversed entire freelist
      }
   }

   // If we reach here, no suitable block found, allocate a new superblock
   vg_assert(lno == VG_N_MALLOC_LISTS);
   new_sb = newSuperblock(a, req_bszB);
   if (NULL == new_sb) {
      // Should only fail for the client arena; otherwise we should have
      // aborted already.
      vg_assert(VG_AR_CLIENT == aid);
      return NULL;
   }
   new_sb->next = a->sblocks;
   a->sblocks = new_sb;
   b = (Block*)&new_sb->payload_bytes[0];
   lno = pszB_to_listNo(bszB_to_pszB(a, new_sb->n_payload_bytes));
   mkFreeBlock ( a, b, new_sb->n_payload_bytes, lno);
   // fall through

  obtained_block:
   // Ok, we can allocate from b, which lives in list lno.
   vg_assert(b != NULL);
   vg_assert(lno < VG_N_MALLOC_LISTS);
   vg_assert(a->freelist[lno] != NULL);
   b_bszB = mk_plain_bszB(get_bszB_lo(b));
   // req_bszB is the size of the block we are after.  b_bszB is the
   // size of what we've actually got.
   vg_assert(b_bszB >= req_bszB);

   // Could we split this block and still get a useful fragment?
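   // Worked example (figures illustrative): if req_bszB is 40 and the
   // block found has b_bszB 64, the would-be fragment is 24 bytes;
   // provided 24 >= min_useful_bszB(a) we split, otherwise the caller
   // just gets the whole 64-byte block (internal fragmentation).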
   frag_bszB = b_bszB - req_bszB;
   if (frag_bszB >= min_useful_bszB(a)) {
      // Yes, split block in two, put the fragment on the appropriate free
      // list, and update b_bszB accordingly.
      // printf( "split %dB into %dB and %dB\n", b_bszB, req_bszB, frag_bszB );
      unlinkBlock(a, b, lno);
      mkInuseBlock(a, b, req_bszB);
      mkFreeBlock(a, &b[req_bszB], frag_bszB,
                     pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
      b_bszB = mk_plain_bszB(get_bszB_lo(b));
   } else {
      // No, mark as in use and use as-is.
      unlinkBlock(a, b, lno);
      mkInuseBlock(a, b, b_bszB);
   }

   // Update stats
   a->bytes_on_loan += bszB_to_pszB(a, b_bszB);
   if (a->bytes_on_loan > a->bytes_on_loan_max)
      a->bytes_on_loan_max = a->bytes_on_loan;

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   VGP_POPCC(VgpMalloc);
   v = get_block_payload(a, b);
   vg_assert( (((Addr)v) & (VG_MIN_MALLOC_SZB-1)) == 0 );
   return v;
}


void VG_(arena_free) ( ArenaId aid, void* ptr )
{
   Superblock* sb;
   UByte* sb_start;
   UByte* sb_end;
   Block* other;
   Block* b;
   SizeT b_bszB, b_pszB, other_bszB;
   UInt b_listno;
   Arena* a;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   if (ptr == NULL) {
      VGP_POPCC(VgpMalloc);
      return;
   }

   b = get_payload_block(a, ptr);

#  ifdef DEBUG_MALLOC
   vg_assert(blockSane(a, b));
#  endif

   a->bytes_on_loan -= bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(b)));

   sb       = findSb( a, b );
   sb_start = &sb->payload_bytes[0];
   sb_end   = &sb->payload_bytes[sb->n_payload_bytes - 1];

   // Put this chunk back on a list somewhere.
   b_bszB   = get_bszB_lo(b);
   b_pszB   = bszB_to_pszB(a, b_bszB);
   b_listno = pszB_to_listNo(b_pszB);
   mkFreeBlock( a, b, b_bszB, b_listno );

   // See if this block can be merged with its successor.
   // First test if we're far enough before the superblock's end to possibly
   // have a successor.
   other = b + b_bszB;
   if (other+min_useful_bszB(a)-1 <= (Block*)sb_end) {
      // Ok, we have a successor, merge if it's not in use.
      other_bszB = get_bszB_lo(other);
      if (!is_inuse_bszB(other_bszB)) {
         // VG_(printf)( "merge-successor\n");
         other_bszB = mk_plain_bszB(other_bszB);
#        ifdef DEBUG_MALLOC
         vg_assert(blockSane(a, other));
#        endif
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other, pszB_to_listNo(bszB_to_pszB(a,other_bszB)) );
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
      }
   } else {
      // Not enough space for successor: check that b is the last block
      // ie. there are no unused bytes at the end of the Superblock.
      vg_assert(other-1 == (Block*)sb_end);
   }

   // Then see if this block can be merged with its predecessor.
   // First test if we're far enough after the superblock's start to possibly
   // have a predecessor.
   if (b >= (Block*)sb_start + min_useful_bszB(a)) {
      // Ok, we have a predecessor, merge if it's not in use.
      other = get_predecessor_block( b );
      other_bszB = get_bszB_lo(other);
      if (!is_inuse_bszB(other_bszB)) {
         // VG_(printf)( "merge-predecessor\n");
         other_bszB = mk_plain_bszB(other_bszB);
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other, pszB_to_listNo(bszB_to_pszB(a, other_bszB)) );
         b = other;
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
      }
   } else {
      // Not enough space for predecessor: check that b is the first block,
      // ie. there are no unused bytes at the start of the Superblock.
      vg_assert((Block*)sb_start == b);
   }

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   VGP_POPCC(VgpMalloc);
}


/*
   The idea for malloc_aligned() is to allocate a big block, base, and
   then split it into two parts: frag, which is returned to the
   free pool, and align, which is the bit we're really after.  Here's
   a picture.  L and H denote the block lower and upper overheads, in
   bytes.  The details are gruesome.  Note it is slightly complicated
   because the initial request to generate base may return a bigger
   block than we asked for, so it is important to distinguish the base
   request size and the base actual size.

   frag_b                   align_b
   |                        |
   |    frag_p              |    align_p
   |    |                   |    |
   v    v                   v    v

   +---+                +---+---+               +---+
   | L |----------------| H | L |---------------| H |
   +---+                +---+---+               +---+

   ^    ^                        ^
   |    |                        :
   |    base_p                   this addr must be aligned
   |
   base_b

   .    .               .   .   .               .   .
   <------ frag_bszB ------->   .               .   .
   .    <------------- base_pszB_act ----------->   .
   .    .               .   .   .               .   .

*/
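
/* A concrete walk-through (figures illustrative): with 24 admin bytes
   per block, asking for a 104-byte (already rounded) payload aligned
   to 256 means requesting base_pszB_req = 104 + 24 + 256 = 384 payload
   bytes for base.  Somewhere in that big block there must be a
   256-aligned address preceded by a gap big enough to form the frag
   block, which is why the vg_assert on frag_bszB below can insist the
   split always succeeds. */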
nethercote7ac7f7b2004-11-02 12:36:02 +00001163void* VG_(arena_malloc_aligned) ( ArenaId aid, SizeT req_alignB, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001164{
nethercote7ac7f7b2004-11-02 12:36:02 +00001165 SizeT base_pszB_req, base_pszB_act, frag_bszB;
nethercote2d5b8162004-08-11 09:40:52 +00001166 Block *base_b, *align_b;
1167 UByte *base_p, *align_p;
nethercote7ac7f7b2004-11-02 12:36:02 +00001168 SizeT saved_bytes_on_loan;
sewardjde4a1d02002-03-22 01:27:54 +00001169 Arena* a;
1170
njn25e49d8e72002-09-23 09:36:25 +00001171 VGP_PUSHCC(VgpMalloc);
1172
sewardjde4a1d02002-03-22 01:27:54 +00001173 ensure_mm_init();
1174 a = arenaId_to_ArenaP(aid);
1175
nethercote7ac7f7b2004-11-02 12:36:02 +00001176 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00001177
nethercote2d5b8162004-08-11 09:40:52 +00001178 // Check that the requested alignment seems reasonable; that is, is
1179 // a power of 2.
1180 if (req_alignB < VG_MIN_MALLOC_SZB
1181 || req_alignB > 1048576
1182 || VG_(log2)( VG_(clo_alignment) ) == -1 /* not a power of 2 */) {
1183 VG_(printf)("VG_(arena_malloc_aligned)(%p, %d, %d)\nbad alignment",
1184 a, req_alignB, req_pszB );
1185 VG_(core_panic)("VG_(arena_malloc_aligned)");
1186 /*NOTREACHED*/
sewardjde4a1d02002-03-22 01:27:54 +00001187 }
nethercote2d5b8162004-08-11 09:40:52 +00001188 // Paranoid
1189 vg_assert(req_alignB % VG_MIN_MALLOC_SZB == 0);

   /* Required payload size for the aligned chunk. */
   req_pszB = align_req_pszB(req_pszB);

   /* Payload size to request for the big block that we will split up. */
   base_pszB_req = req_pszB + min_useful_bszB(a) + req_alignB;

   /* Payload ptr for the block we are going to split.  Note this
      changes a->bytes_on_loan; we save and restore it ourselves. */
   saved_bytes_on_loan = a->bytes_on_loan;
   base_p = VG_(arena_malloc) ( aid, base_pszB_req );
   a->bytes_on_loan = saved_bytes_on_loan;

   /* Block ptr for the block we are going to split. */
   base_b = get_payload_block ( a, base_p );

   /* Pointer to the payload of the aligned block we are going to
      return.  This has to be suitably aligned. */
   align_p = align_upwards ( base_b + 2 * overhead_szB_lo(a)
                                    + overhead_szB_hi(a),
                             req_alignB );
   align_b = get_payload_block(a, align_p);

   /* The block size of the fragment we will create.  This must be big
      enough to actually create a fragment. */
   frag_bszB = align_b - base_b;

   vg_assert(frag_bszB >= min_useful_bszB(a));

   /* The actual payload size of the block we are going to split. */
   base_pszB_act = bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(base_b)));

   /* Create the fragment block, and put it back on the relevant free
      list. */
   mkFreeBlock ( a, base_b, frag_bszB,
                 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)) );

   /* Create the aligned block. */
   mkInuseBlock ( a, align_b,
                  base_p + base_pszB_act
                         + overhead_szB_hi(a) - (UByte*)align_b );

   /* Final sanity checks. */
   vg_assert( is_inuse_bszB(get_bszB_lo(get_payload_block(a, align_p))) );

   vg_assert(req_pszB
             <=
             bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(
                                get_payload_block(a, align_p))))
   );

   a->bytes_on_loan
      += bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(
                             get_payload_block(a, align_p))));
   if (a->bytes_on_loan > a->bytes_on_loan_max)
      a->bytes_on_loan_max = a->bytes_on_loan;

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   VGP_POPCC(VgpMalloc);

   vg_assert( (((Addr)align_p) % req_alignB) == 0 );
   return align_p;
}
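
/* Illustrative sketch, not part of the build: the heart of the
   over-allocate-and-split trick above is rounding a payload address up
   to the requested power-of-two boundary.  'example_align_upwards' is a
   hypothetical stand-in with the obvious definition; the real helper,
   align_upwards, lives earlier in this file. */
#if 0
#include <assert.h>
#include <stdint.h>

static uintptr_t example_align_upwards ( uintptr_t p, uintptr_t alignB )
{
   /* alignB must be a power of two; round p up to the next multiple. */
   return (p + alignB - 1) & ~(alignB - 1);
}

static void example_alignment ( void )
{
   /* 0x5007 rounded up to a 0x1000 boundary is 0x6000 ... */
   assert(example_align_upwards(0x5007, 0x1000) == 0x6000);
   /* ... and an already-aligned address is left unchanged. */
   assert(example_align_upwards(0x6000, 0x1000) == 0x6000);
}
#endif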


SizeT VG_(arena_payload_szB) ( ArenaId aid, void* ptr )
{
   Arena* a = arenaId_to_ArenaP(aid);
   Block* b = get_payload_block(a, ptr);
   /* 'ptr' must point at a live block, so the low "free" bit of the
      size field is clear and get_bszB_lo() already yields a plain
      block size. */
   return bszB_to_pszB(a, get_bszB_lo(b));
}
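
/* Illustrative sketch, not part of the build: the payload handed out
   can be larger than the size requested, and VG_(arena_payload_szB)
   reports the actual figure.  The arena and request size here are
   arbitrary. */
#if 0
static void example_payload_szB ( void )
{
   void* p  = VG_(arena_malloc) ( VG_AR_CORE, 10 );
   SizeT sz = VG_(arena_payload_szB) ( VG_AR_CORE, p );
   vg_assert(sz >= 10);   /* rounded up to the arena's granularity */
   VG_(arena_free) ( VG_AR_CORE, p );
}
#endif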


/*------------------------------------------------------------*/
/*--- Services layered on top of malloc/free.              ---*/
/*------------------------------------------------------------*/

void* VG_(arena_calloc) ( ArenaId aid, SizeT alignB, SizeT nmemb, SizeT nbytes )
{
   SizeT  i, size;
   UChar* p;

   VGP_PUSHCC(VgpMalloc);

   size = nmemb * nbytes;
   /* Check against overflow.  Division is the reliable test; the
      weaker "size >= nmemb && size >= nbytes" check misses some
      wrapped products (see the sketch below). */
   vg_assert(nbytes == 0 || size / nbytes == nmemb);

   if (alignB == VG_MIN_MALLOC_SZB)
      p = VG_(arena_malloc) ( aid, size );
   else
      p = VG_(arena_malloc_aligned) ( aid, alignB, size );

   for (i = 0; i < size; i++) p[i] = 0;

   VGP_POPCC(VgpMalloc);

   return p;
}
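
/* Illustrative sketch, not part of the build: why the division-based
   overflow check is used above.  With a 32-bit size (uint32_t standing
   in for SizeT), 65537 * 65537 wraps to 131073, which is >= both
   operands, so the naive comparison test would wrongly pass. */
#if 0
#include <assert.h>
#include <stdint.h>

static int example_mul_overflows ( uint32_t nmemb, uint32_t nbytes )
{
   uint32_t size = nmemb * nbytes;   /* may wrap modulo 2^32 */
   return nbytes != 0 && size / nbytes != nmemb;
}

static void example_overflow ( void )
{
   /* 65537 * 65537 == 4295098369 == 131073 (mod 2^32): a wrap that
      "size >= nmemb && size >= nbytes" fails to notice. */
   assert( example_mul_overflows(65537u, 65537u) );
   assert( !example_mul_overflows(100u, 8u) );   /* 800: no wrap */
}
#endif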


void* VG_(arena_realloc) ( ArenaId aid, void* ptr,
                           SizeT req_alignB, SizeT req_pszB )
{
   Arena* a;
   SizeT  old_bszB, old_pszB, i;
   UChar  *p_old, *p_new;
   Block* b;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   vg_assert(req_pszB < MAX_PSZB);

   b = get_payload_block(a, ptr);
   vg_assert(blockSane(a, b));

   old_bszB = get_bszB_lo(b);
   vg_assert(is_inuse_bszB(old_bszB));
   old_bszB = mk_plain_bszB(old_bszB);
   old_pszB = bszB_to_pszB(a, old_bszB);

   /* Shrinking (or same-size) requests are satisfied in place. */
   if (req_pszB <= old_pszB) {
      VGP_POPCC(VgpMalloc);
      return ptr;
   }

   if (req_alignB == VG_MIN_MALLOC_SZB)
      p_new = VG_(arena_malloc) ( aid, req_pszB );
   else
      p_new = VG_(arena_malloc_aligned) ( aid, req_alignB, req_pszB );

   /* Copy the old payload into the new block, then free the old one. */
   p_old = (UChar*)ptr;
   for (i = 0; i < old_pszB; i++)
      p_new[i] = p_old[i];

   VG_(arena_free)(aid, p_old);

   VGP_POPCC(VgpMalloc);
   return p_new;
}
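
/* Illustrative usage sketch, not part of the build: a shrinking request
   returns the original pointer unchanged, so callers must not assume
   reallocation always moves the block.  The arena and sizes here are
   arbitrary. */
#if 0
static void example_realloc ( void )
{
   void* p = VG_(arena_malloc) ( VG_AR_CORE, 100 );
   /* 40 <= 100, so no new allocation and no copy happens; q == p. */
   void* q = VG_(arena_realloc) ( VG_AR_CORE, p, VG_MIN_MALLOC_SZB, 40 );
   vg_assert(q == p);
   VG_(arena_free) ( VG_AR_CORE, q );
}
#endif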


/*------------------------------------------------------------*/
/*--- Tool-visible functions.                              ---*/
/*------------------------------------------------------------*/

// All just wrappers to avoid exposing arenas to tools.

void* VG_(malloc) ( SizeT nbytes )
{
   return VG_(arena_malloc) ( VG_AR_TOOL, nbytes );
}

void VG_(free) ( void* ptr )
{
   VG_(arena_free) ( VG_AR_TOOL, ptr );
}

void* VG_(calloc) ( SizeT nmemb, SizeT nbytes )
{
   return VG_(arena_calloc) ( VG_AR_TOOL, VG_MIN_MALLOC_SZB, nmemb, nbytes );
}

void* VG_(realloc) ( void* ptr, SizeT size )
{
   return VG_(arena_realloc) ( VG_AR_TOOL, ptr, VG_MIN_MALLOC_SZB, size );
}

void* VG_(malloc_aligned) ( SizeT req_alignB, SizeT req_pszB )
{
   return VG_(arena_malloc_aligned) ( VG_AR_TOOL, req_alignB, req_pszB );
}


void* VG_(cli_malloc) ( SizeT align, SizeT nbytes )
{
   // 'align' should be valid by now.  VG_(arena_malloc_aligned)() will
   // abort if it's not.
   if (VG_MIN_MALLOC_SZB == align)
      return VG_(arena_malloc) ( VG_AR_CLIENT, nbytes );
   else
      return VG_(arena_malloc_aligned) ( VG_AR_CLIENT, align, nbytes );
}

void VG_(cli_free) ( void* p )
{
   VG_(arena_free) ( VG_AR_CLIENT, p );
}


// Is address 'a' within the block whose payload starts at 'start',
// counting the block's redzones as part of it?  See the sketch after
// this function.
Bool VG_(addr_is_in_block)( Addr a, Addr start, SizeT size )
{
   return (start - VG_(vg_malloc_redzone_szB) <= a
           && a < start + size + VG_(vg_malloc_redzone_szB));
}
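
/* Illustrative sketch, not part of the build: assuming a redzone size
   of 8 bytes (VG_(vg_malloc_redzone_szB) == 8) and a 64-byte payload
   starting at 0x1000, the block is considered to span [0xFF8, 0x1048). */
#if 0
static void example_addr_is_in_block ( void )
{
   Addr  start = 0x1000;   /* hypothetical payload start */
   SizeT size  = 64;       /* hypothetical payload size  */
   vg_assert(  VG_(addr_is_in_block)(0x0FFC, start, size) ); /* leading rz  */
   vg_assert(  VG_(addr_is_in_block)(0x1044, start, size) ); /* trailing rz */
   vg_assert( !VG_(addr_is_in_block)(0x0FF0, start, size) ); /* outside     */
}
#endif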


/*------------------------------------------------------------*/
/*--- The original test driver machinery.                  ---*/
/*------------------------------------------------------------*/

#if 0

#if 1
#define N_TEST_TRANSACTIONS 100000000
#define N_TEST_ARR 200000
#define M_TEST_MALLOC 1000
#else
#define N_TEST_TRANSACTIONS 500000
#define N_TEST_ARR 30000
#define M_TEST_MALLOC 500
#endif


void* test_arr[N_TEST_ARR];

int main ( int argc, char** argv )
{
   Int i, j, k, nbytes, qq;
   unsigned char* chp;
   Arena* a = &arena[VG_AR_CORE];
   srandom(1);
   for (i = 0; i < N_TEST_ARR; i++)
      test_arr[i] = NULL;

   for (i = 0; i < N_TEST_TRANSACTIONS; i++) {
      if (i % 50000 == 0) mallocSanityCheck(a);
      j = random() % N_TEST_ARR;
      if (test_arr[j]) {
         vg_free(a, test_arr[j]);
         test_arr[j] = NULL;
      } else {
         nbytes = 1 + random() % M_TEST_MALLOC;
         qq = random()%64;
         if (qq == 32)
            nbytes *= 17;
         else if (qq == 33)
            nbytes = 0;
         test_arr[j]
            = (i % 17) == 0
                 ? vg_memalign(a, nbytes, 1 << (3+(random()%10)))
                 : vg_malloc( a, nbytes );
         chp = test_arr[j];
         for (k = 0; k < nbytes; k++)
            chp[k] = (unsigned char)(k + 99);
      }
   }

   for (i = 0; i < N_TEST_ARR; i++) {
      if (test_arr[i]) {
         vg_free(a, test_arr[i]);
         test_arr[i] = NULL;
      }
   }
   mallocSanityCheck(a);

   fprintf(stderr, "ALL DONE\n");

   show_arena_stats(a);
   fprintf(stderr, "%d max useful, %d bytes mmap'd (%4.1f%%), %d useful\n",
           a->bytes_on_loan_max,
           a->bytes_mmaped,
           100.0 * (double)a->bytes_on_loan_max / (double)a->bytes_mmaped,
           a->bytes_on_loan );

   return 0;
}
#endif /* 0 */


/*--------------------------------------------------------------------*/
/*--- end                                             vg_malloc2.c ---*/
/*--------------------------------------------------------------------*/