
/*--------------------------------------------------------------------*/
/*--- An implementation of malloc/free which doesn't use sbrk.     ---*/
/*---                                                 vg_malloc2.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2005 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/


#include "core.h"
//zz#include "memcheck/memcheck.h"

//#define DEBUG_MALLOC      // turn on heavyweight debugging machinery
//#define VERBOSE_MALLOC    // make verbose, esp. in debugging machinery

/*------------------------------------------------------------*/
/*--- Main types                                           ---*/
/*------------------------------------------------------------*/

#define VG_N_MALLOC_LISTS 16    // do not change this

// The amount you can ask for is limited only by sizeof(SizeT)...
#define MAX_PSZB (~((SizeT)0x0))

typedef UChar UByte;

/* Block layout:

      this block total szB     (sizeof(SizeT) bytes)
      freelist previous ptr    (sizeof(void*) bytes)
      red zone bytes           (depends on .rz_szB field of Arena)
      (payload bytes)
      red zone bytes           (depends on .rz_szB field of Arena)
      freelist next ptr        (sizeof(void*) bytes)
      this block total szB     (sizeof(SizeT) bytes)

   Total size in bytes (bszB) and payload size in bytes (pszB)
   are related by:

      bszB == pszB + 2*sizeof(SizeT) + 2*sizeof(void*) + 2*a->rz_szB

   Furthermore, both size fields in the block have their least-significant
   bit set if the block is not in use, and unset if it is in use.
   (The bottom 3 or so bits are always free for this because of alignment.)
   A block size of zero is not possible, because a block always has at
   least two SizeTs and two pointers of overhead.

   Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned.  This is
   achieved by ensuring that Superblocks are VG_MIN_MALLOC_SZB-aligned
   (see newSuperblock() for how), and that the lengths of the following
   things are a multiple of VG_MIN_MALLOC_SZB:
   - Superblock admin section lengths (due to elastic padding)
   - Block admin section (low and high) lengths (due to elastic redzones)
   - Block payload lengths (due to req_pszB rounding up)
*/
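
/* A worked instance of the formula above (illustrative only; the concrete
   numbers assume a hypothetical 32-bit build with sizeof(SizeT) ==
   sizeof(void*) == 4 and an arena whose rz_szB == 4):

      overhead == 2*4 + 2*4 + 2*4 == 24 bytes

   so a 40-byte payload gives bszB == 64.  Since 64 is a multiple of the
   (assumed 8-byte) VG_MIN_MALLOC_SZB, the bottom bit is free to hold the
   in-use flag:

      64 == 0x40   -> marked in-use (LSB clear)
      65 == 0x41   -> marked free   (LSB set)
*/
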
typedef
   struct {
      // No fields are actually used in this struct, because a Block has
      // loads of variable sized fields and so can't be accessed
      // meaningfully with normal fields.  So we use access functions all
      // the time.  This struct gives us a type to use, though.  Also, we
      // make sizeof(Block) 1 byte so that we can do arithmetic with the
      // Block* type in increments of 1!
      UByte dummy;
   }
   Block;

// A superblock.  'padding' is never used, it just ensures that if the
// entire Superblock is aligned to VG_MIN_MALLOC_SZB, then payload_bytes[]
// will be too.  It can add small amounts of padding unnecessarily -- eg.
// 8-bytes on 32-bit machines with an 8-byte VG_MIN_MALLOC_SZB -- because
// it's too hard to make a constant expression that works perfectly in all
// cases.
// payload_bytes[] is made a single big Block when the Superblock is
// created, and then can be split and the splittings remerged, but Blocks
// always cover its entire length -- there's never any unused bytes at the
// end, for example.
typedef
   struct _Superblock {
      struct _Superblock* next;
      SizeT n_payload_bytes;
      UByte padding[ VG_MIN_MALLOC_SZB -
                     ((sizeof(struct _Superblock*) + sizeof(SizeT)) %
                      VG_MIN_MALLOC_SZB) ];
      UByte payload_bytes[0];
   }
   Superblock;
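
// Worked padding arithmetic (illustrative): with 4-byte pointers and
// VG_MIN_MALLOC_SZB == 16, the header is 4 + 4 == 8 bytes, so padding[]
// gets 16 - (8 % 16) == 8 bytes and payload_bytes[] starts 16-aligned,
// as required.  With 4-byte pointers and VG_MIN_MALLOC_SZB == 8 the
// header is already aligned, yet padding[] still gets 8 - (8 % 8) == 8
// bytes -- the harmless over-padding mentioned above.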

// An arena.  'freelist' is a circular, doubly-linked list.  'rz_szB' is
// elastic, in that it can be bigger than asked-for to ensure alignment.
typedef
   struct {
      Char*       name;
      Bool        clientmem;        // Allocates in the client address space?
      SizeT       rz_szB;           // Red zone size in bytes
      SizeT       min_sblock_szB;   // Minimum superblock size in bytes
      Block*      freelist[VG_N_MALLOC_LISTS];
      Superblock* sblocks;
      // Stats only.
      SizeT bytes_on_loan;
      SizeT bytes_mmaped;
      SizeT bytes_on_loan_max;
   }
   Arena;


/*------------------------------------------------------------*/
/*--- Low-level functions for working with Blocks.         ---*/
/*------------------------------------------------------------*/

#define SIZE_T_0x1 ((SizeT)0x1)

// Mark a bszB as in-use, and not in-use.
static __inline__
SizeT mk_inuse_bszB ( SizeT bszB )
{
   vg_assert(bszB != 0);
   return bszB & (~SIZE_T_0x1);
}
static __inline__
SizeT mk_free_bszB ( SizeT bszB )
{
   vg_assert(bszB != 0);
   return bszB | SIZE_T_0x1;
}

// Remove the in-use/not-in-use attribute from a bszB, leaving just
// the size.
static __inline__
SizeT mk_plain_bszB ( SizeT bszB )
{
   vg_assert(bszB != 0);
   return bszB & (~SIZE_T_0x1);
}

// Does this bszB have the in-use attribute?
static __inline__
Bool is_inuse_bszB ( SizeT bszB )
{
   vg_assert(bszB != 0);
   return (0 != (bszB & SIZE_T_0x1)) ? False : True;
}


// Set and get the lower size field of a block.
static __inline__
void set_bszB_lo ( Block* b, SizeT bszB )
{
   *(SizeT*)&b[0] = bszB;
}
static __inline__
SizeT get_bszB_lo ( Block* b )
{
   return *(SizeT*)&b[0];
}

// Get the address of the last byte in a block
static __inline__
UByte* last_byte ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return &b2[mk_plain_bszB(get_bszB_lo(b)) - 1];
}

// Set and get the upper size field of a block.
static __inline__
void set_bszB_hi ( Block* b, SizeT bszB )
{
   UByte* b2 = (UByte*)b;
   UByte* lb = last_byte(b);
   vg_assert(lb == &b2[mk_plain_bszB(bszB) - 1]);
   *(SizeT*)&lb[-sizeof(SizeT) + 1] = bszB;
}
static __inline__
SizeT get_bszB_hi ( Block* b )
{
   UByte* lb = last_byte(b);
   return *(SizeT*)&lb[-sizeof(SizeT) + 1];
}


// Return the lower, upper and total overhead in bytes for a block.
// These are determined purely by which arena the block lives in.
static __inline__
SizeT overhead_szB_lo ( Arena* a )
{
   return sizeof(SizeT) + sizeof(void*) + a->rz_szB;
}
static __inline__
SizeT overhead_szB_hi ( Arena* a )
{
   return a->rz_szB + sizeof(void*) + sizeof(SizeT);
}
static __inline__
SizeT overhead_szB ( Arena* a )
{
   return overhead_szB_lo(a) + overhead_szB_hi(a);
}

// Given the addr of a block, return the addr of its payload.
static __inline__
UByte* get_block_payload ( Arena* a, Block* b )
{
   UByte* b2 = (UByte*)b;
   return & b2[ overhead_szB_lo(a) ];
}
// Given the addr of a block's payload, return the addr of the block itself.
static __inline__
Block* get_payload_block ( Arena* a, UByte* payload )
{
   return (Block*)&payload[ -overhead_szB_lo(a) ];
}
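
// The two functions above are exact inverses; a sketch of the intended
// round-trip property (illustrative, not part of the allocator): for any
// Arena* a and Block* b in that arena,
//
//    get_payload_block(a, get_block_payload(a, b)) == b
//
// since both simply offset by overhead_szB_lo(a), in opposite directions.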


// Set and get the next and previous link fields of a block.
static __inline__
void set_prev_b ( Block* b, Block* prev_p )
{
   UByte* b2 = (UByte*)b;
   *(Block**)&b2[sizeof(SizeT)] = prev_p;
}
static __inline__
void set_next_b ( Block* b, Block* next_p )
{
   UByte* lb = last_byte(b);
   *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1] = next_p;
}
static __inline__
Block* get_prev_b ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return *(Block**)&b2[sizeof(SizeT)];
}
static __inline__
Block* get_next_b ( Block* b )
{
   UByte* lb = last_byte(b);
   return *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1];
}


// Get the block immediately preceding this one in the Superblock.
static __inline__
Block* get_predecessor_block ( Block* b )
{
   UByte* b2 = (UByte*)b;
   SizeT  bszB = mk_plain_bszB( (*(SizeT*)&b2[-sizeof(SizeT)]) );
   return (Block*)&b2[-bszB];
}

// Read and write the lower and upper red-zone bytes of a block.
static __inline__
void set_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
{
   UByte* b2 = (UByte*)b;
   b2[sizeof(SizeT) + sizeof(void*) + rz_byteno] = v;
}
static __inline__
void set_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
{
   UByte* lb = last_byte(b);
   lb[-sizeof(SizeT) - sizeof(void*) - rz_byteno] = v;
}
static __inline__
UByte get_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno )
{
   UByte* b2 = (UByte*)b;
   return b2[sizeof(SizeT) + sizeof(void*) + rz_byteno];
}
static __inline__
UByte get_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno )
{
   UByte* lb = last_byte(b);
   return lb[-sizeof(SizeT) - sizeof(void*) - rz_byteno];
}


// Return the minimum bszB for a block in this arena.  Can have zero-length
// payloads, so it's the size of the admin bytes.
static __inline__
SizeT min_useful_bszB ( Arena* a )
{
   return overhead_szB(a);
}

// Convert payload size <--> block size (both in bytes).
static __inline__
SizeT pszB_to_bszB ( Arena* a, SizeT pszB )
{
   return pszB + overhead_szB(a);
}
static __inline__
SizeT bszB_to_pszB ( Arena* a, SizeT bszB )
{
   vg_assert(bszB >= overhead_szB(a));
   return bszB - overhead_szB(a);
}
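
// Again a pair of inverses.  For instance (illustrative, using the 24-byte
// total overhead worked out near the top of this file):
//    pszB_to_bszB(a, 40) == 64   and   bszB_to_pszB(a, 64) == 40.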


/*------------------------------------------------------------*/
/*--- Arena management                                     ---*/
/*------------------------------------------------------------*/

#define CORE_ARENA_MIN_SZB 1048576

// The arena structures themselves.
static Arena vg_arena[VG_N_ARENAS];

// Functions external to this module identify arenas using ArenaIds,
// not Arena*s.  This fn converts the former to the latter.
static Arena* arenaId_to_ArenaP ( ArenaId arena )
{
   vg_assert(arena >= 0 && arena < VG_N_ARENAS);
   return & vg_arena[arena];
}

// Initialise an arena.  rz_szB is the minimum redzone size; it might be
// made bigger to ensure that VG_MIN_MALLOC_SZB alignment is observed.
static
void arena_init ( ArenaId aid, Char* name, SizeT rz_szB, SizeT min_sblock_szB )
{
   SizeT  i;
   Arena* a = arenaId_to_ArenaP(aid);

   vg_assert(rz_szB < 128);   // ensure reasonable size
   vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
   a->name      = name;
   a->clientmem = ( VG_AR_CLIENT == aid ? True : False );

   // The size of the low and high admin sections in a block must be a
   // multiple of VG_MIN_MALLOC_SZB.  So we round up the asked-for
   // redzone size if necessary to achieve this.
   a->rz_szB = rz_szB;
   while (0 != overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++;
   vg_assert(overhead_szB_lo(a) == overhead_szB_hi(a));

   a->min_sblock_szB = min_sblock_szB;
   for (i = 0; i < VG_N_MALLOC_LISTS; i++) a->freelist[i] = NULL;
   a->sblocks           = NULL;
   a->bytes_on_loan     = 0;
   a->bytes_mmaped      = 0;
   a->bytes_on_loan_max = 0;
}
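
// Worked example of the redzone rounding loop (illustrative, assuming a
// 32-bit build with VG_MIN_MALLOC_SZB == 8): asking for rz_szB == 4 gives
// overhead_szB_lo == 4 + 4 + 4 == 12, which is not a multiple of 8, so
// rz_szB is bumped until overhead_szB_lo == 16, i.e. rz_szB becomes 8.
// This matches the "4 becomes 8" remark in ensure_mm_init() below.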

/* Print vital stats for an arena. */
void VG_(print_all_arena_stats) ( void )
{
   UInt i;
   for (i = 0; i < VG_N_ARENAS; i++) {
      Arena* a = arenaId_to_ArenaP(i);
      VG_(message)(Vg_DebugMsg,
                   "%8s: %8d mmap'd, %8d/%8d max/curr",
                   a->name, a->bytes_mmaped, a->bytes_on_loan_max, a->bytes_on_loan
      );
   }
}

/* This library is self-initialising, as it makes this more self-contained,
   less coupled with the outside world.  Hence VG_(arena_malloc)() and
   VG_(arena_free)() below always call ensure_mm_init() to ensure things are
   correctly initialised.  */
static
void ensure_mm_init ( void )
{
   static SizeT client_rz_szB;
   static Bool  init_done = False;

   if (init_done) {
      // Make sure the client arena's redzone size never changes.  Could
      // happen if VG_(arena_malloc) was called too early, ie. before the
      // tool was loaded.
      vg_assert(client_rz_szB == VG_(vg_malloc_redzone_szB));
      return;
   }

   /* No particular reason for this figure, it's just smallish */
   tl_assert(VG_(vg_malloc_redzone_szB) < 128);
   client_rz_szB = VG_(vg_malloc_redzone_szB);

   /* Use checked red zones (of various sizes) for our internal stuff,
      and an unchecked zone of arbitrary size for the client.  Of
      course the client's red zone can be checked by the tool, eg.
      by using addressability maps, but not by the mechanism implemented
      here, which merely checks at the time of freeing that the red
      zone bytes are unchanged.

      Nb: redzone sizes are *minimums*; they could be made bigger to ensure
      alignment.  Eg. on 32-bit machines, 4 becomes 8, and 12 becomes 16;
      but on 64-bit machines 4 stays as 4, and 12 stays as 12 --- the extra
      4 bytes in both are accounted for by the larger prev/next ptr.
   */
   arena_init ( VG_AR_CORE,      "core",     4,              CORE_ARENA_MIN_SZB );
   arena_init ( VG_AR_TOOL,      "tool",     4,              1048576 );
   arena_init ( VG_AR_SYMTAB,    "symtab",   4,              1048576 );
   arena_init ( VG_AR_JITTER,    "JITter",   4,              32768 );
   arena_init ( VG_AR_CLIENT,    "client",   client_rz_szB,  1048576 );
   arena_init ( VG_AR_DEMANGLE,  "demangle", 12/*paranoid*/, 65536 );
   arena_init ( VG_AR_EXECTXT,   "exectxt",  4,              65536 );
   arena_init ( VG_AR_ERRORS,    "errors",   4,              65536 );
   arena_init ( VG_AR_TRANSIENT, "transien", 4,              65536 );

   init_done = True;
#  ifdef DEBUG_MALLOC
   VG_(sanity_check_malloc_all)();
#  endif
}


/*------------------------------------------------------------*/
/*--- Superblock management                                ---*/
/*------------------------------------------------------------*/

// Align ptr p upwards to an align-sized boundary.
static
void* align_upwards ( void* p, SizeT align )
{
   Addr a = (Addr)p;
   if ((a % align) == 0) return (void*)a;
   return (void*)(a - (a % align) + align);
}
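
// Example behaviour (illustrative): align_upwards((void*)0x1005, 16)
// yields (void*)0x1010, while an already-aligned (void*)0x1010 is
// returned unchanged.  Callers pass power-of-2 alignments, although the
// arithmetic above only requires 'align' to be non-zero.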

// If not enough memory available, either aborts (for non-client memory)
// or returns 0 (for client memory).
static
Superblock* newSuperblock ( Arena* a, SizeT cszB )
{
   // The extra VG_MIN_MALLOC_SZB bytes are for possible alignment up.
   static UByte bootstrap_superblock[CORE_ARENA_MIN_SZB+VG_MIN_MALLOC_SZB];
   static Bool  called_before = True; //False;
   Superblock* sb;

   // Take into account admin bytes in the Superblock.
   cszB += sizeof(Superblock);

   if (cszB < a->min_sblock_szB) cszB = a->min_sblock_szB;
   while ((cszB % VKI_PAGE_SIZE) > 0) cszB++;

   if (!called_before) {
      // First time we're called -- use the special static bootstrap
      // superblock (see comment at top of main() for details).
      called_before = True;
      vg_assert(a == arenaId_to_ArenaP(VG_AR_CORE));
      vg_assert(CORE_ARENA_MIN_SZB >= cszB);
      // Ensure sb is suitably aligned.
      sb = (Superblock*)align_upwards( bootstrap_superblock,
                                       VG_MIN_MALLOC_SZB );
   } else if (a->clientmem) {
      // client allocation -- return 0 to client if it fails
      sb = (Superblock *)
           VG_(client_alloc)(0, cszB,
                             VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC, 0);
      if (NULL == sb)
         return 0;
   } else {
      // non-client allocation -- aborts if it fails
      sb = VG_(get_memory_from_mmap) ( cszB, "newSuperblock" );
   }
   vg_assert(NULL != sb);
   //zzVALGRIND_MAKE_WRITABLE(sb, cszB);
   vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
   sb->n_payload_bytes = cszB - sizeof(Superblock);
   a->bytes_mmaped += cszB;
   if (0)
      VG_(message)(Vg_DebugMsg, "newSuperblock, %d payload bytes",
                                sb->n_payload_bytes);
   return sb;
}

// Find the superblock containing the given chunk.
static
Superblock* findSb ( Arena* a, Block* b )
{
   Superblock* sb;
   for (sb = a->sblocks; sb; sb = sb->next)
      if ((Block*)&sb->payload_bytes[0] <= b
          && b < (Block*)&sb->payload_bytes[sb->n_payload_bytes])
         return sb;
   VG_(printf)("findSb: can't find pointer %p in arena `%s'\n", b, a->name );
   VG_(core_panic)("findSb: VG_(arena_free)() in wrong arena?");
   return NULL; /*NOTREACHED*/
}


/*------------------------------------------------------------*/
/*--- Command line options                                 ---*/
/*------------------------------------------------------------*/

/* Round malloc sizes up to a multiple of VG_SLOPPY_MALLOC_SZB bytes?
   default: NO
   Nb: the allocator always rounds blocks up to a multiple of
   VG_MIN_MALLOC_SZB.  VG_(clo_sloppy_malloc) is relevant eg. for
   Memcheck, which will be byte-precise with addressability maps on its
   malloc allocations unless --sloppy-malloc=yes.  */
Bool VG_(clo_sloppy_malloc) = False;

/* DEBUG: print malloc details?  default: NO */
Bool VG_(clo_trace_malloc)  = False;

/* Minimum alignment in functions that don't specify alignment explicitly.
   default: VG_MIN_MALLOC_SZB. */
UInt VG_(clo_alignment) = VG_MIN_MALLOC_SZB;


Bool VG_(replacement_malloc_process_cmd_line_option)(Char* arg)
{
   if (VG_CLO_STREQN(12, arg, "--alignment=")) {
      VG_(clo_alignment) = (UInt)VG_(atoll)(&arg[12]);

      if (VG_(clo_alignment) < VG_MIN_MALLOC_SZB
          || VG_(clo_alignment) > 4096
          || VG_(log2)( VG_(clo_alignment) ) == -1 /* not a power of 2 */) {
         VG_(message)(Vg_UserMsg, "");
         VG_(message)(Vg_UserMsg,
            "Invalid --alignment= setting.  "
            "Should be a power of 2, >= %d, <= 4096.", VG_MIN_MALLOC_SZB);
         VG_(bad_option)("--alignment");
      }
   }

   else VG_BOOL_CLO("--sloppy-malloc",  VG_(clo_sloppy_malloc))
   else VG_BOOL_CLO("--trace-malloc",   VG_(clo_trace_malloc))
   else
      return False;

   return True;
}

void VG_(replacement_malloc_print_usage)(void)
{
   VG_(printf)(
"    --sloppy-malloc=no|yes    round malloc sizes to multiple of %d? [no]\n"
"    --alignment=<number>      set minimum alignment of allocations [%d]\n",
   VG_SLOPPY_MALLOC_SZB, VG_MIN_MALLOC_SZB
   );
}

void VG_(replacement_malloc_print_debug_usage)(void)
{
   VG_(printf)(
"    --trace-malloc=no|yes     show client malloc details? [no]\n"
   );
}


/*------------------------------------------------------------*/
/*--- Functions for working with freelists.                ---*/
/*------------------------------------------------------------*/

// Nb: Determination of which freelist a block lives on is based on the
// payload size, not block size.

// Convert a payload size in bytes to a freelist number.
static
UInt pszB_to_listNo ( SizeT pszB )
{
   vg_assert(0 == pszB % VG_MIN_MALLOC_SZB);
   pszB /= VG_MIN_MALLOC_SZB;
   if (pszB <= 2)   return 0;
   if (pszB <= 3)   return 1;
   if (pszB <= 4)   return 2;
   if (pszB <= 5)   return 3;
   if (pszB <= 6)   return 4;
   if (pszB <= 7)   return 5;
   if (pszB <= 8)   return 6;
   if (pszB <= 9)   return 7;
   if (pszB <= 10)  return 8;
   if (pszB <= 11)  return 9;
   if (pszB <= 12)  return 10;
   if (pszB <= 16)  return 11;
   if (pszB <= 32)  return 12;
   if (pszB <= 64)  return 13;
   if (pszB <= 128) return 14;
   return 15;
}
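
#if 0
/* Compiled-out sketch of the mapping above (assumes VG_MIN_MALLOC_SZB == 8;
   kept purely as documentation of the bucket boundaries). */
static void pszB_to_listNo_examples ( void )
{
   vg_assert(pszB_to_listNo(16)   == 0);   // 16/8   == 2
   vg_assert(pszB_to_listNo(24)   == 1);   // 24/8   == 3
   vg_assert(pszB_to_listNo(104)  == 11);  // 104/8  == 13, the "<= 16" bucket
   vg_assert(pszB_to_listNo(2048) == 15);  // 2048/8 == 256, beyond all buckets
}
#endif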

// What is the minimum payload size for a given list?
static
SizeT listNo_to_pszB_min ( UInt listNo )
{
   SizeT pszB = 0;
   vg_assert(listNo <= VG_N_MALLOC_LISTS);
   while (pszB_to_listNo(pszB) < listNo) pszB += VG_MIN_MALLOC_SZB;
   return pszB;
}

// What is the maximum payload size for a given list?
static
SizeT listNo_to_pszB_max ( UInt listNo )
{
   vg_assert(listNo <= VG_N_MALLOC_LISTS);
   if (listNo == VG_N_MALLOC_LISTS-1) {
      return MAX_PSZB;
   } else {
      return listNo_to_pszB_min(listNo+1) - 1;
   }
}


/* A nasty hack to try and reduce fragmentation.  Try and replace
   a->freelist[lno] with another block on the same list but with a
   lower address, with the idea of attempting to recycle the same
   blocks rather than cruise through the address space. */
static
void swizzle ( Arena* a, UInt lno )
{
   Block* p_best;
   Block* pp;
   Block* pn;
   UInt   i;

   p_best = a->freelist[lno];
   if (p_best == NULL) return;

   pn = pp = p_best;
   for (i = 0; i < 20; i++) {
      pn = get_next_b(pn);
      pp = get_prev_b(pp);
      if (pn < p_best) p_best = pn;
      if (pp < p_best) p_best = pp;
   }
   if (p_best < a->freelist[lno]) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)("retreat by %d\n", a->freelist[lno] - p_best);
#     endif
      a->freelist[lno] = p_best;
   }
}


/*------------------------------------------------------------*/
/*--- Sanity-check/debugging machinery.                    ---*/
/*------------------------------------------------------------*/

#define VG_REDZONE_LO_MASK 0x31
#define VG_REDZONE_HI_MASK 0x7c

// Do some crude sanity checks on a Block.
static
Bool blockSane ( Arena* a, Block* b )
{
#  define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
   UInt i;
   if (get_bszB_lo(b) != get_bszB_hi(b))
      {BLEAT("sizes");return False;}
   if (!a->clientmem && is_inuse_bszB(get_bszB_lo(b))) {
      for (i = 0; i < a->rz_szB; i++) {
         if (get_rz_lo_byte(a, b, i) !=
             (UByte)(((Addr)b&0xff) ^ VG_REDZONE_LO_MASK))
            {BLEAT("redzone-lo");return False;}
         if (get_rz_hi_byte(a, b, i) !=
             (UByte)(((Addr)b&0xff) ^ VG_REDZONE_HI_MASK))
            {BLEAT("redzone-hi");return False;}
      }
   }
   return True;
#  undef BLEAT
}

// Print superblocks (only for debugging).
static
void ppSuperblocks ( Arena* a )
{
   UInt i, blockno;
   SizeT b_bszB;
   Block* b;
   Superblock* sb = a->sblocks;
   blockno = 1;

   while (sb) {
      VG_(printf)( "\n" );
      VG_(printf)( "superblock %d at %p, sb->n_pl_bs = %d, next = %p\n",
                   blockno++, sb, sb->n_payload_bytes, sb->next );
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         b      = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_lo(b);
         VG_(printf)( "   block at %d, bszB %d: ", i, mk_plain_bszB(b_bszB) );
         VG_(printf)( "%s, ", is_inuse_bszB(b_bszB) ? "inuse" : "free");
         VG_(printf)( "%s\n", blockSane(a, b) ? "ok" : "BAD" );
      }
      vg_assert(i == sb->n_payload_bytes);   // no overshoot at end of Sb
      sb = sb->next;
   }
   VG_(printf)( "end of superblocks\n\n" );
}

// Sanity check both the superblocks and the chains.
static void sanity_check_malloc_arena ( ArenaId aid )
{
   UInt i, superblockctr, blockctr_sb, blockctr_li;
   UInt blockctr_sb_free, listno;
   SizeT b_bszB, b_pszB, list_min_pszB, list_max_pszB;
   Superblock* sb;
   Bool thisFree, lastWasFree;
   Block* b;
   Block* b_prev;
   SizeT arena_bytes_on_loan;
   Arena* a;

#  define BOMB VG_(core_panic)("sanity_check_malloc_arena")

   a = arenaId_to_ArenaP(aid);

   // First, traverse all the superblocks, inspecting the Blocks in each.
   superblockctr = blockctr_sb = blockctr_sb_free = 0;
   arena_bytes_on_loan = 0;
   sb = a->sblocks;
   while (sb) {
      lastWasFree = False;
      superblockctr++;
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         blockctr_sb++;
         b      = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_lo(b);
         if (!blockSane(a, b)) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d (bszB %d): "
                        " BAD\n", sb, i, b_bszB );
            BOMB;
         }
         thisFree = !is_inuse_bszB(b_bszB);
         if (thisFree && lastWasFree) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d (bszB %d): "
                        "UNMERGED FREES\n",
                        sb, i, b_bszB );
            BOMB;
         }
         if (thisFree) blockctr_sb_free++;
         if (!thisFree)
            arena_bytes_on_loan += bszB_to_pszB(a, b_bszB);
         lastWasFree = thisFree;
      }
      if (i > sb->n_payload_bytes) {
         VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
                      "overshoots end\n", sb);
         BOMB;
      }
      sb = sb->next;
   }

   if (arena_bytes_on_loan != a->bytes_on_loan) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: a->bytes_on_loan %d, "
                   "arena_bytes_on_loan %d: "
                   "MISMATCH\n", a->bytes_on_loan, arena_bytes_on_loan);
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   /* Second, traverse each list, checking that the back pointers make
      sense, counting blocks encountered, and checking that each block
      is an appropriate size for this list. */
   blockctr_li = 0;
   for (listno = 0; listno < VG_N_MALLOC_LISTS; listno++) {
      list_min_pszB = listNo_to_pszB_min(listno);
      list_max_pszB = listNo_to_pszB_max(listno);
      b = a->freelist[listno];
      if (b == NULL) continue;
      while (True) {
         b_prev = b;
         b = get_next_b(b);
         if (get_prev_b(b) != b_prev) {
            VG_(printf)( "sanity_check_malloc_arena: list %d at %p: "
                         "BAD LINKAGE\n",
                         listno, b );
            BOMB;
         }
         b_pszB = bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(b)));
         if (b_pszB < list_min_pszB || b_pszB > list_max_pszB) {
            VG_(printf)(
               "sanity_check_malloc_arena: list %d at %p: "
               "WRONG CHAIN SIZE %dB (%dB, %dB)\n",
               listno, b, b_pszB, list_min_pszB, list_max_pszB );
            BOMB;
         }
         blockctr_li++;
         if (b == a->freelist[listno]) break;
      }
   }

   if (blockctr_sb_free != blockctr_li) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: BLOCK COUNT MISMATCH "
                   "(via sbs %d, via lists %d)\n",
                   blockctr_sb_free, blockctr_li );
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   if (VG_(clo_verbosity) > 2)
      VG_(message)(Vg_DebugMsg,
                   "%8s: %2d sbs, %5d bs, %2d/%-2d free bs, "
                   "%7d mmap, %7d loan",
                   a->name,
                   superblockctr,
                   blockctr_sb, blockctr_sb_free, blockctr_li,
                   a->bytes_mmaped, a->bytes_on_loan);
#  undef BOMB
}


void VG_(sanity_check_malloc_all) ( void )
{
   UInt i;
   for (i = 0; i < VG_N_ARENAS; i++)
      sanity_check_malloc_arena ( i );
}

/* Really, this isn't the right place for this.  Nevertheless: find
   out if an arena is empty -- currently has no bytes on loan.  This
   is useful for checking for memory leaks (of valgrind, not the
   client.) */
Bool VG_(is_empty_arena) ( ArenaId aid )
{
   Arena* a;
   Superblock* sb;
   Block* b;
   SizeT b_bszB;

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);
   for (sb = a->sblocks; sb != NULL; sb = sb->next) {
      // If the superblock is empty, it should contain a single free
      // block, of the right size.
      b = (Block*)&sb->payload_bytes[0];
      b_bszB = get_bszB_lo(b);
      if (is_inuse_bszB(b_bszB)) return False;
      if (mk_plain_bszB(b_bszB) != sb->n_payload_bytes) return False;
      // If we reach here, this block is not in use and is of the right
      // size, so keep going around the loop...
   }
   return True;
}


/*------------------------------------------------------------*/
/*--- Creating and deleting blocks.                        ---*/
/*------------------------------------------------------------*/

// Mark the bytes at b .. b+bszB-1 as not in use, and add them to the
// relevant free list.

static
void mkFreeBlock ( Arena* a, Block* b, SizeT bszB, UInt b_lno )
{
   SizeT pszB = bszB_to_pszB(a, bszB);
   vg_assert(b_lno == pszB_to_listNo(pszB));
   //zzVALGRIND_MAKE_WRITABLE(b, bszB);
   // Set the size fields and indicate not-in-use.
   set_bszB_lo(b, mk_free_bszB(bszB));
   set_bszB_hi(b, mk_free_bszB(bszB));

   // Add to the relevant list.
   if (a->freelist[b_lno] == NULL) {
      set_prev_b(b, b);
      set_next_b(b, b);
      a->freelist[b_lno] = b;
   } else {
      Block* b_prev = get_prev_b(a->freelist[b_lno]);
      Block* b_next = a->freelist[b_lno];
      set_next_b(b_prev, b);
      set_prev_b(b_next, b);
      set_next_b(b, b_next);
      set_prev_b(b, b_prev);
   }
#  ifdef DEBUG_MALLOC
   (void)blockSane(a,b);
#  endif
}

// Mark the bytes at b .. b+bszB-1 as in use, and set up the block
// appropriately.
static
void mkInuseBlock ( Arena* a, Block* b, SizeT bszB )
{
   UInt i;
   vg_assert(bszB >= min_useful_bszB(a));
   //zzVALGRIND_MAKE_WRITABLE(b, bszB);
   set_bszB_lo(b, mk_inuse_bszB(bszB));
   set_bszB_hi(b, mk_inuse_bszB(bszB));
   set_prev_b(b, NULL);    // Take off freelist
   set_next_b(b, NULL);    // ditto
   if (!a->clientmem) {
      for (i = 0; i < a->rz_szB; i++) {
         set_rz_lo_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ VG_REDZONE_LO_MASK));
         set_rz_hi_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ VG_REDZONE_HI_MASK));
      }
   }
#  ifdef DEBUG_MALLOC
   (void)blockSane(a,b);
#  endif
}

// Remove a block from a given list.  Does no sanity checking.
static
void unlinkBlock ( Arena* a, Block* b, UInt listno )
{
   vg_assert(listno < VG_N_MALLOC_LISTS);
   if (get_prev_b(b) == b) {
      // Only one element in the list; treat it specially.
      vg_assert(get_next_b(b) == b);
      a->freelist[listno] = NULL;
   } else {
      Block* b_prev = get_prev_b(b);
      Block* b_next = get_next_b(b);
      a->freelist[listno] = b_prev;
      set_next_b(b_prev, b_next);
      set_prev_b(b_next, b_prev);
      swizzle ( a, listno );
   }
   set_prev_b(b, NULL);
   set_next_b(b, NULL);
}


/*------------------------------------------------------------*/
/*--- Core-visible functions.                              ---*/
/*------------------------------------------------------------*/

// Align the request size.
static __inline__
SizeT align_req_pszB ( SizeT req_pszB )
{
   SizeT n = VG_MIN_MALLOC_SZB-1;
   return ((req_pszB + n) & (~n));
}
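
// Example (illustrative, assuming VG_MIN_MALLOC_SZB == 8): a request for
// 13 bytes becomes (13 + 7) & ~7 == 16; a request for 16 is unchanged.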

void* VG_(arena_malloc) ( ArenaId aid, SizeT req_pszB )
{
   SizeT       req_bszB, frag_bszB, b_bszB;
   UInt        lno;
   Superblock* new_sb;
   Block*      b = NULL;
   Arena*      a;
   void*       v;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   vg_assert(req_pszB < MAX_PSZB);
   req_pszB = align_req_pszB(req_pszB);
   req_bszB = pszB_to_bszB(a, req_pszB);

   // Scan through all the big-enough freelists for a block.
   for (lno = pszB_to_listNo(req_pszB); lno < VG_N_MALLOC_LISTS; lno++) {
      b = a->freelist[lno];
      if (NULL == b) continue;   // If this list is empty, try the next one.
      while (True) {
         b_bszB = mk_plain_bszB(get_bszB_lo(b));
         if (b_bszB >= req_bszB) goto obtained_block;   // success!
         b = get_next_b(b);
         if (b == a->freelist[lno]) break;   // traversed entire freelist
      }
   }

   // If we reach here, no suitable block found, allocate a new superblock
   vg_assert(lno == VG_N_MALLOC_LISTS);
   new_sb = newSuperblock(a, req_bszB);
   if (NULL == new_sb) {
      // Should only fail for the client arena; otherwise we should have
      // aborted already.
      vg_assert(VG_AR_CLIENT == aid);
      return NULL;
   }
   new_sb->next = a->sblocks;
   a->sblocks = new_sb;
   b = (Block*)&new_sb->payload_bytes[0];
   lno = pszB_to_listNo(bszB_to_pszB(a, new_sb->n_payload_bytes));
   mkFreeBlock ( a, b, new_sb->n_payload_bytes, lno);
   // fall through

  obtained_block:
   // Ok, we can allocate from b, which lives in list lno.
   vg_assert(b != NULL);
   vg_assert(lno < VG_N_MALLOC_LISTS);
   vg_assert(a->freelist[lno] != NULL);
   b_bszB = mk_plain_bszB(get_bszB_lo(b));
   // req_bszB is the size of the block we are after.  b_bszB is the
   // size of what we've actually got.
   vg_assert(b_bszB >= req_bszB);

   // Could we split this block and still get a useful fragment?
   frag_bszB = b_bszB - req_bszB;
   if (frag_bszB >= min_useful_bszB(a)) {
      // Yes, split block in two, put the fragment on the appropriate free
      // list, and update b_bszB accordingly.
      // printf( "split %dB into %dB and %dB\n", b_bszB, req_bszB, frag_bszB );
      unlinkBlock(a, b, lno);
      mkInuseBlock(a, b, req_bszB);
      mkFreeBlock(a, &b[req_bszB], frag_bszB,
                  pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
      b_bszB = mk_plain_bszB(get_bszB_lo(b));
   } else {
      // No, mark as in use and use as-is.
      unlinkBlock(a, b, lno);
      mkInuseBlock(a, b, b_bszB);
   }

   // Update stats
   a->bytes_on_loan += bszB_to_pszB(a, b_bszB);
   if (a->bytes_on_loan > a->bytes_on_loan_max)
      a->bytes_on_loan_max = a->bytes_on_loan;

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   VGP_POPCC(VgpMalloc);
   v = get_block_payload(a, b);
   vg_assert( (((Addr)v) & (VG_MIN_MALLOC_SZB-1)) == 0 );

   VALGRIND_MALLOCLIKE_BLOCK(v, req_pszB, 0, False);
   return v;
}


void VG_(arena_free) ( ArenaId aid, void* ptr )
{
   Superblock* sb;
   UByte* sb_start;
   UByte* sb_end;
   Block* other;
   Block* b;
   SizeT b_bszB, b_pszB, other_bszB;
   UInt  b_listno;
   Arena* a;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   if (ptr == NULL) {
      VGP_POPCC(VgpMalloc);
      return;
   }

   b = get_payload_block(a, ptr);

#  ifdef DEBUG_MALLOC
   vg_assert(blockSane(a, b));
#  endif

   a->bytes_on_loan -= bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(b)));

   sb       = findSb( a, b );
   sb_start = &sb->payload_bytes[0];
   sb_end   = &sb->payload_bytes[sb->n_payload_bytes - 1];

   // Put this chunk back on a list somewhere.
   b_bszB   = get_bszB_lo(b);
   b_pszB   = bszB_to_pszB(a, b_bszB);
   b_listno = pszB_to_listNo(b_pszB);
   mkFreeBlock( a, b, b_bszB, b_listno );

   // See if this block can be merged with its successor.
   // First test if we're far enough before the superblock's end to possibly
   // have a successor.
   other = b + b_bszB;
   if (other+min_useful_bszB(a)-1 <= (Block*)sb_end) {
      // Ok, we have a successor, merge if it's not in use.
      other_bszB = get_bszB_lo(other);
      if (!is_inuse_bszB(other_bszB)) {
         // VG_(printf)( "merge-successor\n");
         other_bszB = mk_plain_bszB(other_bszB);
#        ifdef DEBUG_MALLOC
         vg_assert(blockSane(a, other));
#        endif
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other, pszB_to_listNo(bszB_to_pszB(a,other_bszB)) );
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
      }
   } else {
      // Not enough space for successor: check that b is the last block
      // ie. there are no unused bytes at the end of the Superblock.
      vg_assert(other-1 == (Block*)sb_end);
   }

   // Then see if this block can be merged with its predecessor.
   // First test if we're far enough after the superblock's start to possibly
   // have a predecessor.
   if (b >= (Block*)sb_start + min_useful_bszB(a)) {
      // Ok, we have a predecessor, merge if it's not in use.
      other = get_predecessor_block( b );
      other_bszB = get_bszB_lo(other);
      if (!is_inuse_bszB(other_bszB)) {
         // VG_(printf)( "merge-predecessor\n");
         other_bszB = mk_plain_bszB(other_bszB);
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other, pszB_to_listNo(bszB_to_pszB(a, other_bszB)) );
         b = other;
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
      }
   } else {
      // Not enough space for predecessor: check that b is the first block,
      // ie. there are no unused bytes at the start of the Superblock.
      vg_assert((Block*)sb_start == b);
   }

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   VALGRIND_FREELIKE_BLOCK(ptr, 0);

   VGP_POPCC(VgpMalloc);
}


/*
   The idea for malloc_aligned() is to allocate a big block, base, and
   then split it into two parts: frag, which is returned to the
   free pool, and align, which is the bit we're really after.  Here's
   a picture.  L and H denote the block lower and upper overheads, in
   bytes.  The details are gruesome.  Note it is slightly complicated
   because the initial request to generate base may return a bigger
   block than we asked for, so it is important to distinguish the base
   request size and the base actual size.

   frag_b                   align_b
   |                        |
   |    frag_p              |    align_p
   |    |                   |    |
   v    v                   v    v

   +---+                +---+---+               +---+
   | L |----------------| H | L |---------------| H |
   +---+                +---+---+               +---+

   ^    ^                        ^
   |    |                        :
   |    base_p                   this addr must be aligned
   |
   base_b

   .    .               .   .   .               .   .
   <------ frag_bszB ------->   .               .   .
   .    <------------- base_pszB_act ----------->   .
   .    .               .   .   .               .   .

*/
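/* A worked instance of the sizing below (illustrative; assumes
   min_useful_bszB(a) == 24 as in the example near the top of the file):
   req_pszB == 100 and req_alignB == 64 give
      base_pszB_req == 100 + 24 + 64 == 188
   which is enough to guarantee that somewhere within the base block there
   is a 64-aligned payload address with at least 100 usable bytes after it. */
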
nethercote7ac7f7b2004-11-02 12:36:02 +00001171void* VG_(arena_malloc_aligned) ( ArenaId aid, SizeT req_alignB, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001172{
nethercote7ac7f7b2004-11-02 12:36:02 +00001173 SizeT base_pszB_req, base_pszB_act, frag_bszB;
nethercote2d5b8162004-08-11 09:40:52 +00001174 Block *base_b, *align_b;
1175 UByte *base_p, *align_p;
nethercote7ac7f7b2004-11-02 12:36:02 +00001176 SizeT saved_bytes_on_loan;
sewardjde4a1d02002-03-22 01:27:54 +00001177 Arena* a;
1178
njn25e49d8e72002-09-23 09:36:25 +00001179 VGP_PUSHCC(VgpMalloc);
1180
sewardjde4a1d02002-03-22 01:27:54 +00001181 ensure_mm_init();
1182 a = arenaId_to_ArenaP(aid);
1183
nethercote7ac7f7b2004-11-02 12:36:02 +00001184 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00001185
nethercote2d5b8162004-08-11 09:40:52 +00001186 // Check that the requested alignment seems reasonable; that is, is
1187 // a power of 2.
1188 if (req_alignB < VG_MIN_MALLOC_SZB
1189 || req_alignB > 1048576
1190 || VG_(log2)( VG_(clo_alignment) ) == -1 /* not a power of 2 */) {
1191 VG_(printf)("VG_(arena_malloc_aligned)(%p, %d, %d)\nbad alignment",
1192 a, req_alignB, req_pszB );
1193 VG_(core_panic)("VG_(arena_malloc_aligned)");
1194 /*NOTREACHED*/
sewardjde4a1d02002-03-22 01:27:54 +00001195 }
nethercote2d5b8162004-08-11 09:40:52 +00001196 // Paranoid
1197 vg_assert(req_alignB % VG_MIN_MALLOC_SZB == 0);
sewardjde4a1d02002-03-22 01:27:54 +00001198
1199 /* Required payload size for the aligned chunk. */
nethercote2d5b8162004-08-11 09:40:52 +00001200 req_pszB = align_req_pszB(req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001201
nethercote2d5b8162004-08-11 09:40:52 +00001202 /* Payload size to request for the big block that we will split up. */
1203 base_pszB_req = req_pszB + min_useful_bszB(a) + req_alignB;
sewardjde4a1d02002-03-22 01:27:54 +00001204
1205 /* Payload ptr for the block we are going to split. Note this
1206 changes a->bytes_on_loan; we save and restore it ourselves. */
1207 saved_bytes_on_loan = a->bytes_on_loan;
nethercote2d5b8162004-08-11 09:40:52 +00001208 base_p = VG_(arena_malloc) ( aid, base_pszB_req );
sewardjde4a1d02002-03-22 01:27:54 +00001209 a->bytes_on_loan = saved_bytes_on_loan;
1210
1211 /* Block ptr for the block we are going to split. */
nethercote2d5b8162004-08-11 09:40:52 +00001212 base_b = get_payload_block ( a, base_p );
sewardjde4a1d02002-03-22 01:27:54 +00001213
1214 /* Pointer to the payload of the aligned block we are going to
1215 return. This has to be suitably aligned. */
nethercote2d5b8162004-08-11 09:40:52 +00001216 align_p = align_upwards ( base_b + 2 * overhead_szB_lo(a)
1217 + overhead_szB_hi(a),
sewardjde4a1d02002-03-22 01:27:54 +00001218 req_alignB );
   align_b = get_payload_block(a, align_p);

   /* The block size of the fragment we will create.  This must be big
      enough to actually create a fragment. */
   frag_bszB = align_b - base_b;

   vg_assert(frag_bszB >= min_useful_bszB(a));

   /* The actual payload size of the block we are going to split. */
   base_pszB_act = bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(base_b)));

   /* Create the fragment block, and put it back on the relevant free list. */
   mkFreeBlock ( a, base_b, frag_bszB,
                 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)) );

   /* Create the aligned block. */
   mkInuseBlock ( a, align_b,
                  base_p + base_pszB_act
                  + overhead_szB_hi(a) - (UByte*)align_b );

   /* Final sanity checks. */
   vg_assert( is_inuse_bszB(get_bszB_lo(get_payload_block(a, align_p))) );

   vg_assert(req_pszB
             <= bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(
                                   get_payload_block(a, align_p)))) );

   a->bytes_on_loan
      += bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(
                            get_payload_block(a, align_p))));
   if (a->bytes_on_loan > a->bytes_on_loan_max)
      a->bytes_on_loan_max = a->bytes_on_loan;

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   VGP_POPCC(VgpMalloc);

   vg_assert( (((Addr)align_p) % req_alignB) == 0 );

   VALGRIND_MALLOCLIKE_BLOCK(align_p, req_pszB, 0, False);

   return align_p;
}
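
/* Illustrative use (a sketch, not compiled in; assumes
   VG_MIN_MALLOC_SZB <= 64 so the alignment check passes): request 256
   payload bytes on a 64-byte boundary from the tool arena, relying on
   the alignment invariant asserted above. */
#if 0
static void example_aligned_alloc ( void )
{
   UChar* buf = VG_(arena_malloc_aligned)( VG_AR_TOOL, 64, 256 );
   vg_assert( (((Addr)buf) % 64) == 0 );   /* same invariant as above */
   VG_(arena_free)( VG_AR_TOOL, buf );
}
#endif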


SizeT VG_(arena_payload_szB) ( ArenaId aid, void* ptr )
{
   Arena* a = arenaId_to_ArenaP(aid);
   Block* b = get_payload_block(a, ptr);
   return bszB_to_pszB(a, get_bszB_lo(b));
}


/*------------------------------------------------------------*/
/*--- Services layered on top of malloc/free.              ---*/
/*------------------------------------------------------------*/

void* VG_(arena_calloc) ( ArenaId aid, SizeT alignB, SizeT nmemb,
                          SizeT bytes_per_memb )
{
   SizeT size;
   UChar* p;

   VGP_PUSHCC(VgpMalloc);

   size = nmemb * bytes_per_memb;
   // Check the multiplication did not overflow.  (The weaker test
   // "size >= nmemb && size >= bytes_per_memb" misses cases such as
   // nmemb == bytes_per_memb == 0x10001 on a 32-bit host.)
   vg_assert(bytes_per_memb == 0 || size / bytes_per_memb == nmemb);

   if (alignB == VG_MIN_MALLOC_SZB)
      p = VG_(arena_malloc) ( aid, size );
   else
      p = VG_(arena_malloc_aligned) ( aid, alignB, size );

   VG_(memset)(p, 0, size);

   VALGRIND_MALLOCLIKE_BLOCK(p, size, 0, True);

   VGP_POPCC(VgpMalloc);

   return p;
}
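
/* Illustrative use (a sketch, not compiled in): ten zero-initialised
   Ints from the tool arena at the default alignment. */
#if 0
static void example_calloc ( void )
{
   Int* xs = VG_(arena_calloc)( VG_AR_TOOL, VG_MIN_MALLOC_SZB,
                                10, sizeof(Int) );
   vg_assert(xs[0] == 0 && xs[9] == 0);   /* calloc zero-fills */
   VG_(arena_free)( VG_AR_TOOL, xs );
}
#endif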


void* VG_(arena_realloc) ( ArenaId aid, void* ptr,
                           SizeT req_alignB, SizeT req_pszB )
{
   Arena* a;
   SizeT  old_bszB, old_pszB;
   UChar* p_new;
   Block* b;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   vg_assert(req_pszB < MAX_PSZB);

   b = get_payload_block(a, ptr);
   vg_assert(blockSane(a, b));

   old_bszB = get_bszB_lo(b);
   vg_assert(is_inuse_bszB(old_bszB));
   old_bszB = mk_plain_bszB(old_bszB);
   old_pszB = bszB_to_pszB(a, old_bszB);

   if (req_pszB <= old_pszB) {
      VGP_POPCC(VgpMalloc);
      return ptr;
   }

   if (req_alignB == VG_MIN_MALLOC_SZB)
      p_new = VG_(arena_malloc) ( aid, req_pszB );
   else
      p_new = VG_(arena_malloc_aligned) ( aid, req_alignB, req_pszB );

   VG_(memcpy)(p_new, ptr, old_pszB);

   VG_(arena_free)(aid, ptr);

   VGP_POPCC(VgpMalloc);
   return p_new;
}
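
/* Illustrative use (a sketch, not compiled in): growing a buffer.  Per
   the early-out above, a request no larger than the current payload
   returns the original pointer unchanged. */
#if 0
static void example_realloc ( void )
{
   UChar* p = VG_(arena_malloc)( VG_AR_TOOL, 100 );
   UChar* q = VG_(arena_realloc)( VG_AR_TOOL, p, VG_MIN_MALLOC_SZB, 50 );
   vg_assert(q == p);   /* 50 <= old payload size: block reused */
   q = VG_(arena_realloc)( VG_AR_TOOL, q, VG_MIN_MALLOC_SZB, 1000 );
   VG_(arena_free)( VG_AR_TOOL, q );
}
#endif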


/*------------------------------------------------------------*/
/*--- Tool-visible functions.                              ---*/
/*------------------------------------------------------------*/

// All just wrappers to avoid exposing arenas to tools.

void* VG_(malloc) ( SizeT nbytes )
{
   return VG_(arena_malloc) ( VG_AR_TOOL, nbytes );
}

void VG_(free) ( void* ptr )
{
   VG_(arena_free) ( VG_AR_TOOL, ptr );
}

void* VG_(calloc) ( SizeT nmemb, SizeT bytes_per_memb )
{
   return VG_(arena_calloc) ( VG_AR_TOOL, VG_MIN_MALLOC_SZB, nmemb,
                              bytes_per_memb );
}

void* VG_(realloc) ( void* ptr, SizeT size )
{
   return VG_(arena_realloc) ( VG_AR_TOOL, ptr, VG_MIN_MALLOC_SZB, size );
}

void* VG_(malloc_aligned) ( SizeT req_alignB, SizeT req_pszB )
{
   return VG_(arena_malloc_aligned) ( VG_AR_TOOL, req_alignB, req_pszB );
}
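
/* Illustrative use (a sketch, not compiled in): tool code allocates
   through the wrappers above and never names an arena directly. */
#if 0
static void example_tool_alloc ( void )
{
   Int* counters = VG_(calloc)( 10, sizeof(Int) );   /* zero-filled */
   counters[3]++;
   counters = VG_(realloc)( counters, 20 * sizeof(Int) );
   VG_(free)( counters );
}
#endif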


void* VG_(cli_malloc) ( SizeT align, SizeT nbytes )
{
   // 'align' should be valid by now.  VG_(arena_malloc_aligned)() will
   // abort if it's not.
   if (VG_MIN_MALLOC_SZB == align)
      return VG_(arena_malloc) ( VG_AR_CLIENT, nbytes );
   else
      return VG_(arena_malloc_aligned) ( VG_AR_CLIENT, align, nbytes );
}

void VG_(cli_free) ( void* p )
{
   VG_(arena_free) ( VG_AR_CLIENT, p );
}


Bool VG_(addr_is_in_block)( Addr a, Addr start, SizeT size )
{
   return (start - VG_(vg_malloc_redzone_szB) <= a
           && a < start + size + VG_(vg_malloc_redzone_szB));
}
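
/* For instance (hypothetical figures): with an 8-byte client red zone,
   a block whose payload starts at 0x1000 with size 16 is treated as
   covering the range [0xFF8, 0x1018), since the red zones on either
   side count as part of the block. */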


/*------------------------------------------------------------*/
/*--- The original test driver machinery.                  ---*/
/*------------------------------------------------------------*/

#if 0

#if 1
#define N_TEST_TRANSACTIONS 100000000
#define N_TEST_ARR 200000
#define M_TEST_MALLOC 1000
#else
#define N_TEST_TRANSACTIONS 500000
#define N_TEST_ARR 30000
#define M_TEST_MALLOC 500
#endif


void* test_arr[N_TEST_ARR];

int main ( int argc, char** argv )
{
   Int i, j, k, nbytes, qq;
   unsigned char* chp;
   Arena* a = &arena[VG_AR_CORE];
   srandom(1);
   for (i = 0; i < N_TEST_ARR; i++)
      test_arr[i] = NULL;

   for (i = 0; i < N_TEST_TRANSACTIONS; i++) {
      if (i % 50000 == 0) mallocSanityCheck(a);
      j = random() % N_TEST_ARR;
      if (test_arr[j]) {
         vg_free(a, test_arr[j]);
         test_arr[j] = NULL;
      } else {
         nbytes = 1 + random() % M_TEST_MALLOC;
         qq = random()%64;
         if (qq == 32)
            nbytes *= 17;
         else if (qq == 33)
            nbytes = 0;
         test_arr[j]
            = (i % 17) == 0
                 ? vg_memalign(a, nbytes, 1<< (3+(random()%10)))
                 : vg_malloc( a, nbytes );
         chp = test_arr[j];
         for (k = 0; k < nbytes; k++)
            chp[k] = (unsigned char)(k + 99);
      }
   }

   for (i = 0; i < N_TEST_ARR; i++) {
      if (test_arr[i]) {
         vg_free(a, test_arr[i]);
         test_arr[i] = NULL;
      }
   }
   mallocSanityCheck(a);

   fprintf(stderr, "ALL DONE\n");

   show_arena_stats(a);
   fprintf(stderr, "%d max useful, %d bytes mmap'd (%4.1f%%), %d useful\n",
           a->bytes_on_loan_max,
           a->bytes_mmaped,
           100.0 * (double)a->bytes_on_loan_max / (double)a->bytes_mmaped,
           a->bytes_on_loan );

   return 0;
}
#endif /* 0 */


/*--------------------------------------------------------------------*/
/*--- end                                            vg_malloc2.c ---*/
/*--------------------------------------------------------------------*/