/*--------------------------------------------------------------------*/
/*--- An implementation of malloc/free which doesn't use sbrk.     ---*/
/*---                                                 vg_malloc2.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2005 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/


#include "core.h"
//zz#include "memcheck/memcheck.h"

//#define DEBUG_MALLOC      // turn on heavyweight debugging machinery
//#define VERBOSE_MALLOC    // make verbose, esp. in debugging machinery

/*------------------------------------------------------------*/
/*--- Main types                                           ---*/
/*------------------------------------------------------------*/

#define VG_N_MALLOC_LISTS 16    // do not change this

// The amount you can ask for is limited only by sizeof(SizeT)...
#define MAX_PSZB (~((SizeT)0x0))

typedef UChar UByte;
/* Block layout:

      this block total szB     (sizeof(SizeT) bytes)
      freelist previous ptr    (sizeof(void*) bytes)
      red zone bytes           (depends on .rz_szB field of Arena)
      (payload bytes)
      red zone bytes           (depends on .rz_szB field of Arena)
      freelist next ptr        (sizeof(void*) bytes)
      this block total szB     (sizeof(SizeT) bytes)

   Total size in bytes (bszB) and payload size in bytes (pszB)
   are related by:

      bszB == pszB + 2*sizeof(SizeT) + 2*sizeof(void*) + 2*a->rz_szB

   Furthermore, both size fields in the block have their least-significant
   bit set if the block is not in use, and unset if it is in use.
   (The bottom 3 or so bits are always free for this because of alignment.)
   A block size of zero is not possible, because a block always has at
   least two SizeTs and two pointers of overhead.

   Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned.  This is
   achieved by ensuring that Superblocks are VG_MIN_MALLOC_SZB-aligned
   (see newSuperblock() for how), and that the lengths of the following
   things are a multiple of VG_MIN_MALLOC_SZB:
   - Superblock admin section lengths (due to elastic padding)
   - Block admin section (low and high) lengths (due to elastic redzones)
   - Block payload lengths (due to req_pszB rounding up)
*/
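// Worked example (illustrative, not from the original source): on a
// hypothetical 32-bit build with sizeof(SizeT) == sizeof(void*) == 4
// and an (already elastically rounded) rz_szB of 8, the per-block
// overhead is 2*4 + 2*4 + 2*8 == 32 bytes, so a 32-byte payload gives
// bszB == 64.  While such a block sits on a freelist, both size fields
// hold 64|1 == 65; once allocated they hold 64, with the low bit clear.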
typedef
   struct {
      // No fields are actually used in this struct, because a Block has
      // loads of variable sized fields and so can't be accessed
      // meaningfully with normal fields.  So we use access functions all
      // the time.  This struct gives us a type to use, though.  Also, we
      // make sizeof(Block) 1 byte so that we can do arithmetic with the
      // Block* type in increments of 1!
      UByte dummy;
   }
   Block;

// A superblock.  'padding' is never used, it just ensures that if the
// entire Superblock is aligned to VG_MIN_MALLOC_SZB, then payload_bytes[]
// will be too.  It can add small amounts of padding unnecessarily -- eg.
// 8 bytes on 32-bit machines with an 8-byte VG_MIN_MALLOC_SZB -- because
// it's too hard to make a constant expression that works perfectly in all
// cases.
// payload_bytes[] is made a single big Block when the Superblock is
// created, and then can be split and the splittings remerged, but Blocks
// always cover its entire length -- there are never any unused bytes at
// the end, for example.
typedef
   struct _Superblock {
      struct _Superblock* next;
      SizeT n_payload_bytes;
      UByte padding[ VG_MIN_MALLOC_SZB -
                        ((sizeof(struct _Superblock*) + sizeof(SizeT)) %
                         VG_MIN_MALLOC_SZB) ];
      UByte payload_bytes[0];
   }
   Superblock;

// An arena.  'freelist' is a circular, doubly-linked list.  'rz_szB' is
// elastic, in that it can be bigger than asked-for to ensure alignment.
typedef
   struct {
      Char*       name;
      Bool        clientmem;        // Allocates in the client address space?
      SizeT       rz_szB;           // Red zone size in bytes
      SizeT       min_sblock_szB;   // Minimum superblock size in bytes
      Block*      freelist[VG_N_MALLOC_LISTS];
      Superblock* sblocks;
      // Stats only.
      SizeT bytes_on_loan;
      SizeT bytes_mmaped;
      SizeT bytes_on_loan_max;
   }
   Arena;


/*------------------------------------------------------------*/
/*--- Low-level functions for working with Blocks.         ---*/
/*------------------------------------------------------------*/

#define SIZE_T_0x1  ((SizeT)0x1)

// Mark a bszB as in-use, and not in-use.
static __inline__
SizeT mk_inuse_bszB ( SizeT bszB )
{
   vg_assert(bszB != 0);
   return bszB & (~SIZE_T_0x1);
}
static __inline__
SizeT mk_free_bszB ( SizeT bszB )
{
   vg_assert(bszB != 0);
   return bszB | SIZE_T_0x1;
}

// Remove the in-use/not-in-use attribute from a bszB, leaving just
// the size.
static __inline__
SizeT mk_plain_bszB ( SizeT bszB )
{
   vg_assert(bszB != 0);
   return bszB & (~SIZE_T_0x1);
}

// Does this bszB have the in-use attribute?
static __inline__
Bool is_inuse_bszB ( SizeT bszB )
{
   vg_assert(bszB != 0);
   return (0 != (bszB & SIZE_T_0x1)) ? False : True;
}

// Set and get the lower size field of a block.
static __inline__
void set_bszB_lo ( Block* b, SizeT bszB )
{
   *(SizeT*)&b[0] = bszB;
}
static __inline__
SizeT get_bszB_lo ( Block* b )
{
   return *(SizeT*)&b[0];
}

// Get the address of the last byte in a block
static __inline__
UByte* last_byte ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return &b2[mk_plain_bszB(get_bszB_lo(b)) - 1];
}

// Set and get the upper size field of a block.
static __inline__
void set_bszB_hi ( Block* b, SizeT bszB )
{
   UByte* b2 = (UByte*)b;
   UByte* lb = last_byte(b);
   vg_assert(lb == &b2[mk_plain_bszB(bszB) - 1]);
   *(SizeT*)&lb[-sizeof(SizeT) + 1] = bszB;
}
static __inline__
SizeT get_bszB_hi ( Block* b )
{
   UByte* lb = last_byte(b);
   return *(SizeT*)&lb[-sizeof(SizeT) + 1];
}

// Return the lower, upper and total overhead in bytes for a block.
// These are determined purely by which arena the block lives in.
static __inline__
SizeT overhead_szB_lo ( Arena* a )
{
   return sizeof(SizeT) + sizeof(void*) + a->rz_szB;
}
static __inline__
SizeT overhead_szB_hi ( Arena* a )
{
   return a->rz_szB + sizeof(void*) + sizeof(SizeT);
}
static __inline__
SizeT overhead_szB ( Arena* a )
{
   return overhead_szB_lo(a) + overhead_szB_hi(a);
}

// Given the addr of a block, return the addr of its payload.
static __inline__
UByte* get_block_payload ( Arena* a, Block* b )
{
   UByte* b2 = (UByte*)b;
   return & b2[ overhead_szB_lo(a) ];
}
// Given the addr of a block's payload, return the addr of the block itself.
static __inline__
Block* get_payload_block ( Arena* a, UByte* payload )
{
   return (Block*)&payload[ -overhead_szB_lo(a) ];
}
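// Illustrative sketch (assumed figures, not original code): with the
// 32-bit numbers used in the block-layout example above,
// overhead_szB_lo(a) == 4 + 4 + 8 == 16, so
//    get_block_payload(a, b) == (UByte*)b + 16
//    get_payload_block(a, p) == (Block*)(p - 16)
// and the two functions are exact inverses of each other.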

// Set and get the next and previous link fields of a block.
static __inline__
void set_prev_b ( Block* b, Block* prev_p )
{
   UByte* b2 = (UByte*)b;
   *(Block**)&b2[sizeof(SizeT)] = prev_p;
}
static __inline__
void set_next_b ( Block* b, Block* next_p )
{
   UByte* lb = last_byte(b);
   *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1] = next_p;
}
static __inline__
Block* get_prev_b ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return *(Block**)&b2[sizeof(SizeT)];
}
static __inline__
Block* get_next_b ( Block* b )
{
   UByte* lb = last_byte(b);
   return *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1];
}


// Get the block immediately preceding this one in the Superblock.
static __inline__
Block* get_predecessor_block ( Block* b )
{
   UByte* b2   = (UByte*)b;
   SizeT  bszB = mk_plain_bszB( (*(SizeT*)&b2[-sizeof(SizeT)]) );
   return (Block*)&b2[-bszB];
}

// Read and write the lower and upper red-zone bytes of a block.
static __inline__
void set_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
{
   UByte* b2 = (UByte*)b;
   b2[sizeof(SizeT) + sizeof(void*) + rz_byteno] = v;
}
static __inline__
void set_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
{
   UByte* lb = last_byte(b);
   lb[-sizeof(SizeT) - sizeof(void*) - rz_byteno] = v;
}
static __inline__
UByte get_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno )
{
   UByte* b2 = (UByte*)b;
   return b2[sizeof(SizeT) + sizeof(void*) + rz_byteno];
}
static __inline__
UByte get_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno )
{
   UByte* lb = last_byte(b);
   return lb[-sizeof(SizeT) - sizeof(void*) - rz_byteno];
}

// Return the minimum bszB for a block in this arena.  Can have zero-length
// payloads, so it's the size of the admin bytes.
static __inline__
SizeT min_useful_bszB ( Arena* a )
{
   return overhead_szB(a);
}

// Convert payload size <--> block size (both in bytes).
static __inline__
SizeT pszB_to_bszB ( Arena* a, SizeT pszB )
{
   return pszB + overhead_szB(a);
}
static __inline__
SizeT bszB_to_pszB ( Arena* a, SizeT bszB )
{
   vg_assert(bszB >= overhead_szB(a));
   return bszB - overhead_szB(a);
}


/*------------------------------------------------------------*/
/*--- Arena management                                      ---*/
/*------------------------------------------------------------*/

#define CORE_ARENA_MIN_SZB 1048576

// The arena structures themselves.
static Arena vg_arena[VG_N_ARENAS];

// Functions external to this module identify arenas using ArenaIds,
// not Arena*s.  This fn converts the former to the latter.
static Arena* arenaId_to_ArenaP ( ArenaId arena )
{
   vg_assert(arena >= 0 && arena < VG_N_ARENAS);
   return & vg_arena[arena];
}

// Initialise an arena.  rz_szB is the minimum redzone size; it might be
// made bigger to ensure that VG_MIN_MALLOC_SZB is observed.
static
void arena_init ( ArenaId aid, Char* name, SizeT rz_szB, SizeT min_sblock_szB )
{
   SizeT  i;
   Arena* a = arenaId_to_ArenaP(aid);

   vg_assert(rz_szB < 128);   // ensure reasonable size
   vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
   a->name      = name;
   a->clientmem = ( VG_AR_CLIENT == aid ? True : False );

   // The size of the low and high admin sections in a block must be a
   // multiple of VG_MIN_MALLOC_SZB.  So we round up the asked-for
   // redzone size if necessary to achieve this.
   a->rz_szB = rz_szB;
   while (0 != overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++;
   vg_assert(overhead_szB_lo(a) == overhead_szB_hi(a));

   a->min_sblock_szB = min_sblock_szB;
   for (i = 0; i < VG_N_MALLOC_LISTS; i++) a->freelist[i] = NULL;
   a->sblocks           = NULL;
   a->bytes_on_loan     = 0;
   a->bytes_mmaped      = 0;
   a->bytes_on_loan_max = 0;
}
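// Example of the elastic rounding above (illustrative, assumed figures):
// on a hypothetical 32-bit build with VG_MIN_MALLOC_SZB == 8, asking for
// rz_szB == 4 gives overhead_szB_lo(a) == 4+4+4 == 12, which is not a
// multiple of 8, so the loop bumps rz_szB up to 8, at which point
// overhead_szB_lo(a) == 16 and the loop stops.  This matches the
// "4 becomes 8" remark in ensure_mm_init() below.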

/* Print vital stats for an arena. */
void VG_(print_all_arena_stats) ( void )
{
   UInt i;
   for (i = 0; i < VG_N_ARENAS; i++) {
      Arena* a = arenaId_to_ArenaP(i);
      VG_(message)(Vg_DebugMsg,
         "%8s: %8d mmap'd, %8d/%8d max/curr",
         a->name, a->bytes_mmaped, a->bytes_on_loan_max, a->bytes_on_loan
      );
   }
}

/* This library is self-initialising, which makes it more self-contained,
   less coupled with the outside world.  Hence VG_(arena_malloc)() and
   VG_(arena_free)() below always call ensure_mm_init() to ensure things are
   correctly initialised.  */
static
void ensure_mm_init ( void )
{
   static SizeT client_rz_szB;
   static Bool  init_done = False;

   if (init_done) {
      // Make sure the client arena's redzone size never changes.  Could
      // happen if VG_(arena_malloc) was called too early, ie. before the
      // tool was loaded.
      vg_assert(client_rz_szB == VG_(vg_malloc_redzone_szB));
      return;
   }

   /* No particular reason for this figure, it's just smallish */
   tl_assert(VG_(vg_malloc_redzone_szB) < 128);
   client_rz_szB = VG_(vg_malloc_redzone_szB);

   /* Use checked red zones (of various sizes) for our internal stuff,
      and an unchecked zone of arbitrary size for the client.  Of
      course the client's red zone can be checked by the tool, eg.
      by using addressability maps, but not by the mechanism implemented
      here, which merely checks at the time of freeing that the red
      zone bytes are unchanged.

      Nb: redzone sizes are *minimums*; they could be made bigger to ensure
      alignment.  Eg. on 32-bit machines, 4 becomes 8, and 12 becomes 16;
      but on 64-bit machines 4 stays as 4, and 12 stays as 12 --- the extra
      4 bytes in both are accounted for by the larger prev/next ptr.
   */
   arena_init ( VG_AR_CORE,      "core",     4,              CORE_ARENA_MIN_SZB );
   arena_init ( VG_AR_TOOL,      "tool",     4,              1048576 );
   arena_init ( VG_AR_SYMTAB,    "symtab",   4,              1048576 );
   arena_init ( VG_AR_JITTER,    "JITter",   4,              32768 );
   arena_init ( VG_AR_CLIENT,    "client",   client_rz_szB,  1048576 );
   arena_init ( VG_AR_DEMANGLE,  "demangle", 12/*paranoid*/, 65536 );
   arena_init ( VG_AR_EXECTXT,   "exectxt",  4,              65536 );
   arena_init ( VG_AR_ERRORS,    "errors",   4,              65536 );
   arena_init ( VG_AR_TRANSIENT, "transien", 4,              65536 );

   init_done = True;
#  ifdef DEBUG_MALLOC
   VG_(sanity_check_malloc_all)();
#  endif
}


/*------------------------------------------------------------*/
/*--- Superblock management                                 ---*/
/*------------------------------------------------------------*/

// Align ptr p upwards to an align-sized boundary.
static
void* align_upwards ( void* p, SizeT align )
{
   Addr a = (Addr)p;
   if ((a % align) == 0) return (void*)a;
   return (void*)(a - (a % align) + align);
}
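// For instance (illustrative): align_upwards((void*)0x1003, 8) computes
// 0x1003 % 8 == 3 and returns 0x1003 - 3 + 8 == 0x1008; an already
// aligned pointer such as 0x1008 is returned unchanged.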

// If not enough memory available, either aborts (for non-client memory)
// or returns 0 (for client memory).
static
Superblock* newSuperblock ( Arena* a, SizeT cszB )
{
   // The extra VG_MIN_MALLOC_SZB bytes are for possible alignment up.
   static UByte bootstrap_superblock[CORE_ARENA_MIN_SZB+VG_MIN_MALLOC_SZB];
   static Bool  called_before = True; //False;
   Superblock* sb;

   // Take into account admin bytes in the Superblock.
   cszB += sizeof(Superblock);

   if (cszB < a->min_sblock_szB) cszB = a->min_sblock_szB;
   while ((cszB % VKI_PAGE_SIZE) > 0) cszB++;

   if (!called_before) {
      // First time we're called -- use the special static bootstrap
      // superblock (see comment at top of main() for details).
      called_before = True;
      vg_assert(a == arenaId_to_ArenaP(VG_AR_CORE));
      vg_assert(CORE_ARENA_MIN_SZB >= cszB);
      // Ensure sb is suitably aligned.
      sb = (Superblock*)align_upwards( bootstrap_superblock,
                                       VG_MIN_MALLOC_SZB );
   } else if (a->clientmem) {
      // client allocation -- return 0 to client if it fails
      sb = (Superblock *)
           VG_(client_alloc)(0, cszB,
                             VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC, 0);
      if (NULL == sb)
         return 0;
   } else {
      // non-client allocation -- aborts if it fails
      sb = VG_(get_memory_from_mmap) ( cszB, "newSuperblock" );
   }
   vg_assert(NULL != sb);
   //zzVALGRIND_MAKE_WRITABLE(sb, cszB);
   vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
   sb->n_payload_bytes = cszB - sizeof(Superblock);
   a->bytes_mmaped += cszB;
   if (0)
      VG_(message)(Vg_DebugMsg, "newSuperblock, %d payload bytes",
                   sb->n_payload_bytes);
   return sb;
}

// Find the superblock containing the given chunk.
static
Superblock* findSb ( Arena* a, Block* b )
{
   Superblock* sb;
   for (sb = a->sblocks; sb; sb = sb->next)
      if ((Block*)&sb->payload_bytes[0] <= b
          && b < (Block*)&sb->payload_bytes[sb->n_payload_bytes])
         return sb;
   VG_(printf)("findSb: can't find pointer %p in arena `%s'\n", b, a->name );
   VG_(core_panic)("findSb: VG_(arena_free)() in wrong arena?");
   return NULL; /*NOTREACHED*/
}

/*------------------------------------------------------------*/
/*--- Command line options                                  ---*/
/*------------------------------------------------------------*/

/* Round malloc sizes up to a multiple of VG_SLOPPY_MALLOC_SZB bytes?
   default: NO
   Nb: the allocator always rounds blocks up to a multiple of
   VG_MIN_MALLOC_SZB.  VG_(clo_sloppy_malloc) is relevant eg. for
   Memcheck, which will be byte-precise with addressability maps on its
   malloc allocations unless --sloppy-malloc=yes.  */
Bool VG_(clo_sloppy_malloc) = False;

/* DEBUG: print malloc details?  default: NO */
Bool VG_(clo_trace_malloc) = False;

/* Minimum alignment in functions that don't specify alignment explicitly.
   default: VG_MIN_MALLOC_SZB. */
UInt VG_(clo_alignment) = VG_MIN_MALLOC_SZB;


Bool VG_(replacement_malloc_process_cmd_line_option)(Char* arg)
{
   if (VG_CLO_STREQN(12, arg, "--alignment=")) {
      VG_(clo_alignment) = (UInt)VG_(atoll)(&arg[12]);

      if (VG_(clo_alignment) < VG_MIN_MALLOC_SZB
          || VG_(clo_alignment) > 4096
          || VG_(log2)( VG_(clo_alignment) ) == -1 /* not a power of 2 */) {
         VG_(message)(Vg_UserMsg, "");
         VG_(message)(Vg_UserMsg,
            "Invalid --alignment= setting.  "
            "Should be a power of 2, >= %d, <= 4096.", VG_MIN_MALLOC_SZB);
         VG_(bad_option)("--alignment");
      }
   }

   else VG_BOOL_CLO("--sloppy-malloc", VG_(clo_sloppy_malloc))
   else VG_BOOL_CLO("--trace-malloc",  VG_(clo_trace_malloc))
   else
      return False;

   return True;
}

void VG_(replacement_malloc_print_usage)(void)
{
   VG_(printf)(
"    --sloppy-malloc=no|yes    round malloc sizes to multiple of %d? [no]\n"
"    --alignment=<number>      set minimum alignment of allocations [%d]\n",
   VG_SLOPPY_MALLOC_SZB, VG_MIN_MALLOC_SZB
   );
}

void VG_(replacement_malloc_print_debug_usage)(void)
{
   VG_(printf)(
"    --trace-malloc=no|yes     show client malloc details? [no]\n"
   );
}

/*------------------------------------------------------------*/
/*--- Functions for working with freelists.                 ---*/
/*------------------------------------------------------------*/

// Nb: Determination of which freelist a block lives on is based on the
// payload size, not block size.

// Convert a payload size in bytes to a freelist number.
static
UInt pszB_to_listNo ( SizeT pszB )
{
   vg_assert(0 == pszB % VG_MIN_MALLOC_SZB);
   pszB /= VG_MIN_MALLOC_SZB;
   if (pszB <= 2)   return  0;
   if (pszB <= 3)   return  1;
   if (pszB <= 4)   return  2;
   if (pszB <= 5)   return  3;
   if (pszB <= 6)   return  4;
   if (pszB <= 7)   return  5;
   if (pszB <= 8)   return  6;
   if (pszB <= 9)   return  7;
   if (pszB <= 10)  return  8;
   if (pszB <= 11)  return  9;
   if (pszB <= 12)  return 10;
   if (pszB <= 16)  return 11;
   if (pszB <= 32)  return 12;
   if (pszB <= 64)  return 13;
   if (pszB <= 128) return 14;
   return 15;
}
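// Illustrative mapping (assuming VG_MIN_MALLOC_SZB == 8): pszB 16 scales
// to 2 and lands on list 0; pszB 64 scales to 8, list 6; pszB 96 scales
// to 12, list 10; pszB 1024 scales to 128, list 14; anything bigger
// falls into the catch-all list 15.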

// What is the minimum payload size for a given list?
static
SizeT listNo_to_pszB_min ( UInt listNo )
{
   SizeT pszB = 0;
   vg_assert(listNo <= VG_N_MALLOC_LISTS);
   while (pszB_to_listNo(pszB) < listNo) pszB += VG_MIN_MALLOC_SZB;
   return pszB;
}

// What is the maximum payload size for a given list?
static
SizeT listNo_to_pszB_max ( UInt listNo )
{
   vg_assert(listNo <= VG_N_MALLOC_LISTS);
   if (listNo == VG_N_MALLOC_LISTS-1) {
      return MAX_PSZB;
   } else {
      return listNo_to_pszB_min(listNo+1) - 1;
   }
}


/* A nasty hack to try and reduce fragmentation.  Try and replace
   a->freelist[lno] with another block on the same list but with a
   lower address, with the idea of attempting to recycle the same
   blocks rather than cruise through the address space. */
static
void swizzle ( Arena* a, UInt lno )
{
   Block* p_best;
   Block* pp;
   Block* pn;
   UInt   i;

   p_best = a->freelist[lno];
   if (p_best == NULL) return;

   pn = pp = p_best;
   for (i = 0; i < 20; i++) {
      pn = get_next_b(pn);
      pp = get_prev_b(pp);
      if (pn < p_best) p_best = pn;
      if (pp < p_best) p_best = pp;
   }
   if (p_best < a->freelist[lno]) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)("retreat by %d\n", a->freelist[lno] - p_best);
#     endif
      a->freelist[lno] = p_best;
   }
}


/*------------------------------------------------------------*/
/*--- Sanity-check/debugging machinery.                     ---*/
/*------------------------------------------------------------*/

#define VG_REDZONE_LO_MASK 0x31
#define VG_REDZONE_HI_MASK 0x7c

// Do some crude sanity checks on a Block.
static
Bool blockSane ( Arena* a, Block* b )
{
#  define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
   UInt i;
   if (get_bszB_lo(b) != get_bszB_hi(b))
      {BLEAT("sizes");return False;}
   if (!a->clientmem && is_inuse_bszB(get_bszB_lo(b))) {
      for (i = 0; i < a->rz_szB; i++) {
         if (get_rz_lo_byte(a, b, i) !=
             (UByte)(((Addr)b&0xff) ^ VG_REDZONE_LO_MASK))
            {BLEAT("redzone-lo");return False;}
         if (get_rz_hi_byte(a, b, i) !=
             (UByte)(((Addr)b&0xff) ^ VG_REDZONE_HI_MASK))
            {BLEAT("redzone-hi");return False;}
      }
   }
   return True;
#  undef BLEAT
}
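// Redzone pattern example (illustrative): for an in-use block in a
// non-client arena at an address whose low byte is 0x40, every lo
// redzone byte must equal 0x40 ^ 0x31 == 0x71 and every hi redzone byte
// 0x40 ^ 0x7c == 0x3c; mkInuseBlock() below writes exactly these values,
// so any deviation here means the redzone was overwritten.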

// Print superblocks (only for debugging).
static
void ppSuperblocks ( Arena* a )
{
   UInt i, blockno;
   SizeT b_bszB;
   Block* b;
   Superblock* sb = a->sblocks;
   blockno = 1;

   while (sb) {
      VG_(printf)( "\n" );
      VG_(printf)( "superblock %d at %p, sb->n_pl_bs = %d, next = %p\n",
                   blockno++, sb, sb->n_payload_bytes, sb->next );
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         b      = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_lo(b);
         VG_(printf)( "   block at %d, bszB %d: ", i, mk_plain_bszB(b_bszB) );
         VG_(printf)( "%s, ", is_inuse_bszB(b_bszB) ? "inuse" : "free");
         VG_(printf)( "%s\n", blockSane(a, b) ? "ok" : "BAD" );
      }
      vg_assert(i == sb->n_payload_bytes);   // no overshoot at end of Sb
      sb = sb->next;
   }
   VG_(printf)( "end of superblocks\n\n" );
}

// Sanity check both the superblocks and the chains.
static void sanity_check_malloc_arena ( ArenaId aid )
{
   UInt i, superblockctr, blockctr_sb, blockctr_li;
   UInt blockctr_sb_free, listno;
   SizeT b_bszB, b_pszB, list_min_pszB, list_max_pszB;
   Superblock* sb;
   Bool thisFree, lastWasFree;
   Block* b;
   Block* b_prev;
   SizeT arena_bytes_on_loan;
   Arena* a;

#  define BOMB VG_(core_panic)("sanity_check_malloc_arena")

   a = arenaId_to_ArenaP(aid);

   // First, traverse all the superblocks, inspecting the Blocks in each.
   superblockctr = blockctr_sb = blockctr_sb_free = 0;
   arena_bytes_on_loan = 0;
   sb = a->sblocks;
   while (sb) {
      lastWasFree = False;
      superblockctr++;
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         blockctr_sb++;
         b      = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_lo(b);
         if (!blockSane(a, b)) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d (bszB %d): "
                        " BAD\n", sb, i, b_bszB );
            BOMB;
         }
         thisFree = !is_inuse_bszB(b_bszB);
         if (thisFree && lastWasFree) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d (bszB %d): "
                        "UNMERGED FREES\n",
                        sb, i, b_bszB );
            BOMB;
         }
         if (thisFree) blockctr_sb_free++;
         if (!thisFree)
            arena_bytes_on_loan += bszB_to_pszB(a, b_bszB);
         lastWasFree = thisFree;
      }
      if (i > sb->n_payload_bytes) {
         VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
                      "overshoots end\n", sb);
         BOMB;
      }
      sb = sb->next;
   }

   if (arena_bytes_on_loan != a->bytes_on_loan) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: a->bytes_on_loan %d, "
                   "arena_bytes_on_loan %d: "
                   "MISMATCH\n", a->bytes_on_loan, arena_bytes_on_loan);
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   /* Second, traverse each list, checking that the back pointers make
      sense, counting blocks encountered, and checking that each block
      is an appropriate size for this list. */
   blockctr_li = 0;
   for (listno = 0; listno < VG_N_MALLOC_LISTS; listno++) {
      list_min_pszB = listNo_to_pszB_min(listno);
      list_max_pszB = listNo_to_pszB_max(listno);
      b = a->freelist[listno];
      if (b == NULL) continue;
      while (True) {
         b_prev = b;
         b = get_next_b(b);
         if (get_prev_b(b) != b_prev) {
            VG_(printf)( "sanity_check_malloc_arena: list %d at %p: "
                         "BAD LINKAGE\n",
                         listno, b );
            BOMB;
         }
         b_pszB = bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(b)));
         if (b_pszB < list_min_pszB || b_pszB > list_max_pszB) {
            VG_(printf)(
               "sanity_check_malloc_arena: list %d at %p: "
               "WRONG CHAIN SIZE %dB (%dB, %dB)\n",
               listno, b, b_pszB, list_min_pszB, list_max_pszB );
            BOMB;
         }
         blockctr_li++;
         if (b == a->freelist[listno]) break;
      }
   }

   if (blockctr_sb_free != blockctr_li) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: BLOCK COUNT MISMATCH "
                   "(via sbs %d, via lists %d)\n",
                   blockctr_sb_free, blockctr_li );
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   if (VG_(clo_verbosity) > 2)
      VG_(message)(Vg_DebugMsg,
                   "%8s: %2d sbs, %5d bs, %2d/%-2d free bs, "
                   "%7d mmap, %7d loan",
                   a->name,
                   superblockctr,
                   blockctr_sb, blockctr_sb_free, blockctr_li,
                   a->bytes_mmaped, a->bytes_on_loan);
#  undef BOMB
}


void VG_(sanity_check_malloc_all) ( void )
{
   UInt i;
   for (i = 0; i < VG_N_ARENAS; i++)
      sanity_check_malloc_arena ( i );
}


/*------------------------------------------------------------*/
/*--- Creating and deleting blocks.                         ---*/
/*------------------------------------------------------------*/

// Mark the bytes at b .. b+bszB-1 as not in use, and add them to the
// relevant free list.

static
void mkFreeBlock ( Arena* a, Block* b, SizeT bszB, UInt b_lno )
{
   SizeT pszB = bszB_to_pszB(a, bszB);
   vg_assert(b_lno == pszB_to_listNo(pszB));
   //zzVALGRIND_MAKE_WRITABLE(b, bszB);
   // Set the size fields and indicate not-in-use.
   set_bszB_lo(b, mk_free_bszB(bszB));
   set_bszB_hi(b, mk_free_bszB(bszB));

   // Add to the relevant list.
   if (a->freelist[b_lno] == NULL) {
      set_prev_b(b, b);
      set_next_b(b, b);
      a->freelist[b_lno] = b;
   } else {
      Block* b_prev = get_prev_b(a->freelist[b_lno]);
      Block* b_next = a->freelist[b_lno];
      set_next_b(b_prev, b);
      set_prev_b(b_next, b);
      set_next_b(b, b_next);
      set_prev_b(b, b_prev);
   }
#  ifdef DEBUG_MALLOC
   (void)blockSane(a,b);
#  endif
}

// Mark the bytes at b .. b+bszB-1 as in use, and set up the block
// appropriately.
static
void mkInuseBlock ( Arena* a, Block* b, SizeT bszB )
{
   UInt i;
   vg_assert(bszB >= min_useful_bszB(a));
   //zzVALGRIND_MAKE_WRITABLE(b, bszB);
   set_bszB_lo(b, mk_inuse_bszB(bszB));
   set_bszB_hi(b, mk_inuse_bszB(bszB));
   set_prev_b(b, NULL);    // Take off freelist
   set_next_b(b, NULL);    // ditto
   if (!a->clientmem) {
      for (i = 0; i < a->rz_szB; i++) {
         set_rz_lo_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ VG_REDZONE_LO_MASK));
         set_rz_hi_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ VG_REDZONE_HI_MASK));
      }
   }
#  ifdef DEBUG_MALLOC
   (void)blockSane(a,b);
#  endif
}

// Remove a block from a given list.  Does no sanity checking.
static
void unlinkBlock ( Arena* a, Block* b, UInt listno )
{
   vg_assert(listno < VG_N_MALLOC_LISTS);
   if (get_prev_b(b) == b) {
      // Only one element in the list; treat it specially.
      vg_assert(get_next_b(b) == b);
      a->freelist[listno] = NULL;
   } else {
      Block* b_prev = get_prev_b(b);
      Block* b_next = get_next_b(b);
      a->freelist[listno] = b_prev;
      set_next_b(b_prev, b_next);
      set_prev_b(b_next, b_prev);
      swizzle ( a, listno );
   }
   set_prev_b(b, NULL);
   set_next_b(b, NULL);
}


/*------------------------------------------------------------*/
/*--- Core-visible functions.                               ---*/
/*------------------------------------------------------------*/

// Align the request size.
static __inline__
SizeT align_req_pszB ( SizeT req_pszB )
{
   SizeT n = VG_MIN_MALLOC_SZB-1;
   return ((req_pszB + n) & (~n));
}
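// For example (illustrative, assuming VG_MIN_MALLOC_SZB == 8): n == 7,
// so a request of 10 becomes (10+7) & ~7 == 16, and a request of 16
// stays 16 -- payload sizes are always rounded up to the next multiple
// of VG_MIN_MALLOC_SZB.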

void* VG_(arena_malloc) ( ArenaId aid, SizeT req_pszB )
{
   SizeT       req_bszB, frag_bszB, b_bszB;
   UInt        lno;
   Superblock* new_sb;
   Block*      b = NULL;
   Arena*      a;
   void*       v;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   vg_assert(req_pszB < MAX_PSZB);
   req_pszB = align_req_pszB(req_pszB);
   req_bszB = pszB_to_bszB(a, req_pszB);

   // Scan through all the big-enough freelists for a block.
   for (lno = pszB_to_listNo(req_pszB); lno < VG_N_MALLOC_LISTS; lno++) {
      b = a->freelist[lno];
      if (NULL == b) continue;   // If this list is empty, try the next one.
      while (True) {
         b_bszB = mk_plain_bszB(get_bszB_lo(b));
         if (b_bszB >= req_bszB) goto obtained_block;   // success!
         b = get_next_b(b);
         if (b == a->freelist[lno]) break;   // traversed entire freelist
      }
   }

   // If we reach here, no suitable block found, allocate a new superblock
   vg_assert(lno == VG_N_MALLOC_LISTS);
   new_sb = newSuperblock(a, req_bszB);
   if (NULL == new_sb) {
      // Should only fail if for client, otherwise, should have aborted
      // already.
      vg_assert(VG_AR_CLIENT == aid);
      return NULL;
   }
   new_sb->next = a->sblocks;
   a->sblocks = new_sb;
   b = (Block*)&new_sb->payload_bytes[0];
   lno = pszB_to_listNo(bszB_to_pszB(a, new_sb->n_payload_bytes));
   mkFreeBlock ( a, b, new_sb->n_payload_bytes, lno);
   // fall through

  obtained_block:
   // Ok, we can allocate from b, which lives in list lno.
   vg_assert(b != NULL);
   vg_assert(lno < VG_N_MALLOC_LISTS);
   vg_assert(a->freelist[lno] != NULL);
   b_bszB = mk_plain_bszB(get_bszB_lo(b));
   // req_bszB is the size of the block we are after.  b_bszB is the
   // size of what we've actually got.
   vg_assert(b_bszB >= req_bszB);

   // Could we split this block and still get a useful fragment?
   frag_bszB = b_bszB - req_bszB;
   if (frag_bszB >= min_useful_bszB(a)) {
      // Yes, split block in two, put the fragment on the appropriate free
      // list, and update b_bszB accordingly.
      // printf( "split %dB into %dB and %dB\n", b_bszB, req_bszB, frag_bszB );
      unlinkBlock(a, b, lno);
      mkInuseBlock(a, b, req_bszB);
      mkFreeBlock(a, &b[req_bszB], frag_bszB,
                  pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
      b_bszB = mk_plain_bszB(get_bszB_lo(b));
   } else {
      // No, mark as in use and use as-is.
      unlinkBlock(a, b, lno);
      mkInuseBlock(a, b, b_bszB);
   }
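   // Numeric sketch of the split decision above (illustrative, assumed
   // figures): with the 32 bytes of per-block overhead used in the
   // earlier examples, min_useful_bszB(a) == 32, so a 128-byte free
   // block satisfying a req_bszB of 64 leaves frag_bszB == 64 and is
   // split; an 88-byte block would leave only 24, so it is handed out
   // whole and the caller simply gets a larger payload than requested.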

   // Update stats
   a->bytes_on_loan += bszB_to_pszB(a, b_bszB);
   if (a->bytes_on_loan > a->bytes_on_loan_max)
      a->bytes_on_loan_max = a->bytes_on_loan;

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   VGP_POPCC(VgpMalloc);
   v = get_block_payload(a, b);
   vg_assert( (((Addr)v) & (VG_MIN_MALLOC_SZB-1)) == 0 );

   VALGRIND_MALLOCLIKE_BLOCK(v, req_pszB, 0, False);
   return v;
}


void VG_(arena_free) ( ArenaId aid, void* ptr )
{
   Superblock* sb;
   UByte* sb_start;
   UByte* sb_end;
   Block* other;
   Block* b;
   SizeT b_bszB, b_pszB, other_bszB;
   UInt b_listno;
   Arena* a;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   if (ptr == NULL) {
      VGP_POPCC(VgpMalloc);
      return;
   }

   b = get_payload_block(a, ptr);

#  ifdef DEBUG_MALLOC
   vg_assert(blockSane(a, b));
#  endif

   a->bytes_on_loan -= bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(b)));

   sb       = findSb( a, b );
   sb_start = &sb->payload_bytes[0];
   sb_end   = &sb->payload_bytes[sb->n_payload_bytes - 1];

   // Put this chunk back on a list somewhere.
   b_bszB   = get_bszB_lo(b);
   b_pszB   = bszB_to_pszB(a, b_bszB);
   b_listno = pszB_to_listNo(b_pszB);
   mkFreeBlock( a, b, b_bszB, b_listno );

   // See if this block can be merged with its successor.
   // First test if we're far enough before the superblock's end to possibly
   // have a successor.
   other = b + b_bszB;
   if (other+min_useful_bszB(a)-1 <= (Block*)sb_end) {
      // Ok, we have a successor, merge if it's not in use.
      other_bszB = get_bszB_lo(other);
      if (!is_inuse_bszB(other_bszB)) {
         // VG_(printf)( "merge-successor\n");
         other_bszB = mk_plain_bszB(other_bszB);
#        ifdef DEBUG_MALLOC
         vg_assert(blockSane(a, other));
#        endif
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other, pszB_to_listNo(bszB_to_pszB(a,other_bszB)) );
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
      }
   } else {
      // Not enough space for successor: check that b is the last block
      // ie. there are no unused bytes at the end of the Superblock.
      vg_assert(other-1 == (Block*)sb_end);
   }

   // Then see if this block can be merged with its predecessor.
   // First test if we're far enough after the superblock's start to possibly
   // have a predecessor.
   if (b >= (Block*)sb_start + min_useful_bszB(a)) {
      // Ok, we have a predecessor, merge if it's not in use.
      other = get_predecessor_block( b );
      other_bszB = get_bszB_lo(other);
      if (!is_inuse_bszB(other_bszB)) {
         // VG_(printf)( "merge-predecessor\n");
         other_bszB = mk_plain_bszB(other_bszB);
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other, pszB_to_listNo(bszB_to_pszB(a, other_bszB)) );
         b = other;
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
      }
   } else {
      // Not enough space for predecessor: check that b is the first block,
      // ie. there are no unused bytes at the start of the Superblock.
      vg_assert((Block*)sb_start == b);
   }

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   VALGRIND_FREELIKE_BLOCK(ptr, 0);

   VGP_POPCC(VgpMalloc);
}


/*
   The idea for malloc_aligned() is to allocate a big block, base, and
   then split it into two parts: frag, which is returned to the
   free pool, and align, which is the bit we're really after.  Here's
   a picture.  L and H denote the block lower and upper overheads, in
   bytes.  The details are gruesome.  Note it is slightly complicated
   because the initial request to generate base may return a bigger
   block than we asked for, so it is important to distinguish the base
   request size and the base actual size.

   frag_b                   align_b
   |                        |
   |    frag_p              |    align_p
   |    |                   |    |
   v    v                   v    v

   +---+                +---+---+               +---+
   | L |----------------| H | L |---------------| H |
   +---+                +---+---+               +---+

   ^    ^                        ^
   |    |                        :
   |    base_p                   this addr must be aligned
   |
   base_b

   .    .               .   .   .               .   .
   <------ frag_bszB ------->   .               .   .
   .    <------------- base_pszB_act ----------->   .
   .    .               .   .   .               .   .

*/
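/* Numeric sketch of the scheme above (illustrative, assumed figures):
   with 32 bytes of block overhead, min_useful_bszB(a) == 32, so a
   request for req_pszB == 64 at req_alignB == 256 asks the underlying
   malloc for base_pszB_req == 64 + 32 + 256 == 352 payload bytes.  The
   first 256-aligned address far enough past base_p to leave room for a
   minimal block becomes align_p, and everything before it is handed
   back as the fragment "frag". */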
nethercote7ac7f7b2004-11-02 12:36:02 +00001145void* VG_(arena_malloc_aligned) ( ArenaId aid, SizeT req_alignB, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001146{
nethercote7ac7f7b2004-11-02 12:36:02 +00001147 SizeT base_pszB_req, base_pszB_act, frag_bszB;
nethercote2d5b8162004-08-11 09:40:52 +00001148 Block *base_b, *align_b;
1149 UByte *base_p, *align_p;
nethercote7ac7f7b2004-11-02 12:36:02 +00001150 SizeT saved_bytes_on_loan;
sewardjde4a1d02002-03-22 01:27:54 +00001151 Arena* a;
1152
njn25e49d8e72002-09-23 09:36:25 +00001153 VGP_PUSHCC(VgpMalloc);
1154
sewardjde4a1d02002-03-22 01:27:54 +00001155 ensure_mm_init();
1156 a = arenaId_to_ArenaP(aid);
1157
nethercote7ac7f7b2004-11-02 12:36:02 +00001158 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00001159
nethercote2d5b8162004-08-11 09:40:52 +00001160 // Check that the requested alignment seems reasonable; that is, is
1161 // a power of 2.
1162 if (req_alignB < VG_MIN_MALLOC_SZB
1163 || req_alignB > 1048576
1164 || VG_(log2)( VG_(clo_alignment) ) == -1 /* not a power of 2 */) {
1165 VG_(printf)("VG_(arena_malloc_aligned)(%p, %d, %d)\nbad alignment",
1166 a, req_alignB, req_pszB );
1167 VG_(core_panic)("VG_(arena_malloc_aligned)");
1168 /*NOTREACHED*/
sewardjde4a1d02002-03-22 01:27:54 +00001169 }
nethercote2d5b8162004-08-11 09:40:52 +00001170 // Paranoid
1171 vg_assert(req_alignB % VG_MIN_MALLOC_SZB == 0);
sewardjde4a1d02002-03-22 01:27:54 +00001172
1173 /* Required payload size for the aligned chunk. */
nethercote2d5b8162004-08-11 09:40:52 +00001174 req_pszB = align_req_pszB(req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001175
nethercote2d5b8162004-08-11 09:40:52 +00001176 /* Payload size to request for the big block that we will split up. */
1177 base_pszB_req = req_pszB + min_useful_bszB(a) + req_alignB;
sewardjde4a1d02002-03-22 01:27:54 +00001178
1179 /* Payload ptr for the block we are going to split. Note this
1180 changes a->bytes_on_loan; we save and restore it ourselves. */
1181 saved_bytes_on_loan = a->bytes_on_loan;
nethercote2d5b8162004-08-11 09:40:52 +00001182 base_p = VG_(arena_malloc) ( aid, base_pszB_req );
sewardjde4a1d02002-03-22 01:27:54 +00001183 a->bytes_on_loan = saved_bytes_on_loan;
1184
1185 /* Block ptr for the block we are going to split. */
nethercote2d5b8162004-08-11 09:40:52 +00001186 base_b = get_payload_block ( a, base_p );
sewardjde4a1d02002-03-22 01:27:54 +00001187
1188 /* Pointer to the payload of the aligned block we are going to
1189 return. This has to be suitably aligned. */
nethercote2d5b8162004-08-11 09:40:52 +00001190 align_p = align_upwards ( base_b + 2 * overhead_szB_lo(a)
1191 + overhead_szB_hi(a),
sewardjde4a1d02002-03-22 01:27:54 +00001192 req_alignB );
nethercote2d5b8162004-08-11 09:40:52 +00001193 align_b = get_payload_block(a, align_p);
sewardjde4a1d02002-03-22 01:27:54 +00001194
1195 /* The block size of the fragment we will create. This must be big
1196 enough to actually create a fragment. */
nethercote2d5b8162004-08-11 09:40:52 +00001197 frag_bszB = align_b - base_b;
1198
1199 vg_assert(frag_bszB >= min_useful_bszB(a));
sewardjde4a1d02002-03-22 01:27:54 +00001200
1201 /* The actual payload size of the block we are going to split. */
nethercote2d5b8162004-08-11 09:40:52 +00001202 base_pszB_act = bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(base_b)));
sewardjde4a1d02002-03-22 01:27:54 +00001203
nethercote2d5b8162004-08-11 09:40:52 +00001204 /* Create the fragment block, and put it back on the relevant free list. */
1205 mkFreeBlock ( a, base_b, frag_bszB,
1206 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)) );
sewardjde4a1d02002-03-22 01:27:54 +00001207
1208 /* Create the aligned block. */
nethercote2d5b8162004-08-11 09:40:52 +00001209 mkInuseBlock ( a, align_b,
1210 base_p + base_pszB_act
1211 + overhead_szB_hi(a) - (UByte*)align_b );
sewardjde4a1d02002-03-22 01:27:54 +00001212
1213 /* Final sanity checks. */
nethercote2d5b8162004-08-11 09:40:52 +00001214 vg_assert( is_inuse_bszB(get_bszB_lo(get_payload_block(a, align_p))) );
sewardjde4a1d02002-03-22 01:27:54 +00001215
nethercote2d5b8162004-08-11 09:40:52 +00001216 vg_assert(req_pszB
sewardjde4a1d02002-03-22 01:27:54 +00001217 <=
nethercote2d5b8162004-08-11 09:40:52 +00001218 bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(
1219 get_payload_block(a, align_p))))
sewardjde4a1d02002-03-22 01:27:54 +00001220 );
1221
1222 a->bytes_on_loan
nethercote2d5b8162004-08-11 09:40:52 +00001223 += bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(
1224 get_payload_block(a, align_p))));
sewardjde4a1d02002-03-22 01:27:54 +00001225 if (a->bytes_on_loan > a->bytes_on_loan_max)
1226 a->bytes_on_loan_max = a->bytes_on_loan;
1227
1228# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00001229 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001230# endif
1231
njn25e49d8e72002-09-23 09:36:25 +00001232 VGP_POPCC(VgpMalloc);
1233
nethercote2d5b8162004-08-11 09:40:52 +00001234 vg_assert( (((Addr)align_p) % req_alignB) == 0 );
sewardjb5f6f512005-03-10 23:59:00 +00001235
1236 VALGRIND_MALLOCLIKE_BLOCK(align_p, req_pszB, 0, False);
1237
nethercote2d5b8162004-08-11 09:40:52 +00001238 return align_p;
1239}


SizeT VG_(arena_payload_szB) ( ArenaId aid, void* ptr )
{
   Arena* a = arenaId_to_ArenaP(aid);
   Block* b = get_payload_block(a, ptr);
   return bszB_to_pszB(a, get_bszB_lo(b));
}


/*------------------------------------------------------------*/
/*--- Services layered on top of malloc/free.              ---*/
/*------------------------------------------------------------*/

void* VG_(arena_calloc) ( ArenaId aid, SizeT nmemb, SizeT bytes_per_memb )
{
   SizeT  size;
   UChar* p;

   VGP_PUSHCC(VgpMalloc);

   size = nmemb * bytes_per_memb;
   // Best-effort check against multiplication overflow (not
   // exhaustive; see the sketch below).
   vg_assert(size >= nmemb && size >= bytes_per_memb);

   p = VG_(arena_malloc) ( aid, size );

   VG_(memset)(p, 0, size);

   VALGRIND_MALLOCLIKE_BLOCK(p, size, 0, True);

   VGP_POPCC(VgpMalloc);

   return p;
}
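
/* The assertion in VG_(arena_calloc) is cheap but not exhaustive: on
   a 32-bit SizeT, 0x80000001 * 3 wraps to 0x80000003, which is >=
   both operands and so slips through.  A division-based test catches
   every wraparound.  A sketch, not Valgrind code; the helper name is
   hypothetical. */
#if 0
static Bool mul_would_overflow ( SizeT nmemb, SizeT bytes_per_memb )
{
   /* Overflow iff nmemb * bytes_per_memb > (SizeT)-1, i.e. iff
      bytes_per_memb exceeds the largest multiplier of nmemb that
      still fits.  Division here cannot itself overflow. */
   return nmemb != 0 && bytes_per_memb > ((SizeT)(-1)) / nmemb;
}
#endif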


void* VG_(arena_realloc) ( ArenaId aid, void* ptr, SizeT req_pszB )
{
   Arena* a;
   SizeT  old_bszB, old_pszB;
   UChar  *p_new;
   Block* b;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   vg_assert(req_pszB < MAX_PSZB);

   b = get_payload_block(a, ptr);
   vg_assert(blockSane(a, b));

   old_bszB = get_bszB_lo(b);
   vg_assert(is_inuse_bszB(old_bszB));
   old_bszB = mk_plain_bszB(old_bszB);
   old_pszB = bszB_to_pszB(a, old_bszB);

   /* The existing payload is already big enough; reuse it in place. */
   if (req_pszB <= old_pszB) {
      VGP_POPCC(VgpMalloc);
      return ptr;
   }

   p_new = VG_(arena_malloc) ( aid, req_pszB );

   VG_(memcpy)(p_new, ptr, old_pszB);

   VG_(arena_free)(aid, ptr);

   VGP_POPCC(VgpMalloc);
   return p_new;
}
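
/* Usage sketch for the grow-only semantics above: a request no larger
   than the current payload hands back the same pointer, and on growth
   only the old old_pszB bytes are copied -- the tail of the new block
   is uninitialised.  Hypothetical caller, for illustration only. */
#if 0
static void sketch_realloc_usage ( void )
{
   UChar* p = VG_(arena_malloc) ( VG_AR_TOOL, 10 );
   p = VG_(arena_realloc) ( VG_AR_TOOL, p, 100 );  /* p[0..9] preserved */
   VG_(arena_free) ( VG_AR_TOOL, p );
}
#endif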


/*------------------------------------------------------------*/
/*--- Tool-visible functions.                              ---*/
/*------------------------------------------------------------*/

// All just wrappers to avoid exposing arenas to tools.

void* VG_(malloc) ( SizeT nbytes )
{
   return VG_(arena_malloc) ( VG_AR_TOOL, nbytes );
}

void VG_(free) ( void* ptr )
{
   VG_(arena_free) ( VG_AR_TOOL, ptr );
}

void* VG_(calloc) ( SizeT nmemb, SizeT bytes_per_memb )
{
   return VG_(arena_calloc) ( VG_AR_TOOL, nmemb, bytes_per_memb );
}

void* VG_(realloc) ( void* ptr, SizeT size )
{
   return VG_(arena_realloc) ( VG_AR_TOOL, ptr, size );
}

void* VG_(malloc_aligned) ( SizeT req_alignB, SizeT req_pszB )
{
   return VG_(arena_malloc_aligned) ( VG_AR_TOOL, req_alignB, req_pszB );
}


void* VG_(cli_malloc) ( SizeT align, SizeT nbytes )
{
   // 'align' should be valid by now.  VG_(arena_malloc_aligned)() will
   // abort if it's not.
   if (VG_MIN_MALLOC_SZB == align)
      return VG_(arena_malloc) ( VG_AR_CLIENT, nbytes );
   else
      return VG_(arena_malloc_aligned) ( VG_AR_CLIENT, align, nbytes );
}

void VG_(cli_free) ( void* p )
{
   VG_(arena_free) ( VG_AR_CLIENT, p );
}


Bool VG_(addr_is_in_block)( Addr a, Addr start, SizeT size )
{
   /* The red zones on either side of the payload count as part of
      the block. */
   return (start - VG_(vg_malloc_redzone_szB) <= a
           && a < start + size + VG_(vg_malloc_redzone_szB));
}
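
/* Concretely, with payload [start, start+size) and a redzone of R
   bytes on each side, addresses in [start-R, start+size+R) are "in"
   the block.  Sketch assuming an illustrative R == 8, not the real
   Valgrind configuration. */
#if 0
static void sketch_addr_is_in_block ( void )
{
   /* Payload 0x1000..0x100F; redzones 0x0FF8..0x0FFF and 0x1010..0x1017. */
   vg_assert(  VG_(addr_is_in_block)( 0x0FF8, 0x1000, 16 ) );  /* first redzone byte */
   vg_assert(  VG_(addr_is_in_block)( 0x1017, 0x1000, 16 ) );  /* last redzone byte */
   vg_assert( !VG_(addr_is_in_block)( 0x1018, 0x1000, 16 ) );  /* one past the end */
}
#endif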


/*------------------------------------------------------------*/
/*--- The original test driver machinery.                  ---*/
/*------------------------------------------------------------*/

#if 0

#if 1
#define N_TEST_TRANSACTIONS 100000000
#define N_TEST_ARR 200000
#define M_TEST_MALLOC 1000
#else
#define N_TEST_TRANSACTIONS 500000
#define N_TEST_ARR 30000
#define M_TEST_MALLOC 500
#endif


void* test_arr[N_TEST_ARR];

int main ( int argc, char** argv )
{
   Int i, j, k, nbytes, qq;
   unsigned char* chp;
   Arena* a = &arena[VG_AR_CORE];
   srandom(1);
   for (i = 0; i < N_TEST_ARR; i++)
      test_arr[i] = NULL;

   for (i = 0; i < N_TEST_TRANSACTIONS; i++) {
      if (i % 50000 == 0) mallocSanityCheck(a);
      j = random() % N_TEST_ARR;
      if (test_arr[j]) {
         vg_free(a, test_arr[j]);
         test_arr[j] = NULL;
      } else {
         nbytes = 1 + random() % M_TEST_MALLOC;
         qq = random() % 64;
         if (qq == 32)
            nbytes *= 17;
         else if (qq == 33)
            nbytes = 0;
         test_arr[j]
            = (i % 17) == 0
                 ? vg_memalign(a, nbytes, 1 << (3 + (random() % 10)))
                 : vg_malloc( a, nbytes );
         chp = test_arr[j];
         for (k = 0; k < nbytes; k++)
            chp[k] = (unsigned char)(k + 99);
      }
   }

   for (i = 0; i < N_TEST_ARR; i++) {
      if (test_arr[i]) {
         vg_free(a, test_arr[i]);
         test_arr[i] = NULL;
      }
   }
   mallocSanityCheck(a);

   fprintf(stderr, "ALL DONE\n");

   show_arena_stats(a);
   fprintf(stderr, "%d max useful, %d bytes mmap'd (%4.1f%%), %d useful\n",
           a->bytes_on_loan_max,
           a->bytes_mmaped,
           100.0 * (double)a->bytes_on_loan_max / (double)a->bytes_mmaped,
           a->bytes_on_loan );

   return 0;
}
#endif /* 0 */


/*--------------------------------------------------------------------*/
/*--- end                                              vg_malloc2.c ---*/
/*--------------------------------------------------------------------*/