blob: ae9463b8aef0bc85ad5aefdc8838366f0a9630b2 [file] [log] [blame]
sewardjde4a1d02002-03-22 01:27:54 +00001
2/*--------------------------------------------------------------------*/
3/*--- An implementation of malloc/free which doesn't use sbrk. ---*/
4/*--- vg_malloc2.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
njnb9c427c2004-12-01 14:14:42 +00008 This file is part of Valgrind, a dynamic binary instrumentation
9 framework.
sewardjde4a1d02002-03-22 01:27:54 +000010
njn53612422005-03-12 16:22:54 +000011 Copyright (C) 2000-2005 Julian Seward
sewardjde4a1d02002-03-22 01:27:54 +000012 jseward@acm.org
sewardjde4a1d02002-03-22 01:27:54 +000013
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
njn25e49d8e72002-09-23 09:36:25 +000029 The GNU General Public License is contained in the file COPYING.
sewardjde4a1d02002-03-22 01:27:54 +000030*/
31
32
nethercotef1e5e152004-09-01 23:58:16 +000033#include "core.h"
sewardjb5f6f512005-03-10 23:59:00 +000034//zz#include "memcheck/memcheck.h"
sewardjde4a1d02002-03-22 01:27:54 +000035
nethercote2d5b8162004-08-11 09:40:52 +000036//#define DEBUG_MALLOC // turn on heavyweight debugging machinery
37//#define VERBOSE_MALLOC // make verbose, esp. in debugging machinery
38
39/*------------------------------------------------------------*/
40/*--- Main types ---*/
41/*------------------------------------------------------------*/
42
// Number of distinct free lists per arena.  The list-selection logic in
// pszB_to_listNo() is hard-wired to this value.
#define N_MALLOC_LISTS 16 // do not change this

// The amount you can ask for is limited only by sizeof(SizeT)...
#define MAX_PSZB (~((SizeT)0x0))

typedef UChar UByte;
49
/* Block layout:

      this block total szB  (sizeof(SizeT) bytes)
      freelist previous ptr (sizeof(void*) bytes)
      red zone bytes        (depends on .rz_szB field of Arena)
      (payload bytes)
      red zone bytes        (depends on .rz_szB field of Arena)
      freelist next ptr     (sizeof(void*) bytes)
      this block total szB  (sizeof(SizeT) bytes)

   Total size in bytes (bszB) and payload size in bytes (pszB)
   are related by:

      bszB == pszB + 2*sizeof(SizeT) + 2*sizeof(void*) + 2*a->rz_szB

   Furthermore, both size fields in the block have their least-significant
   bit set if the block is not in use, and unset if it is in use.
   (The bottom 3 or so bits are always free for this because of alignment.)
   A block size of zero is not possible, because a block always has at
   least two SizeTs and two pointers of overhead.

   Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned.  This is
   achieved by ensuring that Superblocks are VG_MIN_MALLOC_SZB-aligned
   (see newSuperblock() for how), and that the lengths of the following
   things are a multiple of VG_MIN_MALLOC_SZB:
   - Superblock admin section lengths (due to elastic padding)
   - Block admin section (low and high) lengths (due to elastic redzones)
   - Block payload lengths (due to req_pszB rounding up)
*/
typedef
   struct {
      // No fields are actually used in this struct, because a Block has
      // loads of variable sized fields and so can't be accessed
      // meaningfully with normal fields.  So we use access functions all
      // the time.  This struct gives us a type to use, though.  Also, we
      // make sizeof(Block) 1 byte so that we can do arithmetic with the
      // Block* type in increments of 1!
      UByte dummy;
   }
   Block;
90
// A superblock.  'padding' is never used, it just ensures that if the
// entire Superblock is aligned to VG_MIN_MALLOC_SZB, then payload_bytes[]
// will be too.  It can add small amounts of padding unnecessarily -- eg.
// 8-bytes on 32-bit machines with an 8-byte VG_MIN_MALLOC_SZB -- because
// it's too hard to make a constant expression that works perfectly in all
// cases.
// payload_bytes[] is made a single big Block when the Superblock is
// created, and then can be split and the splittings remerged, but Blocks
// always cover its entire length -- there's never any unused bytes at the
// end, for example.
typedef
   struct _Superblock {
      struct _Superblock* next;       // singly-linked list of superblocks
      SizeT n_payload_bytes;          // usable bytes in payload_bytes[]
      // Elastic padding to keep payload_bytes[] VG_MIN_MALLOC_SZB-aligned.
      UByte padding[ VG_MIN_MALLOC_SZB -
                        ((sizeof(struct _Superblock*) + sizeof(SizeT)) %
                         VG_MIN_MALLOC_SZB) ];
      UByte payload_bytes[0];         // the Blocks live here
   }
   Superblock;
111
// An arena.  'freelist' is a circular, doubly-linked list.  'rz_szB' is
// elastic, in that it can be bigger than asked-for to ensure alignment.
typedef
   struct {
      Char*       name;              // arena name, for stats/diagnostics
      Bool        clientmem;         // Allocates in the client address space?
      SizeT       rz_szB;            // Red zone size in bytes
      SizeT       min_sblock_szB;    // Minimum superblock size in bytes
      Block*      freelist[N_MALLOC_LISTS];   // one list per size class
      Superblock* sblocks;           // all superblocks owned by this arena
      // Stats only.
      SizeT       bytes_on_loan;     // currently allocated to callers
      SizeT       bytes_mmaped;      // total mapped from the OS
      SizeT       bytes_on_loan_max; // high-water mark of bytes_on_loan
   }
   Arena;
128
129
130/*------------------------------------------------------------*/
131/*--- Low-level functions for working with Blocks. ---*/
132/*------------------------------------------------------------*/
133
nethercote7ac7f7b2004-11-02 12:36:02 +0000134#define SIZE_T_0x1 ((SizeT)0x1)
135
nethercote2d5b8162004-08-11 09:40:52 +0000136// Mark a bszB as in-use, and not in-use.
137static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000138SizeT mk_inuse_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000139{
140 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000141 return bszB & (~SIZE_T_0x1);
nethercote2d5b8162004-08-11 09:40:52 +0000142}
143static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000144SizeT mk_free_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000145{
146 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000147 return bszB | SIZE_T_0x1;
nethercote2d5b8162004-08-11 09:40:52 +0000148}
149
150// Remove the in-use/not-in-use attribute from a bszB, leaving just
151// the size.
152static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000153SizeT mk_plain_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000154{
155 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000156 return bszB & (~SIZE_T_0x1);
nethercote2d5b8162004-08-11 09:40:52 +0000157}
158
159// Does this bszB have the in-use attribute?
160static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000161Bool is_inuse_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000162{
163 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000164 return (0 != (bszB & SIZE_T_0x1)) ? False : True;
nethercote2d5b8162004-08-11 09:40:52 +0000165}
166
167
168// Set and get the lower size field of a block.
169static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000170void set_bszB_lo ( Block* b, SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000171{
nethercote7ac7f7b2004-11-02 12:36:02 +0000172 *(SizeT*)&b[0] = bszB;
nethercote2d5b8162004-08-11 09:40:52 +0000173}
174static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000175SizeT get_bszB_lo ( Block* b )
nethercote2d5b8162004-08-11 09:40:52 +0000176{
nethercote7ac7f7b2004-11-02 12:36:02 +0000177 return *(SizeT*)&b[0];
nethercote2d5b8162004-08-11 09:40:52 +0000178}
179
180// Get the address of the last byte in a block
181static __inline__
182UByte* last_byte ( Block* b )
183{
184 UByte* b2 = (UByte*)b;
185 return &b2[mk_plain_bszB(get_bszB_lo(b)) - 1];
186}
187
// Set and get the upper size field of a block.  The upper bszB occupies
// the final sizeof(SizeT) bytes of the block, located via last_byte().
static __inline__
void set_bszB_hi ( Block* b, SizeT bszB )
{
   UByte* b2 = (UByte*)b;
   UByte* lb = last_byte(b);
   // The lower size field must be set first and must agree with bszB,
   // because last_byte() is computed from it.
   vg_assert(lb == &b2[mk_plain_bszB(bszB) - 1]);
   *(SizeT*)&lb[-sizeof(SizeT) + 1] = bszB;
}
static __inline__
SizeT get_bszB_hi ( Block* b )
{
   UByte* lb = last_byte(b);
   return *(SizeT*)&lb[-sizeof(SizeT) + 1];
}
203
204
// Return the lower, upper and total overhead in bytes for a block.
// These are determined purely by which arena the block lives in.
static __inline__
SizeT overhead_szB_lo ( Arena* a )
{
   // lower bszB field + freelist prev ptr + lower red zone
   return sizeof(SizeT) + sizeof(void*) + a->rz_szB;
}
static __inline__
SizeT overhead_szB_hi ( Arena* a )
{
   // upper red zone + freelist next ptr + upper bszB field
   return a->rz_szB + sizeof(void*) + sizeof(SizeT);
}
static __inline__
SizeT overhead_szB ( Arena* a )
{
   return overhead_szB_lo(a) + overhead_szB_hi(a);
}
222
nethercote2d5b8162004-08-11 09:40:52 +0000223// Given the addr of a block, return the addr of its payload.
224static __inline__
225UByte* get_block_payload ( Arena* a, Block* b )
226{
227 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000228 return & b2[ overhead_szB_lo(a) ];
nethercote2d5b8162004-08-11 09:40:52 +0000229}
230// Given the addr of a block's payload, return the addr of the block itself.
231static __inline__
232Block* get_payload_block ( Arena* a, UByte* payload )
233{
nethercote7ac7f7b2004-11-02 12:36:02 +0000234 return (Block*)&payload[ -overhead_szB_lo(a) ];
nethercote2d5b8162004-08-11 09:40:52 +0000235}
236
237
// Set and get the next and previous link fields of a block.  Per the
// block layout, the prev ptr sits just after the lower bszB and the
// next ptr just before the upper bszB; they carry freelist links.
static __inline__
void set_prev_b ( Block* b, Block* prev_p )
{
   UByte* b2 = (UByte*)b;
   *(Block**)&b2[sizeof(SizeT)] = prev_p;
}
static __inline__
void set_next_b ( Block* b, Block* next_p )
{
   UByte* lb = last_byte(b);
   *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1] = next_p;
}
static __inline__
Block* get_prev_b ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return *(Block**)&b2[sizeof(SizeT)];
}
static __inline__
Block* get_next_b ( Block* b )
{
   UByte* lb = last_byte(b);
   return *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1];
}
263
264
// Get the block immediately preceding this one in the Superblock.
// Works by reading the predecessor's upper bszB field, which occupies
// the sizeof(SizeT) bytes immediately below b.
static __inline__
Block* get_predecessor_block ( Block* b )
{
   UByte* b2 = (UByte*)b;
   SizeT bszB = mk_plain_bszB( (*(SizeT*)&b2[-sizeof(SizeT)]) );
   return (Block*)&b2[-bszB];
}
273
// Read and write the lower and upper red-zone bytes of a block.  Byte
// 'rz_byteno' of the lower zone sits just above the lower bszB + prev
// ptr; its upper counterpart sits just below the upper bszB + next ptr.
// NOTE(review): the upper-zone index expressions subtract unsigned
// sizeof() values from a pointer index; this presumably relies on
// conventional wrap-around pointer arithmetic -- confirm on all
// supported targets.
static __inline__
void set_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
{
   UByte* b2 = (UByte*)b;
   b2[sizeof(SizeT) + sizeof(void*) + rz_byteno] = v;
}
static __inline__
void set_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
{
   UByte* lb = last_byte(b);
   lb[-sizeof(SizeT) - sizeof(void*) - rz_byteno] = v;
}
static __inline__
UByte get_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno )
{
   UByte* b2 = (UByte*)b;
   return b2[sizeof(SizeT) + sizeof(void*) + rz_byteno];
}
static __inline__
UByte get_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno )
{
   UByte* lb = last_byte(b);
   return lb[-sizeof(SizeT) - sizeof(void*) - rz_byteno];
}
299
300
nethercote2d5b8162004-08-11 09:40:52 +0000301// Return the minimum bszB for a block in this arena. Can have zero-length
302// payloads, so it's the size of the admin bytes.
303static __inline__
njn0e742df2004-11-30 13:26:29 +0000304SizeT min_useful_bszB ( Arena* a )
nethercote2d5b8162004-08-11 09:40:52 +0000305{
306 return overhead_szB(a);
307}
308
309// Convert payload size <--> block size (both in bytes).
310static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000311SizeT pszB_to_bszB ( Arena* a, SizeT pszB )
nethercote2d5b8162004-08-11 09:40:52 +0000312{
nethercote2d5b8162004-08-11 09:40:52 +0000313 return pszB + overhead_szB(a);
314}
315static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000316SizeT bszB_to_pszB ( Arena* a, SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000317{
nethercote7ac7f7b2004-11-02 12:36:02 +0000318 vg_assert(bszB >= overhead_szB(a));
319 return bszB - overhead_szB(a);
nethercote2d5b8162004-08-11 09:40:52 +0000320}
321
322
323/*------------------------------------------------------------*/
324/*--- Arena management ---*/
325/*------------------------------------------------------------*/
326
// Minimum size in bytes of the core arena's superblocks; also the size
// of the static bootstrap superblock in newSuperblock().
#define CORE_ARENA_MIN_SZB 1048576

// The arena structures themselves.
static Arena vg_arena[VG_N_ARENAS];
331
332// Functions external to this module identify arenas using ArenaIds,
333// not Arena*s. This fn converts the former to the latter.
334static Arena* arenaId_to_ArenaP ( ArenaId arena )
335{
336 vg_assert(arena >= 0 && arena < VG_N_ARENAS);
337 return & vg_arena[arena];
338}
339
// Initialise an arena.  rz_szB is the minimum redzone size; it might be
// made bigger to ensure that VG_MIN_MALLOC_SZB is observed.
static
void arena_init ( ArenaId aid, Char* name, SizeT rz_szB, SizeT min_sblock_szB )
{
   SizeT i;
   Arena* a = arenaId_to_ArenaP(aid);

   vg_assert(rz_szB < 128);   // ensure reasonable size
   // Superblocks are carved in whole pages.
   vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
   a->name = name;
   a->clientmem = ( VG_AR_CLIENT == aid ? True : False );

   // The size of the low and high admin sections in a block must be a
   // multiple of VG_MIN_MALLOC_SZB.  So we round up the asked-for
   // redzone size if necessary to achieve this.
   a->rz_szB = rz_szB;
   while (0 != overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++;
   vg_assert(overhead_szB_lo(a) == overhead_szB_hi(a));

   a->min_sblock_szB = min_sblock_szB;
   for (i = 0; i < N_MALLOC_LISTS; i++) a->freelist[i] = NULL;
   a->sblocks = NULL;
   a->bytes_on_loan = 0;
   a->bytes_mmaped = 0;
   a->bytes_on_loan_max = 0;
}
367
/* Print vital stats for an arena. */
void VG_(print_all_arena_stats) ( void )
{
   UInt i;
   for (i = 0; i < VG_N_ARENAS; i++) {
      Arena* a = arenaId_to_ArenaP(i);
      // NOTE(review): the SizeT stats are printed with %8d; on 64-bit
      // targets this assumes VG_(message) copes with the width mismatch
      // -- confirm the format specifiers match SizeT.
      VG_(message)(Vg_DebugMsg,
         "%8s: %8d mmap'd, %8d/%8d max/curr",
         a->name, a->bytes_mmaped, a->bytes_on_loan_max, a->bytes_on_loan
      );
   }
}
380
// Set once the arenas have been initialised; guards re-initialisation
// and late changes to the client redzone size.
static Bool init_done = False;
// Redzone size for the client arena; tools may override it via
// VG_(set_client_malloc_redzone_szB)() before any allocation.
static SizeT client_malloc_redzone_szB = 8;   // default: be paranoid
383
384// Nb: this must be called before the client arena is initialised, ie.
385// before any memory is allocated.
386void VG_(set_client_malloc_redzone_szB)(SizeT rz_szB)
387{
388 if (init_done) {
389 VG_(printf)(
390 "\nTool error:\n"
391 " __FUNCTION__ cannot be called after the first allocation.\n");
392 VG_(exit)(1);
393 }
394 // This limit is no special figure, just something not too big
395 if (rz_szB > 128) {
396 VG_(printf)(
397 "\nTool error:\n"
398 " __FUNCTION__ passed a too-big value (%llu)", (ULong)rz_szB);
399 VG_(exit)(1);
400 }
401 client_malloc_redzone_szB = rz_szB;
402}
403
/* This library is self-initialising, as it makes this more self-contained,
   less coupled with the outside world.  Hence VG_(arena_malloc)() and
   VG_(arena_free)() below always call ensure_mm_init() to ensure things are
   correctly initialised. */
static
void ensure_mm_init ( void )
{
   // Idempotent: only the first call does any work.
   if (init_done) {
      return;
   }

   /* Use checked red zones (of various sizes) for our internal stuff,
      and an unchecked zone of arbitrary size for the client.  Of
      course the client's red zone can be checked by the tool, eg.
      by using addressibility maps, but not by the mechanism implemented
      here, which merely checks at the time of freeing that the red
      zone bytes are unchanged.

      Nb: redzone sizes are *minimums*; they could be made bigger to ensure
      alignment.  Eg. on 32-bit machines, 4 becomes 8, and 12 becomes 16;
      but on 64-bit machines 4 stays as 4, and 12 stays as 12 --- the extra
      4 bytes in both are accounted for by the larger prev/next ptr.
   */
   arena_init ( VG_AR_CORE, "core", 4, CORE_ARENA_MIN_SZB );
   arena_init ( VG_AR_TOOL, "tool", 4, 1048576 );
   arena_init ( VG_AR_SYMTAB, "symtab", 4, 1048576 );
   arena_init ( VG_AR_CLIENT, "client", client_malloc_redzone_szB, 1048576 );
   arena_init ( VG_AR_DEMANGLE, "demangle", 12/*paranoid*/, 65536 );
   arena_init ( VG_AR_EXECTXT, "exectxt", 4, 65536 );
   arena_init ( VG_AR_ERRORS, "errors", 4, 65536 );

   init_done = True;
#  ifdef DEBUG_MALLOC
   VG_(sanity_check_malloc_all)();
#  endif
}
440
441
442/*------------------------------------------------------------*/
443/*--- Superblock management ---*/
444/*------------------------------------------------------------*/
445
446// Align ptr p upwards to an align-sized boundary.
447static
nethercote7ac7f7b2004-11-02 12:36:02 +0000448void* align_upwards ( void* p, SizeT align )
nethercote2d5b8162004-08-11 09:40:52 +0000449{
450 Addr a = (Addr)p;
451 if ((a % align) == 0) return (void*)a;
452 return (void*)(a - (a % align) + align);
453}
454
// If not enough memory available, either aborts (for non-client memory)
// or returns 0 (for client memory).
static
Superblock* newSuperblock ( Arena* a, SizeT cszB )
{
   // The extra VG_MIN_MALLOC_SZB bytes are for possible alignment up.
   static UByte bootstrap_superblock[CORE_ARENA_MIN_SZB+VG_MIN_MALLOC_SZB];
   // NOTE(review): initialised to True with the original 'False'
   // commented out, which permanently disables the bootstrap-superblock
   // path below.  Looks like a deliberate but undocumented change (or a
   // debugging leftover) -- confirm intent.
   static Bool called_before = True; //False;
   Superblock* sb;

   // Take into account admin bytes in the Superblock.
   cszB += sizeof(Superblock);

   // Round up to the arena's minimum superblock size, then to a whole
   // number of pages.
   if (cszB < a->min_sblock_szB) cszB = a->min_sblock_szB;
   while ((cszB % VKI_PAGE_SIZE) > 0) cszB++;

   if (!called_before) {
      // First time we're called -- use the special static bootstrap
      // superblock (see comment at top of main() for details).
      called_before = True;
      vg_assert(a == arenaId_to_ArenaP(VG_AR_CORE));
      vg_assert(CORE_ARENA_MIN_SZB >= cszB);
      // Ensure sb is suitably aligned.
      sb = (Superblock*)align_upwards( bootstrap_superblock,
                                       VG_MIN_MALLOC_SZB );
   } else if (a->clientmem) {
      // client allocation -- return 0 to client if it fails
      sb = (Superblock *)
           VG_(get_memory_from_mmap_for_client)
              (0, cszB, VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC, 0);
      if (NULL == sb)
         return 0;
   } else {
      // non-client allocation -- aborts if it fails
      sb = VG_(get_memory_from_mmap) ( cszB, "newSuperblock" );
   }
   vg_assert(NULL != sb);
   //zzVALGRIND_MAKE_WRITABLE(sb, cszB);
   vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
   sb->n_payload_bytes = cszB - sizeof(Superblock);
   a->bytes_mmaped += cszB;
   if (0)
      VG_(message)(Vg_DebugMsg, "newSuperblock, %d payload bytes",
                                sb->n_payload_bytes);
   return sb;
}
501
// Find the superblock containing the given chunk.  Linear scan of the
// arena's superblock list; panics (does not return) if the pointer is
// not inside any superblock, since that means it was freed into the
// wrong arena.
static
Superblock* findSb ( Arena* a, Block* b )
{
   Superblock* sb;
   for (sb = a->sblocks; sb; sb = sb->next)
      if ((Block*)&sb->payload_bytes[0] <= b
          && b < (Block*)&sb->payload_bytes[sb->n_payload_bytes])
         return sb;
   VG_(printf)("findSb: can't find pointer %p in arena `%s'\n", b, a->name );
   VG_(core_panic)("findSb: VG_(arena_free)() in wrong arena?");
   return NULL; /*NOTREACHED*/
}
515
sewardjde4a1d02002-03-22 01:27:54 +0000516
/*------------------------------------------------------------*/
/*--- Command line options                                 ---*/
/*------------------------------------------------------------*/

/* Round malloc sizes up to a multiple of VG_SLOPPY_MALLOC_SZB bytes?
   default: NO
   Nb: the allocator always rounds blocks up to a multiple of
   VG_MIN_MALLOC_SZB.  VG_(clo_sloppy_malloc) is relevant eg. for
   Memcheck, which will be byte-precise with addressability maps on its
   malloc allocations unless --sloppy-malloc=yes. */
Bool VG_(clo_sloppy_malloc) = False;

/* DEBUG: print malloc details?  default: NO */
Bool VG_(clo_trace_malloc) = False;

/* Minimum alignment in functions that don't specify alignment explicitly.
   default: 0, i.e. use VG_MIN_MALLOC_SZB. */
UInt VG_(clo_alignment) = VG_MIN_MALLOC_SZB;
fitzhardinge98abfc72003-12-16 02:05:15 +0000535
536
// Parse one command-line option relevant to this module.  Returns True
// if the option was recognised (and consumed), False otherwise.  An
// invalid --alignment value does not return: VG_(bad_option) is called.
Bool VG_(replacement_malloc_process_cmd_line_option)(Char* arg)
{
   if (VG_CLO_STREQN(12, arg, "--alignment=")) {
      VG_(clo_alignment) = (UInt)VG_(atoll)(&arg[12]);

      // Must be a power of two within [VG_MIN_MALLOC_SZB, 4096].
      if (VG_(clo_alignment) < VG_MIN_MALLOC_SZB
          || VG_(clo_alignment) > 4096
          || VG_(log2)( VG_(clo_alignment) ) == -1 /* not a power of 2 */) {
         VG_(message)(Vg_UserMsg, "");
         VG_(message)(Vg_UserMsg,
            "Invalid --alignment= setting. "
            "Should be a power of 2, >= %d, <= 4096.", VG_MIN_MALLOC_SZB);
         VG_(bad_option)("--alignment");
      }
   }

   else VG_BOOL_CLO(arg, "--sloppy-malloc", VG_(clo_sloppy_malloc))
   else VG_BOOL_CLO(arg, "--trace-malloc", VG_(clo_trace_malloc))
   else
      return False;

   return True;
}
560
// Print the user-visible usage text for this module's options.
void VG_(replacement_malloc_print_usage)(void)
{
   VG_(printf)(
"  --sloppy-malloc=no|yes    round malloc sizes to multiple of %d? [no]\n"
"  --alignment=<number>      set minimum alignment of allocations [%d]\n",
   VG_SLOPPY_MALLOC_SZB, VG_MIN_MALLOC_SZB
   );
}
569
// Print the debug-only usage text for this module's options.
void VG_(replacement_malloc_print_debug_usage)(void)
{
   VG_(printf)(
"  --trace-malloc=no|yes     show client malloc details? [no]\n"
   );
}
576
sewardjde4a1d02002-03-22 01:27:54 +0000577
578/*------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +0000579/*--- Functions for working with freelists. ---*/
580/*------------------------------------------------------------*/
581
nethercote2d5b8162004-08-11 09:40:52 +0000582// Nb: Determination of which freelist a block lives on is based on the
583// payload size, not block size.
sewardjde4a1d02002-03-22 01:27:54 +0000584
nethercote2d5b8162004-08-11 09:40:52 +0000585// Convert a payload size in bytes to a freelist number.
sewardjde4a1d02002-03-22 01:27:54 +0000586static
nethercote7ac7f7b2004-11-02 12:36:02 +0000587UInt pszB_to_listNo ( SizeT pszB )
sewardjde4a1d02002-03-22 01:27:54 +0000588{
nethercote2d5b8162004-08-11 09:40:52 +0000589 vg_assert(0 == pszB % VG_MIN_MALLOC_SZB);
590 pszB /= VG_MIN_MALLOC_SZB;
591 if (pszB <= 2) return 0;
592 if (pszB <= 3) return 1;
593 if (pszB <= 4) return 2;
594 if (pszB <= 5) return 3;
595 if (pszB <= 6) return 4;
596 if (pszB <= 7) return 5;
597 if (pszB <= 8) return 6;
598 if (pszB <= 9) return 7;
599 if (pszB <= 10) return 8;
600 if (pszB <= 11) return 9;
601 if (pszB <= 12) return 10;
602 if (pszB <= 16) return 11;
603 if (pszB <= 32) return 12;
604 if (pszB <= 64) return 13;
605 if (pszB <= 128) return 14;
sewardjde4a1d02002-03-22 01:27:54 +0000606 return 15;
607}
608
nethercote2d5b8162004-08-11 09:40:52 +0000609// What is the minimum payload size for a given list?
sewardjde4a1d02002-03-22 01:27:54 +0000610static
nethercote7ac7f7b2004-11-02 12:36:02 +0000611SizeT listNo_to_pszB_min ( UInt listNo )
sewardjde4a1d02002-03-22 01:27:54 +0000612{
nethercote7ac7f7b2004-11-02 12:36:02 +0000613 SizeT pszB = 0;
njn6e6588c2005-03-13 18:52:48 +0000614 vg_assert(listNo <= N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +0000615 while (pszB_to_listNo(pszB) < listNo) pszB += VG_MIN_MALLOC_SZB;
616 return pszB;
sewardjde4a1d02002-03-22 01:27:54 +0000617}
618
nethercote2d5b8162004-08-11 09:40:52 +0000619// What is the maximum payload size for a given list?
sewardjde4a1d02002-03-22 01:27:54 +0000620static
nethercote7ac7f7b2004-11-02 12:36:02 +0000621SizeT listNo_to_pszB_max ( UInt listNo )
sewardjde4a1d02002-03-22 01:27:54 +0000622{
njn6e6588c2005-03-13 18:52:48 +0000623 vg_assert(listNo <= N_MALLOC_LISTS);
624 if (listNo == N_MALLOC_LISTS-1) {
nethercote2d5b8162004-08-11 09:40:52 +0000625 return MAX_PSZB;
sewardjde4a1d02002-03-22 01:27:54 +0000626 } else {
nethercote2d5b8162004-08-11 09:40:52 +0000627 return listNo_to_pszB_min(listNo+1) - 1;
sewardjde4a1d02002-03-22 01:27:54 +0000628 }
629}
630
631
/* A nasty hack to try and reduce fragmentation.  Try and replace
   a->freelist[lno] with another block on the same list but with a
   lower address, with the idea of attempting to recycle the same
   blocks rather than cruise through the address space. */
static
void swizzle ( Arena* a, UInt lno )
{
   Block* p_best;
   Block* pp;
   Block* pn;
   UInt i;

   p_best = a->freelist[lno];
   if (p_best == NULL) return;

   // Walk up to 20 steps in both directions around the circular list,
   // remembering the lowest-addressed block seen.
   pn = pp = p_best;
   for (i = 0; i < 20; i++) {
      pn = get_next_b(pn);
      pp = get_prev_b(pp);
      if (pn < p_best) p_best = pn;
      if (pp < p_best) p_best = pp;
   }
   if (p_best < a->freelist[lno]) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)("retreat by %d\n", a->freelist[lno] - p_best);
#     endif
      a->freelist[lno] = p_best;
   }
}
661
662
663/*------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +0000664/*--- Sanity-check/debugging machinery. ---*/
665/*------------------------------------------------------------*/
666
// XOR masks used to derive the expected red-zone fill byte from a
// block's address (presumably the allocator writes these patterns at
// allocation time; the writer is outside this section -- verify).
#define REDZONE_LO_MASK 0x31
#define REDZONE_HI_MASK 0x7c

// Do some crude sanity checks on a Block: the two size fields must
// agree, and (for non-client, in-use blocks) the red-zone bytes must
// still hold their address-derived fill pattern.
static
Bool blockSane ( Arena* a, Block* b )
{
#  define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
   UInt i;
   if (get_bszB_lo(b) != get_bszB_hi(b))
      {BLEAT("sizes");return False;}
   // Client-arena red zones are unchecked, so only inspect them for
   // the internal arenas.
   if (!a->clientmem && is_inuse_bszB(get_bszB_lo(b))) {
      for (i = 0; i < a->rz_szB; i++) {
         if (get_rz_lo_byte(a, b, i) !=
             (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK))
            {BLEAT("redzone-lo");return False;}
         if (get_rz_hi_byte(a, b, i) !=
             (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK))
            {BLEAT("redzone-hi");return False;}
      }
   }
   return True;
#  undef BLEAT
}
691
// Print superblocks (only for debugging).  Walks every superblock in
// the arena and every Block within it, reporting size, in-use status
// and sanity.
static
void ppSuperblocks ( Arena* a )
{
   UInt i, blockno;
   SizeT b_bszB;
   Block* b;
   Superblock* sb = a->sblocks;
   blockno = 1;

   while (sb) {
      VG_(printf)( "\n" );
      // NOTE(review): %d is used for SizeT values here; on 64-bit
      // targets this assumes VG_(printf) tolerates the width mismatch
      // -- confirm.
      VG_(printf)( "superblock %d at %p, sb->n_pl_bs = %d, next = %p\n",
                   blockno++, sb, sb->n_payload_bytes, sb->next );
      // Blocks tile the payload exactly, so stepping by each block's
      // plain size visits every block.
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         b = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_lo(b);
         VG_(printf)( "   block at %d, bszB %d: ", i, mk_plain_bszB(b_bszB) );
         VG_(printf)( "%s, ", is_inuse_bszB(b_bszB) ? "inuse" : "free");
         VG_(printf)( "%s\n", blockSane(a, b) ? "ok" : "BAD" );
      }
      vg_assert(i == sb->n_payload_bytes); // no overshoot at end of Sb
      sb = sb->next;
   }
   VG_(printf)( "end of superblocks\n\n" );
}
718
// Sanity check both the superblocks and the chains.
// Walks every block of every superblock in arena 'aid', then every free
// list, cross-checking the two views (free-block counts, bytes on loan).
// Panics (BOMB) on any inconsistency.
static void sanity_check_malloc_arena ( ArenaId aid )
{
   UInt        i, superblockctr, blockctr_sb, blockctr_li;
   UInt        blockctr_sb_free, listno;
   SizeT       b_bszB, b_pszB, list_min_pszB, list_max_pszB;
   Superblock* sb;
   Bool        thisFree, lastWasFree;
   Block*      b;
   Block*      b_prev;
   SizeT       arena_bytes_on_loan;
   Arena*      a;

#  define BOMB VG_(core_panic)("sanity_check_malloc_arena")

   a = arenaId_to_ArenaP(aid);

   // First, traverse all the superblocks, inspecting the Blocks in each.
   superblockctr = blockctr_sb = blockctr_sb_free = 0;
   arena_bytes_on_loan = 0;
   sb = a->sblocks;
   while (sb) {
      lastWasFree = False;
      superblockctr++;
      // Step through the payload block-by-block; the loop increment uses
      // the size read from the block just inspected, so 'i' always lands
      // on a block boundary.
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         blockctr_sb++;
         b = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_lo(b);
         if (!blockSane(a, b)) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d (bszB %d): "
                        " BAD\n", sb, i, b_bszB );
            BOMB;
         }
         thisFree = !is_inuse_bszB(b_bszB);
         // Adjacent free blocks should have been coalesced at free time;
         // two in a row means the merging logic failed.
         if (thisFree && lastWasFree) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d (bszB %d): "
                        "UNMERGED FREES\n",
                        sb, i, b_bszB );
            BOMB;
         }
         if (thisFree) blockctr_sb_free++;
         if (!thisFree)
            arena_bytes_on_loan += bszB_to_pszB(a, b_bszB);
         lastWasFree = thisFree;
      }
      // The walk must finish exactly at the payload end; overshoot means
      // the last block's recorded size is wrong.
      if (i > sb->n_payload_bytes) {
         VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
                      "overshoots end\n", sb);
         BOMB;
      }
      sb = sb->next;
   }

   // The byte total computed by traversal must match the running counter
   // maintained by malloc/free.
   if (arena_bytes_on_loan != a->bytes_on_loan) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: a->bytes_on_loan %d, "
                   "arena_bytes_on_loan %d: "
                   "MISMATCH\n", a->bytes_on_loan, arena_bytes_on_loan);
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   /* Second, traverse each list, checking that the back pointers make
      sense, counting blocks encountered, and checking that each block
      is an appropriate size for this list. */
   blockctr_li = 0;
   for (listno = 0; listno < N_MALLOC_LISTS; listno++) {
      list_min_pszB = listNo_to_pszB_min(listno);
      list_max_pszB = listNo_to_pszB_max(listno);
      b = a->freelist[listno];
      if (b == NULL) continue;
      // Lists are circular and doubly linked: walk forward until we come
      // back around to the head, verifying each node's back pointer.
      while (True) {
         b_prev = b;
         b = get_next_b(b);
         if (get_prev_b(b) != b_prev) {
            VG_(printf)( "sanity_check_malloc_arena: list %d at %p: "
                         "BAD LINKAGE\n",
                         listno, b );
            BOMB;
         }
         b_pszB = bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(b)));
         if (b_pszB < list_min_pszB || b_pszB > list_max_pszB) {
            VG_(printf)(
               "sanity_check_malloc_arena: list %d at %p: "
               "WRONG CHAIN SIZE %dB (%dB, %dB)\n",
               listno, b, b_pszB, list_min_pszB, list_max_pszB );
            BOMB;
         }
         blockctr_li++;
         if (b == a->freelist[listno]) break;
      }
   }

   // Every free block found in a superblock must appear on exactly one
   // free list, so the two counts must agree.
   if (blockctr_sb_free != blockctr_li) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: BLOCK COUNT MISMATCH "
                   "(via sbs %d, via lists %d)\n",
                   blockctr_sb_free, blockctr_li );
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   if (VG_(clo_verbosity) > 2)
      VG_(message)(Vg_DebugMsg,
                   "%8s: %2d sbs, %5d bs, %2d/%-2d free bs, "
                   "%7d mmap, %7d loan",
                   a->name,
                   superblockctr,
                   blockctr_sb, blockctr_sb_free, blockctr_li,
                   a->bytes_mmaped, a->bytes_on_loan);
#  undef BOMB
}
833
834
nethercote885dd912004-08-03 23:14:00 +0000835void VG_(sanity_check_malloc_all) ( void )
sewardjde4a1d02002-03-22 01:27:54 +0000836{
nethercote7ac7f7b2004-11-02 12:36:02 +0000837 UInt i;
sewardjde4a1d02002-03-22 01:27:54 +0000838 for (i = 0; i < VG_N_ARENAS; i++)
nethercote885dd912004-08-03 23:14:00 +0000839 sanity_check_malloc_arena ( i );
sewardjde4a1d02002-03-22 01:27:54 +0000840}
841
sewardjde4a1d02002-03-22 01:27:54 +0000842
nethercote2d5b8162004-08-11 09:40:52 +0000843/*------------------------------------------------------------*/
844/*--- Creating and deleting blocks. ---*/
845/*------------------------------------------------------------*/
846
847// Mark the bytes at b .. b+bszB-1 as not in use, and add them to the
848// relevant free list.
849
850static
nethercote7ac7f7b2004-11-02 12:36:02 +0000851void mkFreeBlock ( Arena* a, Block* b, SizeT bszB, UInt b_lno )
jsewardb1a26ae2004-03-14 03:06:37 +0000852{
nethercote7ac7f7b2004-11-02 12:36:02 +0000853 SizeT pszB = bszB_to_pszB(a, bszB);
nethercote2d5b8162004-08-11 09:40:52 +0000854 vg_assert(b_lno == pszB_to_listNo(pszB));
sewardjb5f6f512005-03-10 23:59:00 +0000855 //zzVALGRIND_MAKE_WRITABLE(b, bszB);
nethercote2d5b8162004-08-11 09:40:52 +0000856 // Set the size fields and indicate not-in-use.
857 set_bszB_lo(b, mk_free_bszB(bszB));
858 set_bszB_hi(b, mk_free_bszB(bszB));
859
860 // Add to the relevant list.
861 if (a->freelist[b_lno] == NULL) {
862 set_prev_b(b, b);
863 set_next_b(b, b);
864 a->freelist[b_lno] = b;
865 } else {
866 Block* b_prev = get_prev_b(a->freelist[b_lno]);
867 Block* b_next = a->freelist[b_lno];
868 set_next_b(b_prev, b);
869 set_prev_b(b_next, b);
870 set_next_b(b, b_next);
871 set_prev_b(b, b_prev);
872 }
873# ifdef DEBUG_MALLOC
874 (void)blockSane(a,b);
875# endif
876}
877
878// Mark the bytes at b .. b+bszB-1 as in use, and set up the block
879// appropriately.
880static
nethercote7ac7f7b2004-11-02 12:36:02 +0000881void mkInuseBlock ( Arena* a, Block* b, SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000882{
nethercote7ac7f7b2004-11-02 12:36:02 +0000883 UInt i;
nethercote2d5b8162004-08-11 09:40:52 +0000884 vg_assert(bszB >= min_useful_bszB(a));
sewardjb5f6f512005-03-10 23:59:00 +0000885 //zzVALGRIND_MAKE_WRITABLE(b, bszB);
nethercote2d5b8162004-08-11 09:40:52 +0000886 set_bszB_lo(b, mk_inuse_bszB(bszB));
887 set_bszB_hi(b, mk_inuse_bszB(bszB));
888 set_prev_b(b, NULL); // Take off freelist
889 set_next_b(b, NULL); // ditto
890 if (!a->clientmem) {
891 for (i = 0; i < a->rz_szB; i++) {
njn6e6588c2005-03-13 18:52:48 +0000892 set_rz_lo_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK));
893 set_rz_hi_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK));
nethercote2d5b8162004-08-11 09:40:52 +0000894 }
895 }
896# ifdef DEBUG_MALLOC
897 (void)blockSane(a,b);
898# endif
899}
900
901// Remove a block from a given list. Does no sanity checking.
902static
nethercote7ac7f7b2004-11-02 12:36:02 +0000903void unlinkBlock ( Arena* a, Block* b, UInt listno )
nethercote2d5b8162004-08-11 09:40:52 +0000904{
njn6e6588c2005-03-13 18:52:48 +0000905 vg_assert(listno < N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +0000906 if (get_prev_b(b) == b) {
907 // Only one element in the list; treat it specially.
908 vg_assert(get_next_b(b) == b);
909 a->freelist[listno] = NULL;
910 } else {
911 Block* b_prev = get_prev_b(b);
912 Block* b_next = get_next_b(b);
913 a->freelist[listno] = b_prev;
914 set_next_b(b_prev, b_next);
915 set_prev_b(b_next, b_prev);
916 swizzle ( a, listno );
917 }
918 set_prev_b(b, NULL);
919 set_next_b(b, NULL);
jsewardb1a26ae2004-03-14 03:06:37 +0000920}
921
922
sewardjde4a1d02002-03-22 01:27:54 +0000923/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000924/*--- Core-visible functions. ---*/
sewardjde4a1d02002-03-22 01:27:54 +0000925/*------------------------------------------------------------*/
926
nethercote2d5b8162004-08-11 09:40:52 +0000927// Align the request size.
928static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000929SizeT align_req_pszB ( SizeT req_pszB )
nethercote2d5b8162004-08-11 09:40:52 +0000930{
nethercote7ac7f7b2004-11-02 12:36:02 +0000931 SizeT n = VG_MIN_MALLOC_SZB-1;
nethercote2d5b8162004-08-11 09:40:52 +0000932 return ((req_pszB + n) & (~n));
933}
934
nethercote7ac7f7b2004-11-02 12:36:02 +0000935void* VG_(arena_malloc) ( ArenaId aid, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +0000936{
nethercote7ac7f7b2004-11-02 12:36:02 +0000937 SizeT req_bszB, frag_bszB, b_bszB;
938 UInt lno;
sewardjde4a1d02002-03-22 01:27:54 +0000939 Superblock* new_sb;
nethercote2d5b8162004-08-11 09:40:52 +0000940 Block* b = NULL;
sewardjde4a1d02002-03-22 01:27:54 +0000941 Arena* a;
jsewardb1a26ae2004-03-14 03:06:37 +0000942 void* v;
sewardjde4a1d02002-03-22 01:27:54 +0000943
944 VGP_PUSHCC(VgpMalloc);
945
946 ensure_mm_init();
947 a = arenaId_to_ArenaP(aid);
948
nethercote7ac7f7b2004-11-02 12:36:02 +0000949 vg_assert(req_pszB < MAX_PSZB);
nethercote2d5b8162004-08-11 09:40:52 +0000950 req_pszB = align_req_pszB(req_pszB);
951 req_bszB = pszB_to_bszB(a, req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +0000952
nethercote2d5b8162004-08-11 09:40:52 +0000953 // Scan through all the big-enough freelists for a block.
njn6e6588c2005-03-13 18:52:48 +0000954 for (lno = pszB_to_listNo(req_pszB); lno < N_MALLOC_LISTS; lno++) {
sewardjde4a1d02002-03-22 01:27:54 +0000955 b = a->freelist[lno];
nethercote2d5b8162004-08-11 09:40:52 +0000956 if (NULL == b) continue; // If this list is empty, try the next one.
sewardjde4a1d02002-03-22 01:27:54 +0000957 while (True) {
nethercote2d5b8162004-08-11 09:40:52 +0000958 b_bszB = mk_plain_bszB(get_bszB_lo(b));
959 if (b_bszB >= req_bszB) goto obtained_block; // success!
960 b = get_next_b(b);
961 if (b == a->freelist[lno]) break; // traversed entire freelist
sewardjde4a1d02002-03-22 01:27:54 +0000962 }
sewardjde4a1d02002-03-22 01:27:54 +0000963 }
964
nethercote2d5b8162004-08-11 09:40:52 +0000965 // If we reach here, no suitable block found, allocate a new superblock
njn6e6588c2005-03-13 18:52:48 +0000966 vg_assert(lno == N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +0000967 new_sb = newSuperblock(a, req_bszB);
968 if (NULL == new_sb) {
969 // Should only fail if for client, otherwise, should have aborted
970 // already.
971 vg_assert(VG_AR_CLIENT == aid);
972 return NULL;
sewardjde4a1d02002-03-22 01:27:54 +0000973 }
nethercote2d5b8162004-08-11 09:40:52 +0000974 new_sb->next = a->sblocks;
975 a->sblocks = new_sb;
976 b = (Block*)&new_sb->payload_bytes[0];
977 lno = pszB_to_listNo(bszB_to_pszB(a, new_sb->n_payload_bytes));
978 mkFreeBlock ( a, b, new_sb->n_payload_bytes, lno);
979 // fall through
sewardjde4a1d02002-03-22 01:27:54 +0000980
nethercote2d5b8162004-08-11 09:40:52 +0000981 obtained_block:
982 // Ok, we can allocate from b, which lives in list lno.
sewardjde4a1d02002-03-22 01:27:54 +0000983 vg_assert(b != NULL);
njn6e6588c2005-03-13 18:52:48 +0000984 vg_assert(lno < N_MALLOC_LISTS);
sewardjde4a1d02002-03-22 01:27:54 +0000985 vg_assert(a->freelist[lno] != NULL);
nethercote2d5b8162004-08-11 09:40:52 +0000986 b_bszB = mk_plain_bszB(get_bszB_lo(b));
987 // req_bszB is the size of the block we are after. b_bszB is the
988 // size of what we've actually got. */
989 vg_assert(b_bszB >= req_bszB);
sewardjde4a1d02002-03-22 01:27:54 +0000990
nethercote2d5b8162004-08-11 09:40:52 +0000991 // Could we split this block and still get a useful fragment?
992 frag_bszB = b_bszB - req_bszB;
993 if (frag_bszB >= min_useful_bszB(a)) {
994 // Yes, split block in two, put the fragment on the appropriate free
995 // list, and update b_bszB accordingly.
996 // printf( "split %dB into %dB and %dB\n", b_bszB, req_bszB, frag_bszB );
sewardjde4a1d02002-03-22 01:27:54 +0000997 unlinkBlock(a, b, lno);
nethercote2d5b8162004-08-11 09:40:52 +0000998 mkInuseBlock(a, b, req_bszB);
999 mkFreeBlock(a, &b[req_bszB], frag_bszB,
1000 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
1001 b_bszB = mk_plain_bszB(get_bszB_lo(b));
1002 } else {
1003 // No, mark as in use and use as-is.
1004 unlinkBlock(a, b, lno);
1005 mkInuseBlock(a, b, b_bszB);
sewardjde4a1d02002-03-22 01:27:54 +00001006 }
sewardjde4a1d02002-03-22 01:27:54 +00001007
nethercote2d5b8162004-08-11 09:40:52 +00001008 // Update stats
1009 a->bytes_on_loan += bszB_to_pszB(a, b_bszB);
sewardjde4a1d02002-03-22 01:27:54 +00001010 if (a->bytes_on_loan > a->bytes_on_loan_max)
1011 a->bytes_on_loan_max = a->bytes_on_loan;
1012
1013# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00001014 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001015# endif
1016
njn25e49d8e72002-09-23 09:36:25 +00001017 VGP_POPCC(VgpMalloc);
nethercote2d5b8162004-08-11 09:40:52 +00001018 v = get_block_payload(a, b);
1019 vg_assert( (((Addr)v) & (VG_MIN_MALLOC_SZB-1)) == 0 );
sewardjb5f6f512005-03-10 23:59:00 +00001020
1021 VALGRIND_MALLOCLIKE_BLOCK(v, req_pszB, 0, False);
jsewardb1a26ae2004-03-14 03:06:37 +00001022 return v;
sewardjde4a1d02002-03-22 01:27:54 +00001023}
1024
1025
// Free the payload pointed to by 'ptr' back into arena 'aid', coalescing
// with a free successor and/or predecessor block where possible.
// free(NULL) is a no-op.
void VG_(arena_free) ( ArenaId aid, void* ptr )
{
   Superblock* sb;
   UByte*      sb_start;
   UByte*      sb_end;
   Block*      other;
   Block*      b;
   SizeT       b_bszB, b_pszB, other_bszB;
   UInt        b_listno;
   Arena*      a;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   if (ptr == NULL) {
      VGP_POPCC(VgpMalloc);
      return;
   }

   b = get_payload_block(a, ptr);

#  ifdef DEBUG_MALLOC
   vg_assert(blockSane(a, b));
#  endif

   // The payload is coming off loan; adjust stats before any merging,
   // using the plain (untagged) size.
   a->bytes_on_loan -= bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(b)));

   sb       = findSb( a, b );
   sb_start = &sb->payload_bytes[0];
   sb_end   = &sb->payload_bytes[sb->n_payload_bytes - 1];

   // Put this chunk back on a list somewhere.
   b_bszB   = get_bszB_lo(b);
   b_pszB   = bszB_to_pszB(a, b_bszB);
   b_listno = pszB_to_listNo(b_pszB);
   mkFreeBlock( a, b, b_bszB, b_listno );

   // See if this block can be merged with its successor.
   // First test if we're far enough before the superblock's end to possibly
   // have a successor.
   // (Pointer arithmetic here advances by bytes — Block appears to be a
   // byte-sized type, matching &b[req_bszB] in VG_(arena_malloc); confirm.)
   other = b + b_bszB;
   if (other+min_useful_bszB(a)-1 <= (Block*)sb_end) {
      // Ok, we have a successor, merge if it's not in use.
      other_bszB = get_bszB_lo(other);
      if (!is_inuse_bszB(other_bszB)) {
         // VG_(printf)( "merge-successor\n");
         other_bszB = mk_plain_bszB(other_bszB);
#        ifdef DEBUG_MALLOC
         vg_assert(blockSane(a, other));
#        endif
         // Pull both pieces off their lists, then re-free the combined
         // block onto the list for its new, larger size.
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other, pszB_to_listNo(bszB_to_pszB(a,other_bszB)) );
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
      }
   } else {
      // Not enough space for successor: check that b is the last block
      // ie. there are no unused bytes at the end of the Superblock.
      vg_assert(other-1 == (Block*)sb_end);
   }

   // Then see if this block can be merged with its predecessor.
   // First test if we're far enough after the superblock's start to possibly
   // have a predecessor.
   if (b >= (Block*)sb_start + min_useful_bszB(a)) {
      // Ok, we have a predecessor, merge if it's not in use.
      other = get_predecessor_block( b );
      other_bszB = get_bszB_lo(other);
      if (!is_inuse_bszB(other_bszB)) {
         // VG_(printf)( "merge-predecessor\n");
         other_bszB = mk_plain_bszB(other_bszB);
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other, pszB_to_listNo(bszB_to_pszB(a, other_bszB)) );
         b = other;   // the merged block now starts at the predecessor
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
      }
   } else {
      // Not enough space for predecessor: check that b is the first block,
      // ie. there are no unused bytes at the start of the Superblock.
      vg_assert((Block*)sb_start == b);
   }

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   // Notify any tool running on top of us that this block is gone.
   VALGRIND_FREELIKE_BLOCK(ptr, 0);

   VGP_POPCC(VgpMalloc);
}
1121
1122
1123/*
1124 The idea for malloc_aligned() is to allocate a big block, base, and
1125 then split it into two parts: frag, which is returned to the the
1126 free pool, and align, which is the bit we're really after. Here's
1127 a picture. L and H denote the block lower and upper overheads, in
nethercote2d5b8162004-08-11 09:40:52 +00001128 bytes. The details are gruesome. Note it is slightly complicated
sewardjde4a1d02002-03-22 01:27:54 +00001129 because the initial request to generate base may return a bigger
1130 block than we asked for, so it is important to distinguish the base
1131 request size and the base actual size.
1132
1133 frag_b align_b
1134 | |
1135 | frag_p | align_p
1136 | | | |
1137 v v v v
1138
1139 +---+ +---+---+ +---+
1140 | L |----------------| H | L |---------------| H |
1141 +---+ +---+---+ +---+
1142
1143 ^ ^ ^
1144 | | :
1145 | base_p this addr must be aligned
1146 |
1147 base_b
1148
1149 . . . . . . .
nethercote2d5b8162004-08-11 09:40:52 +00001150 <------ frag_bszB -------> . . .
1151 . <------------- base_pszB_act -----------> .
sewardjde4a1d02002-03-22 01:27:54 +00001152 . . . . . . .
1153
1154*/
njn083f3022005-03-13 18:33:02 +00001155static
1156void* arena_malloc_aligned ( ArenaId aid, SizeT req_alignB, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001157{
nethercote7ac7f7b2004-11-02 12:36:02 +00001158 SizeT base_pszB_req, base_pszB_act, frag_bszB;
nethercote2d5b8162004-08-11 09:40:52 +00001159 Block *base_b, *align_b;
1160 UByte *base_p, *align_p;
nethercote7ac7f7b2004-11-02 12:36:02 +00001161 SizeT saved_bytes_on_loan;
sewardjde4a1d02002-03-22 01:27:54 +00001162 Arena* a;
1163
njn25e49d8e72002-09-23 09:36:25 +00001164 VGP_PUSHCC(VgpMalloc);
1165
sewardjde4a1d02002-03-22 01:27:54 +00001166 ensure_mm_init();
1167 a = arenaId_to_ArenaP(aid);
1168
nethercote7ac7f7b2004-11-02 12:36:02 +00001169 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00001170
nethercote2d5b8162004-08-11 09:40:52 +00001171 // Check that the requested alignment seems reasonable; that is, is
1172 // a power of 2.
1173 if (req_alignB < VG_MIN_MALLOC_SZB
1174 || req_alignB > 1048576
1175 || VG_(log2)( VG_(clo_alignment) ) == -1 /* not a power of 2 */) {
njn083f3022005-03-13 18:33:02 +00001176 VG_(printf)("arena_malloc_aligned(%p, %d, %d)\nbad alignment",
nethercote2d5b8162004-08-11 09:40:52 +00001177 a, req_alignB, req_pszB );
njn083f3022005-03-13 18:33:02 +00001178 VG_(core_panic)("arena_malloc_aligned");
nethercote2d5b8162004-08-11 09:40:52 +00001179 /*NOTREACHED*/
sewardjde4a1d02002-03-22 01:27:54 +00001180 }
nethercote2d5b8162004-08-11 09:40:52 +00001181 // Paranoid
1182 vg_assert(req_alignB % VG_MIN_MALLOC_SZB == 0);
sewardjde4a1d02002-03-22 01:27:54 +00001183
1184 /* Required payload size for the aligned chunk. */
nethercote2d5b8162004-08-11 09:40:52 +00001185 req_pszB = align_req_pszB(req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001186
nethercote2d5b8162004-08-11 09:40:52 +00001187 /* Payload size to request for the big block that we will split up. */
1188 base_pszB_req = req_pszB + min_useful_bszB(a) + req_alignB;
sewardjde4a1d02002-03-22 01:27:54 +00001189
1190 /* Payload ptr for the block we are going to split. Note this
1191 changes a->bytes_on_loan; we save and restore it ourselves. */
1192 saved_bytes_on_loan = a->bytes_on_loan;
nethercote2d5b8162004-08-11 09:40:52 +00001193 base_p = VG_(arena_malloc) ( aid, base_pszB_req );
sewardjde4a1d02002-03-22 01:27:54 +00001194 a->bytes_on_loan = saved_bytes_on_loan;
1195
1196 /* Block ptr for the block we are going to split. */
nethercote2d5b8162004-08-11 09:40:52 +00001197 base_b = get_payload_block ( a, base_p );
sewardjde4a1d02002-03-22 01:27:54 +00001198
1199 /* Pointer to the payload of the aligned block we are going to
1200 return. This has to be suitably aligned. */
nethercote2d5b8162004-08-11 09:40:52 +00001201 align_p = align_upwards ( base_b + 2 * overhead_szB_lo(a)
1202 + overhead_szB_hi(a),
sewardjde4a1d02002-03-22 01:27:54 +00001203 req_alignB );
nethercote2d5b8162004-08-11 09:40:52 +00001204 align_b = get_payload_block(a, align_p);
sewardjde4a1d02002-03-22 01:27:54 +00001205
1206 /* The block size of the fragment we will create. This must be big
1207 enough to actually create a fragment. */
nethercote2d5b8162004-08-11 09:40:52 +00001208 frag_bszB = align_b - base_b;
1209
1210 vg_assert(frag_bszB >= min_useful_bszB(a));
sewardjde4a1d02002-03-22 01:27:54 +00001211
1212 /* The actual payload size of the block we are going to split. */
nethercote2d5b8162004-08-11 09:40:52 +00001213 base_pszB_act = bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(base_b)));
sewardjde4a1d02002-03-22 01:27:54 +00001214
nethercote2d5b8162004-08-11 09:40:52 +00001215 /* Create the fragment block, and put it back on the relevant free list. */
1216 mkFreeBlock ( a, base_b, frag_bszB,
1217 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)) );
sewardjde4a1d02002-03-22 01:27:54 +00001218
1219 /* Create the aligned block. */
nethercote2d5b8162004-08-11 09:40:52 +00001220 mkInuseBlock ( a, align_b,
1221 base_p + base_pszB_act
1222 + overhead_szB_hi(a) - (UByte*)align_b );
sewardjde4a1d02002-03-22 01:27:54 +00001223
1224 /* Final sanity checks. */
nethercote2d5b8162004-08-11 09:40:52 +00001225 vg_assert( is_inuse_bszB(get_bszB_lo(get_payload_block(a, align_p))) );
sewardjde4a1d02002-03-22 01:27:54 +00001226
nethercote2d5b8162004-08-11 09:40:52 +00001227 vg_assert(req_pszB
sewardjde4a1d02002-03-22 01:27:54 +00001228 <=
nethercote2d5b8162004-08-11 09:40:52 +00001229 bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(
1230 get_payload_block(a, align_p))))
sewardjde4a1d02002-03-22 01:27:54 +00001231 );
1232
1233 a->bytes_on_loan
nethercote2d5b8162004-08-11 09:40:52 +00001234 += bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(
1235 get_payload_block(a, align_p))));
sewardjde4a1d02002-03-22 01:27:54 +00001236 if (a->bytes_on_loan > a->bytes_on_loan_max)
1237 a->bytes_on_loan_max = a->bytes_on_loan;
1238
1239# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00001240 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001241# endif
1242
njn25e49d8e72002-09-23 09:36:25 +00001243 VGP_POPCC(VgpMalloc);
1244
nethercote2d5b8162004-08-11 09:40:52 +00001245 vg_assert( (((Addr)align_p) % req_alignB) == 0 );
sewardjb5f6f512005-03-10 23:59:00 +00001246
1247 VALGRIND_MALLOCLIKE_BLOCK(align_p, req_pszB, 0, False);
1248
nethercote2d5b8162004-08-11 09:40:52 +00001249 return align_p;
1250}
1251
1252
nethercote7ac7f7b2004-11-02 12:36:02 +00001253SizeT VG_(arena_payload_szB) ( ArenaId aid, void* ptr )
nethercote2d5b8162004-08-11 09:40:52 +00001254{
1255 Arena* a = arenaId_to_ArenaP(aid);
1256 Block* b = get_payload_block(a, ptr);
1257 return bszB_to_pszB(a, get_bszB_lo(b));
sewardjde4a1d02002-03-22 01:27:54 +00001258}
1259
1260
1261/*------------------------------------------------------------*/
1262/*--- Services layered on top of malloc/free. ---*/
1263/*------------------------------------------------------------*/
1264
njn828022a2005-03-13 14:56:31 +00001265void* VG_(arena_calloc) ( ArenaId aid, SizeT nmemb, SizeT bytes_per_memb )
sewardjde4a1d02002-03-22 01:27:54 +00001266{
nethercote7ac7f7b2004-11-02 12:36:02 +00001267 SizeT size;
sewardjde4a1d02002-03-22 01:27:54 +00001268 UChar* p;
njn25e49d8e72002-09-23 09:36:25 +00001269
1270 VGP_PUSHCC(VgpMalloc);
1271
njn926ed472005-03-11 04:44:10 +00001272 size = nmemb * bytes_per_memb;
1273 vg_assert(size >= nmemb && size >= bytes_per_memb);// check against overflow
njn3e884182003-04-15 13:03:23 +00001274
njn828022a2005-03-13 14:56:31 +00001275 p = VG_(arena_malloc) ( aid, size );
njn3e884182003-04-15 13:03:23 +00001276
njn926ed472005-03-11 04:44:10 +00001277 VG_(memset)(p, 0, size);
sewardjb5f6f512005-03-10 23:59:00 +00001278
njn926ed472005-03-11 04:44:10 +00001279 VALGRIND_MALLOCLIKE_BLOCK(p, size, 0, True);
njn25e49d8e72002-09-23 09:36:25 +00001280
1281 VGP_POPCC(VgpMalloc);
1282
sewardjde4a1d02002-03-22 01:27:54 +00001283 return p;
1284}
1285
1286
njn828022a2005-03-13 14:56:31 +00001287void* VG_(arena_realloc) ( ArenaId aid, void* ptr, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001288{
1289 Arena* a;
nethercote7ac7f7b2004-11-02 12:36:02 +00001290 SizeT old_bszB, old_pszB;
sewardjb5f6f512005-03-10 23:59:00 +00001291 UChar *p_new;
nethercote2d5b8162004-08-11 09:40:52 +00001292 Block* b;
sewardjde4a1d02002-03-22 01:27:54 +00001293
njn25e49d8e72002-09-23 09:36:25 +00001294 VGP_PUSHCC(VgpMalloc);
1295
sewardjde4a1d02002-03-22 01:27:54 +00001296 ensure_mm_init();
1297 a = arenaId_to_ArenaP(aid);
1298
nethercote7ac7f7b2004-11-02 12:36:02 +00001299 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00001300
nethercote2d5b8162004-08-11 09:40:52 +00001301 b = get_payload_block(a, ptr);
1302 vg_assert(blockSane(a, b));
sewardjde4a1d02002-03-22 01:27:54 +00001303
nethercote2d5b8162004-08-11 09:40:52 +00001304 old_bszB = get_bszB_lo(b);
1305 vg_assert(is_inuse_bszB(old_bszB));
1306 old_bszB = mk_plain_bszB(old_bszB);
1307 old_pszB = bszB_to_pszB(a, old_bszB);
sewardjde4a1d02002-03-22 01:27:54 +00001308
njn25e49d8e72002-09-23 09:36:25 +00001309 if (req_pszB <= old_pszB) {
1310 VGP_POPCC(VgpMalloc);
1311 return ptr;
1312 }
sewardjde4a1d02002-03-22 01:27:54 +00001313
njn828022a2005-03-13 14:56:31 +00001314 p_new = VG_(arena_malloc) ( aid, req_pszB );
1315
sewardjb5f6f512005-03-10 23:59:00 +00001316 VG_(memcpy)(p_new, ptr, old_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001317
sewardjb5f6f512005-03-10 23:59:00 +00001318 VG_(arena_free)(aid, ptr);
njn25e49d8e72002-09-23 09:36:25 +00001319
1320 VGP_POPCC(VgpMalloc);
sewardjde4a1d02002-03-22 01:27:54 +00001321 return p_new;
1322}
1323
1324
1325/*------------------------------------------------------------*/
nethercote996901a2004-08-03 13:29:09 +00001326/*--- Tool-visible functions. ---*/
njn25e49d8e72002-09-23 09:36:25 +00001327/*------------------------------------------------------------*/
1328
nethercote2d5b8162004-08-11 09:40:52 +00001329// All just wrappers to avoid exposing arenas to tools.
njn25e49d8e72002-09-23 09:36:25 +00001330
// Tool-visible malloc: allocate from the tool arena.
void* VG_(malloc) ( SizeT nbytes )
{
   return VG_(arena_malloc) ( VG_AR_TOOL, nbytes );
}
1335
// Tool-visible free: release a block back to the tool arena.
void VG_(free) ( void* ptr )
{
   VG_(arena_free) ( VG_AR_TOOL, ptr );
}
1340
// Tool-visible calloc: zeroed allocation from the tool arena.
void* VG_(calloc) ( SizeT nmemb, SizeT bytes_per_memb )
{
   return VG_(arena_calloc) ( VG_AR_TOOL, nmemb, bytes_per_memb );
}
1345
// Tool-visible realloc: resize a tool-arena block.
void* VG_(realloc) ( void* ptr, SizeT size )
{
   return VG_(arena_realloc) ( VG_AR_TOOL, ptr, size );
}
1350
nethercote7ac7f7b2004-11-02 12:36:02 +00001351void* VG_(cli_malloc) ( SizeT align, SizeT nbytes )
njn3e884182003-04-15 13:03:23 +00001352{
njnc7c31612005-03-13 18:53:34 +00001353 // 'align' should be valid (ie. big enough and a power of two) by now.
1354 // arena_malloc_aligned() will abort if it's not.
nethercote2d5b8162004-08-11 09:40:52 +00001355 if (VG_MIN_MALLOC_SZB == align)
njn083f3022005-03-13 18:33:02 +00001356 return VG_(arena_malloc) ( VG_AR_CLIENT, nbytes );
njn3e884182003-04-15 13:03:23 +00001357 else
njn083f3022005-03-13 18:33:02 +00001358 return arena_malloc_aligned ( VG_AR_CLIENT, align, nbytes );
njn3e884182003-04-15 13:03:23 +00001359}
1360
// Release a client-arena block allocated via VG_(cli_malloc).
void VG_(cli_free) ( void* p )
{
   VG_(arena_free) ( VG_AR_CLIENT, p );
}
1365
1366
nethercote7ac7f7b2004-11-02 12:36:02 +00001367Bool VG_(addr_is_in_block)( Addr a, Addr start, SizeT size )
njn3e884182003-04-15 13:03:23 +00001368{
njn8a97c6d2005-03-31 04:37:24 +00001369 return (start - client_malloc_redzone_szB <= a
1370 && a < start + size + client_malloc_redzone_szB);
njn3e884182003-04-15 13:03:23 +00001371}
1372
1373
njn25e49d8e72002-09-23 09:36:25 +00001374/*------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +00001375/*--- The original test driver machinery. ---*/
1376/*------------------------------------------------------------*/
1377
#if 0

/* Standalone stress-tester for this allocator.  Compiled out (#if 0);
   kept for reference only.  It randomly allocates/memaligns/frees slots
   from a big pointer array, scribbles on each allocation, and runs the
   sanity checker every 50000 transactions.
   NOTE(review): it references old entry points (vg_malloc, vg_free,
   mallocSanityCheck, ...) that no longer match the current names above,
   so it would need updating before being re-enabled. */

#if 1
#define N_TEST_TRANSACTIONS 100000000
#define N_TEST_ARR 200000
#define M_TEST_MALLOC 1000
#else
#define N_TEST_TRANSACTIONS 500000
#define N_TEST_ARR 30000
#define M_TEST_MALLOC 500
#endif


void* test_arr[N_TEST_ARR];

int main ( int argc, char** argv )
{
   Int i, j, k, nbytes, qq;
   unsigned char* chp;
   Arena* a = &arena[VG_AR_CORE];
   srandom(1);
   for (i = 0; i < N_TEST_ARR; i++)
      test_arr[i] = NULL;

   for (i = 0; i < N_TEST_TRANSACTIONS; i++) {
      if (i % 50000 == 0) mallocSanityCheck(a);
      j = random() % N_TEST_ARR;
      if (test_arr[j]) {
         vg_free(a, test_arr[j]);
         test_arr[j] = NULL;
      } else {
         nbytes = 1 + random() % M_TEST_MALLOC;
         qq = random()%64;
         if (qq == 32)
            nbytes *= 17;
         else if (qq == 33)
            nbytes = 0;
         test_arr[j]
            = (i % 17) == 0
                 ? vg_memalign(a, nbytes, 1<< (3+(random()%10)))
                 : vg_malloc( a, nbytes );
         chp = test_arr[j];
         for (k = 0; k < nbytes; k++)
            chp[k] = (unsigned char)(k + 99);
      }
   }

   for (i = 0; i < N_TEST_ARR; i++) {
      if (test_arr[i]) {
         vg_free(a, test_arr[i]);
         test_arr[i] = NULL;
      }
   }
   mallocSanityCheck(a);

   fprintf(stderr, "ALL DONE\n");

   show_arena_stats(a);
   fprintf(stderr, "%d max useful, %d bytes mmap'd (%4.1f%%), %d useful\n",
           a->bytes_on_loan_max,
           a->bytes_mmaped,
           100.0 * (double)a->bytes_on_loan_max / (double)a->bytes_mmaped,
           a->bytes_on_loan );

   return 0;
}
#endif /* 0 */
1446
1447
1448/*--------------------------------------------------------------------*/
1449/*--- end vg_malloc2.c ---*/
1450/*--------------------------------------------------------------------*/