blob: 6e3514e46df10060ad3d6060adfd47fee7aef1cf [file] [log] [blame]
sewardjde4a1d02002-03-22 01:27:54 +00001
2/*--------------------------------------------------------------------*/
3/*--- An implementation of malloc/free which doesn't use sbrk. ---*/
4/*--- vg_malloc2.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
njnb9c427c2004-12-01 14:14:42 +00008 This file is part of Valgrind, a dynamic binary instrumentation
9 framework.
sewardjde4a1d02002-03-22 01:27:54 +000010
njn53612422005-03-12 16:22:54 +000011 Copyright (C) 2000-2005 Julian Seward
sewardjde4a1d02002-03-22 01:27:54 +000012 jseward@acm.org
sewardjde4a1d02002-03-22 01:27:54 +000013
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
njn25e49d8e72002-09-23 09:36:25 +000029 The GNU General Public License is contained in the file COPYING.
sewardjde4a1d02002-03-22 01:27:54 +000030*/
31
32
nethercotef1e5e152004-09-01 23:58:16 +000033#include "core.h"
sewardjb5f6f512005-03-10 23:59:00 +000034//zz#include "memcheck/memcheck.h"
sewardjde4a1d02002-03-22 01:27:54 +000035
nethercote2d5b8162004-08-11 09:40:52 +000036//#define DEBUG_MALLOC // turn on heavyweight debugging machinery
37//#define VERBOSE_MALLOC // make verbose, esp. in debugging machinery
38
39/*------------------------------------------------------------*/
40/*--- Main types ---*/
41/*------------------------------------------------------------*/
42
njn6e6588c2005-03-13 18:52:48 +000043#define N_MALLOC_LISTS 16 // do not change this
nethercote2d5b8162004-08-11 09:40:52 +000044
nethercote7ac7f7b2004-11-02 12:36:02 +000045// The amount you can ask for is limited only by sizeof(SizeT)...
46#define MAX_PSZB (~((SizeT)0x0))
nethercote2d5b8162004-08-11 09:40:52 +000047
48typedef UChar UByte;
49
50/* Block layout:
51
nethercote7ac7f7b2004-11-02 12:36:02 +000052 this block total szB (sizeof(SizeT) bytes)
nethercote2d5b8162004-08-11 09:40:52 +000053 freelist previous ptr (sizeof(void*) bytes)
54 red zone bytes (depends on .rz_szB field of Arena)
55 (payload bytes)
56 red zone bytes (depends on .rz_szB field of Arena)
57 freelist next ptr (sizeof(void*) bytes)
nethercote7ac7f7b2004-11-02 12:36:02 +000058 this block total szB (sizeof(SizeT) bytes)
nethercote2d5b8162004-08-11 09:40:52 +000059
60 Total size in bytes (bszB) and payload size in bytes (pszB)
61 are related by:
62
nethercote7ac7f7b2004-11-02 12:36:02 +000063 bszB == pszB + 2*sizeof(SizeT) + 2*sizeof(void*) + 2*a->rz_szB
nethercote2d5b8162004-08-11 09:40:52 +000064
   Furthermore, both size fields in the block have their least-significant
66 bit set if the block is not in use, and unset if it is in use.
67 (The bottom 3 or so bits are always free for this because of alignment.)
68 A block size of zero is not possible, because a block always has at
69 least two SizeTs and two pointers of overhead.
nethercote2d5b8162004-08-11 09:40:52 +000070
71 Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned. This is
72 achieved by ensuring that Superblocks are VG_MIN_MALLOC_SZB-aligned
73 (see newSuperblock() for how), and that the lengths of the following
74 things are a multiple of VG_MIN_MALLOC_SZB:
75 - Superblock admin section lengths (due to elastic padding)
76 - Block admin section (low and high) lengths (due to elastic redzones)
77 - Block payload lengths (due to req_pszB rounding up)
78*/
79typedef
80 struct {
81 // No fields are actually used in this struct, because a Block has
82 // loads of variable sized fields and so can't be accessed
83 // meaningfully with normal fields. So we use access functions all
84 // the time. This struct gives us a type to use, though. Also, we
85 // make sizeof(Block) 1 byte so that we can do arithmetic with the
86 // Block* type in increments of 1!
87 UByte dummy;
88 }
89 Block;
90
91// A superblock. 'padding' is never used, it just ensures that if the
92// entire Superblock is aligned to VG_MIN_MALLOC_SZB, then payload_bytes[]
93// will be too. It can add small amounts of padding unnecessarily -- eg.
94// 8-bytes on 32-bit machines with an 8-byte VG_MIN_MALLOC_SZB -- because
95// it's too hard to make a constant expression that works perfectly in all
96// cases.
97// payload_bytes[] is made a single big Block when the Superblock is
98// created, and then can be split and the splittings remerged, but Blocks
99// always cover its entire length -- there's never any unused bytes at the
100// end, for example.
101typedef
102 struct _Superblock {
103 struct _Superblock* next;
nethercote7ac7f7b2004-11-02 12:36:02 +0000104 SizeT n_payload_bytes;
nethercote2d5b8162004-08-11 09:40:52 +0000105 UByte padding[ VG_MIN_MALLOC_SZB -
nethercote7ac7f7b2004-11-02 12:36:02 +0000106 ((sizeof(struct _Superblock*) + sizeof(SizeT)) %
107 VG_MIN_MALLOC_SZB) ];
nethercote2d5b8162004-08-11 09:40:52 +0000108 UByte payload_bytes[0];
109 }
110 Superblock;
111
112// An arena. 'freelist' is a circular, doubly-linked list. 'rz_szB' is
113// elastic, in that it can be bigger than asked-for to ensure alignment.
114typedef
115 struct {
116 Char* name;
117 Bool clientmem; // Allocates in the client address space?
njn0e742df2004-11-30 13:26:29 +0000118 SizeT rz_szB; // Red zone size in bytes
nethercote7ac7f7b2004-11-02 12:36:02 +0000119 SizeT min_sblock_szB; // Minimum superblock size in bytes
njn6e6588c2005-03-13 18:52:48 +0000120 Block* freelist[N_MALLOC_LISTS];
nethercote2d5b8162004-08-11 09:40:52 +0000121 Superblock* sblocks;
122 // Stats only.
nethercote7ac7f7b2004-11-02 12:36:02 +0000123 SizeT bytes_on_loan;
124 SizeT bytes_mmaped;
125 SizeT bytes_on_loan_max;
nethercote2d5b8162004-08-11 09:40:52 +0000126 }
127 Arena;
128
129
130/*------------------------------------------------------------*/
131/*--- Low-level functions for working with Blocks. ---*/
132/*------------------------------------------------------------*/
133
nethercote7ac7f7b2004-11-02 12:36:02 +0000134#define SIZE_T_0x1 ((SizeT)0x1)
135
nethercote2d5b8162004-08-11 09:40:52 +0000136// Mark a bszB as in-use, and not in-use.
137static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000138SizeT mk_inuse_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000139{
140 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000141 return bszB & (~SIZE_T_0x1);
nethercote2d5b8162004-08-11 09:40:52 +0000142}
143static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000144SizeT mk_free_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000145{
146 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000147 return bszB | SIZE_T_0x1;
nethercote2d5b8162004-08-11 09:40:52 +0000148}
149
150// Remove the in-use/not-in-use attribute from a bszB, leaving just
151// the size.
152static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000153SizeT mk_plain_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000154{
155 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000156 return bszB & (~SIZE_T_0x1);
nethercote2d5b8162004-08-11 09:40:52 +0000157}
158
159// Does this bszB have the in-use attribute?
160static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000161Bool is_inuse_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000162{
163 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000164 return (0 != (bszB & SIZE_T_0x1)) ? False : True;
nethercote2d5b8162004-08-11 09:40:52 +0000165}
166
167
168// Set and get the lower size field of a block.
169static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000170void set_bszB_lo ( Block* b, SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000171{
nethercote7ac7f7b2004-11-02 12:36:02 +0000172 *(SizeT*)&b[0] = bszB;
nethercote2d5b8162004-08-11 09:40:52 +0000173}
174static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000175SizeT get_bszB_lo ( Block* b )
nethercote2d5b8162004-08-11 09:40:52 +0000176{
nethercote7ac7f7b2004-11-02 12:36:02 +0000177 return *(SizeT*)&b[0];
nethercote2d5b8162004-08-11 09:40:52 +0000178}
179
180// Get the address of the last byte in a block
181static __inline__
182UByte* last_byte ( Block* b )
183{
184 UByte* b2 = (UByte*)b;
185 return &b2[mk_plain_bszB(get_bszB_lo(b)) - 1];
186}
187
188// Set and get the upper size field of a block.
// Set the upper size field of a block, ie. the copy of the size stored in
// the block's last sizeof(SizeT) bytes.
static __inline__
void set_bszB_hi ( Block* b, SizeT bszB )
{
   UByte* b2 = (UByte*)b;
   UByte* lb = last_byte(b);
   // last_byte() is derived from the *lower* size field, so the value being
   // written here must agree with it.
   vg_assert(lb == &b2[mk_plain_bszB(bszB) - 1]);
   *(SizeT*)&lb[-sizeof(SizeT) + 1] = bszB;
}
// Get the upper size field of a block (the copy stored in the block's
// last sizeof(SizeT) bytes).
static __inline__
SizeT get_bszB_hi ( Block* b )
{
   UByte* lb = last_byte(b);
   return *(SizeT*)&lb[-sizeof(SizeT) + 1];
}
203
204
nethercote7ac7f7b2004-11-02 12:36:02 +0000205// Return the lower, upper and total overhead in bytes for a block.
206// These are determined purely by which arena the block lives in.
207static __inline__
njn0e742df2004-11-30 13:26:29 +0000208SizeT overhead_szB_lo ( Arena* a )
nethercote7ac7f7b2004-11-02 12:36:02 +0000209{
210 return sizeof(SizeT) + sizeof(void*) + a->rz_szB;
211}
212static __inline__
njn0e742df2004-11-30 13:26:29 +0000213SizeT overhead_szB_hi ( Arena* a )
nethercote7ac7f7b2004-11-02 12:36:02 +0000214{
215 return a->rz_szB + sizeof(void*) + sizeof(SizeT);
216}
217static __inline__
njn0e742df2004-11-30 13:26:29 +0000218SizeT overhead_szB ( Arena* a )
nethercote7ac7f7b2004-11-02 12:36:02 +0000219{
220 return overhead_szB_lo(a) + overhead_szB_hi(a);
221}
222
nethercote2d5b8162004-08-11 09:40:52 +0000223// Given the addr of a block, return the addr of its payload.
224static __inline__
225UByte* get_block_payload ( Arena* a, Block* b )
226{
227 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000228 return & b2[ overhead_szB_lo(a) ];
nethercote2d5b8162004-08-11 09:40:52 +0000229}
230// Given the addr of a block's payload, return the addr of the block itself.
231static __inline__
232Block* get_payload_block ( Arena* a, UByte* payload )
233{
nethercote7ac7f7b2004-11-02 12:36:02 +0000234 return (Block*)&payload[ -overhead_szB_lo(a) ];
nethercote2d5b8162004-08-11 09:40:52 +0000235}
236
237
238// Set and get the next and previous link fields of a block.
// Set the freelist-previous link of a block.  The prev pointer lives
// immediately after the lower size field.
static __inline__
void set_prev_b ( Block* b, Block* prev_p )
{
   UByte* b2 = (UByte*)b;
   *(Block**)&b2[sizeof(SizeT)] = prev_p;
}
// Set the freelist-next link of a block.  The next pointer lives
// immediately below the upper size field, at the top of the block.
static __inline__
void set_next_b ( Block* b, Block* next_p )
{
   UByte* lb = last_byte(b);
   *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1] = next_p;
}
// Get the freelist-previous link of a block (stored just after the
// lower size field).
static __inline__
Block* get_prev_b ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return *(Block**)&b2[sizeof(SizeT)];
}
// Get the freelist-next link of a block (stored just below the upper
// size field).
static __inline__
Block* get_next_b ( Block* b )
{
   UByte* lb = last_byte(b);
   return *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1];
}
263
264
265// Get the block immediately preceding this one in the Superblock.
// Get the block immediately preceding this one in the Superblock.
// The predecessor's *upper* size field sits in the bytes just before this
// block, so reading it tells us how far back the predecessor starts.
static __inline__
Block* get_predecessor_block ( Block* b )
{
   UByte* b2 = (UByte*)b;
   SizeT bszB = mk_plain_bszB( (*(SizeT*)&b2[-sizeof(SizeT)]) );
   return (Block*)&b2[-bszB];
}
273
274// Read and write the lower and upper red-zone bytes of a block.
// Write byte rz_byteno of the lower red zone.  The lower red zone starts
// after the lower size field and the prev pointer.
static __inline__
void set_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
{
   UByte* b2 = (UByte*)b;
   b2[sizeof(SizeT) + sizeof(void*) + rz_byteno] = v;
}
// Write byte rz_byteno of the upper red zone.  The upper red zone grows
// downwards from just below the next pointer, so byte numbers count
// downwards in memory.
static __inline__
void set_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
{
   UByte* lb = last_byte(b);
   lb[-sizeof(SizeT) - sizeof(void*) - rz_byteno] = v;
}
// Read byte rz_byteno of the lower red zone (mirror of set_rz_lo_byte).
static __inline__
UByte get_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno )
{
   UByte* b2 = (UByte*)b;
   return b2[sizeof(SizeT) + sizeof(void*) + rz_byteno];
}
// Read byte rz_byteno of the upper red zone (mirror of set_rz_hi_byte).
static __inline__
UByte get_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno )
{
   UByte* lb = last_byte(b);
   return lb[-sizeof(SizeT) - sizeof(void*) - rz_byteno];
}
299
300
nethercote2d5b8162004-08-11 09:40:52 +0000301// Return the minimum bszB for a block in this arena. Can have zero-length
302// payloads, so it's the size of the admin bytes.
303static __inline__
njn0e742df2004-11-30 13:26:29 +0000304SizeT min_useful_bszB ( Arena* a )
nethercote2d5b8162004-08-11 09:40:52 +0000305{
306 return overhead_szB(a);
307}
308
309// Convert payload size <--> block size (both in bytes).
310static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000311SizeT pszB_to_bszB ( Arena* a, SizeT pszB )
nethercote2d5b8162004-08-11 09:40:52 +0000312{
nethercote2d5b8162004-08-11 09:40:52 +0000313 return pszB + overhead_szB(a);
314}
315static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000316SizeT bszB_to_pszB ( Arena* a, SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000317{
nethercote7ac7f7b2004-11-02 12:36:02 +0000318 vg_assert(bszB >= overhead_szB(a));
319 return bszB - overhead_szB(a);
nethercote2d5b8162004-08-11 09:40:52 +0000320}
321
322
323/*------------------------------------------------------------*/
324/*--- Arena management ---*/
325/*------------------------------------------------------------*/
326
327#define CORE_ARENA_MIN_SZB 1048576
328
329// The arena structures themselves.
330static Arena vg_arena[VG_N_ARENAS];
331
332// Functions external to this module identify arenas using ArenaIds,
333// not Arena*s. This fn converts the former to the latter.
334static Arena* arenaId_to_ArenaP ( ArenaId arena )
335{
336 vg_assert(arena >= 0 && arena < VG_N_ARENAS);
337 return & vg_arena[arena];
338}
339
340// Initialise an arena. rz_szB is the minimum redzone size; it might be
njn30490552005-03-13 06:30:42 +0000341// made bigger to ensure that VG_MIN_MALLOC_SZB is observed.
// Initialise an arena.  rz_szB is the minimum redzone size; it might be
// made bigger to ensure that VG_MIN_MALLOC_SZB is observed.
static
void arena_init ( ArenaId aid, Char* name, SizeT rz_szB, SizeT min_sblock_szB )
{
   SizeT i;
   Arena* a = arenaId_to_ArenaP(aid);

   vg_assert(rz_szB < 128);   // ensure reasonable size
   // Superblocks are carved out in whole pages.
   vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
   a->name = name;
   a->clientmem = ( VG_AR_CLIENT == aid ? True : False );

   // The size of the low and high admin sections in a block must be a
   // multiple of VG_MIN_MALLOC_SZB.  So we round up the asked-for
   // redzone size if necessary to achieve this.
   a->rz_szB = rz_szB;
   while (0 != overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++;
   // Low and high overheads are symmetric, so rounding the low one
   // suffices for both.
   vg_assert(overhead_szB_lo(a) == overhead_szB_hi(a));

   a->min_sblock_szB = min_sblock_szB;
   // Start with empty freelists and no superblocks; memory is mmap'd lazily.
   for (i = 0; i < N_MALLOC_LISTS; i++) a->freelist[i] = NULL;
   a->sblocks = NULL;
   a->bytes_on_loan = 0;
   a->bytes_mmaped = 0;
   a->bytes_on_loan_max = 0;
}
367
368/* Print vital stats for an arena. */
/* Print vital stats (mmap'd total, high-water mark, current loan) for
   every arena. */
void VG_(print_all_arena_stats) ( void )
{
   UInt i;
   for (i = 0; i < VG_N_ARENAS; i++) {
      Arena* a = arenaId_to_ArenaP(i);
      // NOTE(review): %8d is paired with SizeT arguments; on 64-bit targets
      // that is a specifier/width mismatch -- confirm VG_(message)'s
      // formatter tolerates this or switch to a wide specifier.
      VG_(message)(Vg_DebugMsg,
         "%8s: %8d mmap'd, %8d/%8d max/curr",
         a->name, a->bytes_mmaped, a->bytes_on_loan_max, a->bytes_on_loan
      );
   }
}
380
381/* This library is self-initialising, as it makes this more self-contained,
382 less coupled with the outside world. Hence VG_(arena_malloc)() and
383 VG_(arena_free)() below always call ensure_mm_init() to ensure things are
384 correctly initialised. */
385static
386void ensure_mm_init ( void )
387{
njn0e742df2004-11-30 13:26:29 +0000388 static SizeT client_rz_szB;
389 static Bool init_done = False;
nethercote2d5b8162004-08-11 09:40:52 +0000390
391 if (init_done) {
392 // Make sure the client arena's redzone size never changes. Could
393 // happen if VG_(arena_malloc) was called too early, ie. before the
394 // tool was loaded.
395 vg_assert(client_rz_szB == VG_(vg_malloc_redzone_szB));
396 return;
397 }
398
399 /* No particular reason for this figure, it's just smallish */
njnca82cc02004-11-22 17:18:48 +0000400 tl_assert(VG_(vg_malloc_redzone_szB) < 128);
nethercote2d5b8162004-08-11 09:40:52 +0000401 client_rz_szB = VG_(vg_malloc_redzone_szB);
402
403 /* Use checked red zones (of various sizes) for our internal stuff,
404 and an unchecked zone of arbitrary size for the client. Of
405 course the client's red zone can be checked by the tool, eg.
406 by using addressibility maps, but not by the mechanism implemented
407 here, which merely checks at the time of freeing that the red
408 zone bytes are unchanged.
409
410 Nb: redzone sizes are *minimums*; they could be made bigger to ensure
411 alignment. Eg. on 32-bit machines, 4 becomes 8, and 12 becomes 16;
412 but on 64-bit machines 4 stays as 4, and 12 stays as 12 --- the extra
413 4 bytes in both are accounted for by the larger prev/next ptr.
414 */
415 arena_init ( VG_AR_CORE, "core", 4, CORE_ARENA_MIN_SZB );
416 arena_init ( VG_AR_TOOL, "tool", 4, 1048576 );
417 arena_init ( VG_AR_SYMTAB, "symtab", 4, 1048576 );
nethercote2d5b8162004-08-11 09:40:52 +0000418 arena_init ( VG_AR_CLIENT, "client", client_rz_szB, 1048576 );
419 arena_init ( VG_AR_DEMANGLE, "demangle", 12/*paranoid*/, 65536 );
420 arena_init ( VG_AR_EXECTXT, "exectxt", 4, 65536 );
421 arena_init ( VG_AR_ERRORS, "errors", 4, 65536 );
nethercote2d5b8162004-08-11 09:40:52 +0000422
423 init_done = True;
424# ifdef DEBUG_MALLOC
425 VG_(sanity_check_malloc_all)();
426# endif
427}
428
429
430/*------------------------------------------------------------*/
431/*--- Superblock management ---*/
432/*------------------------------------------------------------*/
433
434// Align ptr p upwards to an align-sized boundary.
435static
nethercote7ac7f7b2004-11-02 12:36:02 +0000436void* align_upwards ( void* p, SizeT align )
nethercote2d5b8162004-08-11 09:40:52 +0000437{
438 Addr a = (Addr)p;
439 if ((a % align) == 0) return (void*)a;
440 return (void*)(a - (a % align) + align);
441}
442
443// If not enough memory available, either aborts (for non-client memory)
444// or returns 0 (for client memory).
// Allocate a new Superblock of at least cszB usable bytes (rounded up to
// the arena minimum and then to a whole number of pages).
// If not enough memory available, either aborts (for non-client memory)
// or returns 0 (for client memory).
static
Superblock* newSuperblock ( Arena* a, SizeT cszB )
{
   // The extra VG_MIN_MALLOC_SZB bytes are for possible alignment up.
   static UByte bootstrap_superblock[CORE_ARENA_MIN_SZB+VG_MIN_MALLOC_SZB];
   // NOTE(review): initialising this to True makes the bootstrap branch
   // below unreachable; the trailing "//False" suggests a temporary hack.
   // Confirm whether the static bootstrap superblock is still wanted.
   static Bool called_before = True; //False;
   Superblock* sb;

   // Take into account admin bytes in the Superblock.
   cszB += sizeof(Superblock);

   if (cszB < a->min_sblock_szB) cszB = a->min_sblock_szB;
   // Round up to a whole number of pages.
   while ((cszB % VKI_PAGE_SIZE) > 0) cszB++;

   if (!called_before) {
      // First time we're called -- use the special static bootstrap
      // superblock (see comment at top of main() for details).
      called_before = True;
      vg_assert(a == arenaId_to_ArenaP(VG_AR_CORE));
      vg_assert(CORE_ARENA_MIN_SZB >= cszB);
      // Ensure sb is suitably aligned.
      sb = (Superblock*)align_upwards( bootstrap_superblock,
                                       VG_MIN_MALLOC_SZB );
   } else if (a->clientmem) {
      // client allocation -- return 0 to client if it fails
      sb = (Superblock *)
           VG_(get_memory_from_mmap_for_client)
              (0, cszB, VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC, 0);
      if (NULL == sb)
         return 0;
   } else {
      // non-client allocation -- aborts if it fails
      sb = VG_(get_memory_from_mmap) ( cszB, "newSuperblock" );
   }
   vg_assert(NULL != sb);
   //zzVALGRIND_MAKE_WRITABLE(sb, cszB);
   // All superblocks must be VG_MIN_MALLOC_SZB-aligned so that Block
   // payloads inside them end up aligned too.
   vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
   sb->n_payload_bytes = cszB - sizeof(Superblock);
   a->bytes_mmaped += cszB;
   if (0)
      VG_(message)(Vg_DebugMsg, "newSuperblock, %d payload bytes",
                                sb->n_payload_bytes);
   return sb;
}
489
490// Find the superblock containing the given chunk.
491static
492Superblock* findSb ( Arena* a, Block* b )
493{
494 Superblock* sb;
495 for (sb = a->sblocks; sb; sb = sb->next)
496 if ((Block*)&sb->payload_bytes[0] <= b
497 && b < (Block*)&sb->payload_bytes[sb->n_payload_bytes])
498 return sb;
499 VG_(printf)("findSb: can't find pointer %p in arena `%s'\n", b, a->name );
500 VG_(core_panic)("findSb: VG_(arena_free)() in wrong arena?");
501 return NULL; /*NOTREACHED*/
502}
503
sewardjde4a1d02002-03-22 01:27:54 +0000504
fitzhardinge98abfc72003-12-16 02:05:15 +0000505/*------------------------------------------------------------*/
506/*--- Command line options ---*/
507/*------------------------------------------------------------*/
508
/* Round malloc sizes up to a multiple of VG_SLOPPY_MALLOC_SZB bytes?
   default: NO
   Nb: the allocator always rounds blocks up to a multiple of
   VG_MIN_MALLOC_SZB.  VG_(clo_sloppy_malloc) is relevant eg. for
   Memcheck, which will be byte-precise with addressability maps on its
   malloc allocations unless --sloppy-malloc=yes. */
Bool VG_(clo_sloppy_malloc) = False;

/* DEBUG: print malloc details?  default: NO */
Bool VG_(clo_trace_malloc) = False;

/* Minimum alignment in functions that don't specify alignment explicitly.
   default: VG_MIN_MALLOC_SZB (the old comment said 0, but the initialiser
   below has been VG_MIN_MALLOC_SZB for some time). */
UInt VG_(clo_alignment) = VG_MIN_MALLOC_SZB;
fitzhardinge98abfc72003-12-16 02:05:15 +0000523
524
/* Parse one command-line option relevant to the allocator.  Returns True
   if the option was recognised (an invalid value bails out via
   VG_(bad_option)), False if the option belongs to someone else. */
Bool VG_(replacement_malloc_process_cmd_line_option)(Char* arg)
{
   if (VG_CLO_STREQN(12, arg, "--alignment=")) {
      VG_(clo_alignment) = (UInt)VG_(atoll)(&arg[12]);

      // Alignment must be a power of two in [VG_MIN_MALLOC_SZB, 4096].
      if (VG_(clo_alignment) < VG_MIN_MALLOC_SZB
          || VG_(clo_alignment) > 4096
          || VG_(log2)( VG_(clo_alignment) ) == -1 /* not a power of 2 */) {
         VG_(message)(Vg_UserMsg, "");
         VG_(message)(Vg_UserMsg,
            "Invalid --alignment= setting.  "
            "Should be a power of 2, >= %d, <= 4096.", VG_MIN_MALLOC_SZB);
         VG_(bad_option)("--alignment");
      }
   }

   // Nb: VG_BOOL_CLO expands to an "if" statement, hence the bare "else"s.
   else VG_BOOL_CLO("--sloppy-malloc",  VG_(clo_sloppy_malloc))
   else VG_BOOL_CLO("--trace-malloc",   VG_(clo_trace_malloc))
   else
      return False;

   return True;
}
548
/* Print user-visible usage text for the options handled above. */
void VG_(replacement_malloc_print_usage)(void)
{
   VG_(printf)(
" --sloppy-malloc=no|yes round malloc sizes to multiple of %d? [no]\n"
" --alignment=<number> set minimum alignment of allocations [%d]\n",
   VG_SLOPPY_MALLOC_SZB, VG_MIN_MALLOC_SZB
   );
}
557
/* Print debug-only usage text for the options handled above. */
void VG_(replacement_malloc_print_debug_usage)(void)
{
   VG_(printf)(
" --trace-malloc=no|yes show client malloc details? [no]\n"
   );
}
564
sewardjde4a1d02002-03-22 01:27:54 +0000565
566/*------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +0000567/*--- Functions for working with freelists. ---*/
568/*------------------------------------------------------------*/
569
nethercote2d5b8162004-08-11 09:40:52 +0000570// Nb: Determination of which freelist a block lives on is based on the
571// payload size, not block size.
sewardjde4a1d02002-03-22 01:27:54 +0000572
nethercote2d5b8162004-08-11 09:40:52 +0000573// Convert a payload size in bytes to a freelist number.
sewardjde4a1d02002-03-22 01:27:54 +0000574static
nethercote7ac7f7b2004-11-02 12:36:02 +0000575UInt pszB_to_listNo ( SizeT pszB )
sewardjde4a1d02002-03-22 01:27:54 +0000576{
nethercote2d5b8162004-08-11 09:40:52 +0000577 vg_assert(0 == pszB % VG_MIN_MALLOC_SZB);
578 pszB /= VG_MIN_MALLOC_SZB;
579 if (pszB <= 2) return 0;
580 if (pszB <= 3) return 1;
581 if (pszB <= 4) return 2;
582 if (pszB <= 5) return 3;
583 if (pszB <= 6) return 4;
584 if (pszB <= 7) return 5;
585 if (pszB <= 8) return 6;
586 if (pszB <= 9) return 7;
587 if (pszB <= 10) return 8;
588 if (pszB <= 11) return 9;
589 if (pszB <= 12) return 10;
590 if (pszB <= 16) return 11;
591 if (pszB <= 32) return 12;
592 if (pszB <= 64) return 13;
593 if (pszB <= 128) return 14;
sewardjde4a1d02002-03-22 01:27:54 +0000594 return 15;
595}
596
nethercote2d5b8162004-08-11 09:40:52 +0000597// What is the minimum payload size for a given list?
sewardjde4a1d02002-03-22 01:27:54 +0000598static
nethercote7ac7f7b2004-11-02 12:36:02 +0000599SizeT listNo_to_pszB_min ( UInt listNo )
sewardjde4a1d02002-03-22 01:27:54 +0000600{
nethercote7ac7f7b2004-11-02 12:36:02 +0000601 SizeT pszB = 0;
njn6e6588c2005-03-13 18:52:48 +0000602 vg_assert(listNo <= N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +0000603 while (pszB_to_listNo(pszB) < listNo) pszB += VG_MIN_MALLOC_SZB;
604 return pszB;
sewardjde4a1d02002-03-22 01:27:54 +0000605}
606
nethercote2d5b8162004-08-11 09:40:52 +0000607// What is the maximum payload size for a given list?
sewardjde4a1d02002-03-22 01:27:54 +0000608static
nethercote7ac7f7b2004-11-02 12:36:02 +0000609SizeT listNo_to_pszB_max ( UInt listNo )
sewardjde4a1d02002-03-22 01:27:54 +0000610{
njn6e6588c2005-03-13 18:52:48 +0000611 vg_assert(listNo <= N_MALLOC_LISTS);
612 if (listNo == N_MALLOC_LISTS-1) {
nethercote2d5b8162004-08-11 09:40:52 +0000613 return MAX_PSZB;
sewardjde4a1d02002-03-22 01:27:54 +0000614 } else {
nethercote2d5b8162004-08-11 09:40:52 +0000615 return listNo_to_pszB_min(listNo+1) - 1;
sewardjde4a1d02002-03-22 01:27:54 +0000616 }
617}
618
619
620/* A nasty hack to try and reduce fragmentation. Try and replace
621 a->freelist[lno] with another block on the same list but with a
622 lower address, with the idea of attempting to recycle the same
623 blocks rather than cruise through the address space. */
/* A nasty hack to try and reduce fragmentation.  Try and replace
   a->freelist[lno] with another block on the same list but with a
   lower address, with the idea of attempting to recycle the same
   blocks rather than cruise through the address space. */
static
void swizzle ( Arena* a, UInt lno )
{
   Block* p_best;
   Block* pp;
   Block* pn;
   UInt i;

   p_best = a->freelist[lno];
   if (p_best == NULL) return;   // empty list: nothing to do

   // Walk up to 20 steps forwards and backwards around the circular,
   // doubly-linked freelist, tracking the lowest-addressed block seen.
   pn = pp = p_best;
   for (i = 0; i < 20; i++) {
      pn = get_next_b(pn);
      pp = get_prev_b(pp);
      if (pn < p_best) p_best = pn;
      if (pp < p_best) p_best = pp;
   }
   if (p_best < a->freelist[lno]) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)("retreat by %d\n", a->freelist[lno] - p_best);
#     endif
      // Rotate the list head to the lowest-addressed block found.
      a->freelist[lno] = p_best;
   }
}
649
650
651/*------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +0000652/*--- Sanity-check/debugging machinery. ---*/
653/*------------------------------------------------------------*/
654
njn6e6588c2005-03-13 18:52:48 +0000655#define REDZONE_LO_MASK 0x31
656#define REDZONE_HI_MASK 0x7c
nethercote2d5b8162004-08-11 09:40:52 +0000657
nethercote7ac7f7b2004-11-02 12:36:02 +0000658// Do some crude sanity checks on a Block.
// Do some crude sanity checks on a Block: the two size fields must agree,
// and (for in-use blocks in non-client arenas) every red-zone byte must
// still hold the address-derived pattern written at allocation time.
static
Bool blockSane ( Arena* a, Block* b )
{
#  define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
   UInt i;
   if (get_bszB_lo(b) != get_bszB_hi(b))
      {BLEAT("sizes");return False;}
   // Client-arena red zones are not filled by us, so only check our own.
   if (!a->clientmem && is_inuse_bszB(get_bszB_lo(b))) {
      for (i = 0; i < a->rz_szB; i++) {
         if (get_rz_lo_byte(a, b, i) !=
            (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK))
            {BLEAT("redzone-lo");return False;}
         if (get_rz_hi_byte(a, b, i) !=
            (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK))
            {BLEAT("redzone-hi");return False;}
      }
   }
   return True;
#  undef BLEAT
}
679
nethercote2d5b8162004-08-11 09:40:52 +0000680// Print superblocks (only for debugging).
// Print superblocks (only for debugging).
static
void ppSuperblocks ( Arena* a )
{
   UInt i, blockno;
   SizeT b_bszB;
   Block* b;
   Superblock* sb = a->sblocks;
   blockno = 1;

   while (sb) {
      VG_(printf)( "\n" );
      VG_(printf)( "superblock %d at %p, sb->n_pl_bs = %d, next = %p\n",
                   blockno++, sb, sb->n_payload_bytes, sb->next );
      // Walk the blocks: each iteration advances by the current block's
      // plain size, so the walk must land exactly on the superblock end.
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         b = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_lo(b);
         VG_(printf)( " block at %d, bszB %d: ", i, mk_plain_bszB(b_bszB) );
         VG_(printf)( "%s, ", is_inuse_bszB(b_bszB) ? "inuse" : "free");
         VG_(printf)( "%s\n", blockSane(a, b) ? "ok" : "BAD" );
      }
      vg_assert(i == sb->n_payload_bytes);   // no overshoot at end of Sb
      sb = sb->next;
   }
   VG_(printf)( "end of superblocks\n\n" );
}
// Sanity check both the superblocks and the chains.
// Pass 1 walks every Block in every Superblock, verifying per-block
// sanity, that no two adjacent free blocks exist (they should have
// been merged), and recomputing bytes-on-loan.  Pass 2 walks every
// free list, checking linkage and that each block's payload size
// belongs to that list's size class.  Any failure panics via BOMB.
static void sanity_check_malloc_arena ( ArenaId aid )
{
   UInt        i, superblockctr, blockctr_sb, blockctr_li;
   UInt        blockctr_sb_free, listno;
   SizeT       b_bszB, b_pszB, list_min_pszB, list_max_pszB;
   Superblock* sb;
   Bool        thisFree, lastWasFree;
   Block*      b;
   Block*      b_prev;
   SizeT       arena_bytes_on_loan;
   Arena*      a;

#  define BOMB VG_(core_panic)("sanity_check_malloc_arena")

   a = arenaId_to_ArenaP(aid);

   // First, traverse all the superblocks, inspecting the Blocks in each.
   superblockctr = blockctr_sb = blockctr_sb_free = 0;
   arena_bytes_on_loan = 0;
   sb = a->sblocks;
   while (sb) {
      lastWasFree = False;
      superblockctr++;
      // Step through the superblock block-by-block; the loop stride is
      // the plain size of the block just examined.
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         blockctr_sb++;
         b      = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_lo(b);
         if (!blockSane(a, b)) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d (bszB %d): "
                        " BAD\n", sb, i, b_bszB );
            BOMB;
         }
         thisFree = !is_inuse_bszB(b_bszB);
         // Two free blocks in a row should have been coalesced by
         // VG_(arena_free).
         if (thisFree && lastWasFree) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d (bszB %d): "
                        "UNMERGED FREES\n",
                        sb, i, b_bszB );
            BOMB;
         }
         if (thisFree) blockctr_sb_free++;
         if (!thisFree)
            arena_bytes_on_loan += bszB_to_pszB(a, b_bszB);
         lastWasFree = thisFree;
      }
      if (i > sb->n_payload_bytes) {
         VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
                      "overshoots end\n", sb);
         BOMB;
      }
      sb = sb->next;
   }

   // Recomputed loan total must match the arena's running count.
   if (arena_bytes_on_loan != a->bytes_on_loan) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: a->bytes_on_loan %d, "
                   "arena_bytes_on_loan %d: "
                   "MISMATCH\n", a->bytes_on_loan, arena_bytes_on_loan);
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   /* Second, traverse each list, checking that the back pointers make
      sense, counting blocks encountered, and checking that each block
      is an appropriate size for this list. */
   blockctr_li = 0;
   for (listno = 0; listno < N_MALLOC_LISTS; listno++) {
      list_min_pszB = listNo_to_pszB_min(listno);
      list_max_pszB = listNo_to_pszB_max(listno);
      b = a->freelist[listno];
      if (b == NULL) continue;
      // Lists are circular: walk until we come back to the head.
      while (True) {
         b_prev = b;
         b = get_next_b(b);
         if (get_prev_b(b) != b_prev) {
            VG_(printf)( "sanity_check_malloc_arena: list %d at %p: "
                         "BAD LINKAGE\n",
                         listno, b );
            BOMB;
         }
         b_pszB = bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(b)));
         if (b_pszB < list_min_pszB || b_pszB > list_max_pszB) {
            VG_(printf)(
               "sanity_check_malloc_arena: list %d at %p: "
               "WRONG CHAIN SIZE %dB (%dB, %dB)\n",
               listno, b, b_pszB, list_min_pszB, list_max_pszB );
            BOMB;
         }
         blockctr_li++;
         if (b == a->freelist[listno]) break;
      }
   }

   // Every free block seen in pass 1 must appear on exactly one list.
   if (blockctr_sb_free != blockctr_li) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: BLOCK COUNT MISMATCH "
                   "(via sbs %d, via lists %d)\n",
                   blockctr_sb_free, blockctr_li );
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   if (VG_(clo_verbosity) > 2)
      VG_(message)(Vg_DebugMsg,
                   "%8s: %2d sbs, %5d bs, %2d/%-2d free bs, "
                   "%7d mmap, %7d loan",
                   a->name,
                   superblockctr,
                   blockctr_sb, blockctr_sb_free, blockctr_li,
                   a->bytes_mmaped, a->bytes_on_loan);
#  undef BOMB
}
821
822
nethercote885dd912004-08-03 23:14:00 +0000823void VG_(sanity_check_malloc_all) ( void )
sewardjde4a1d02002-03-22 01:27:54 +0000824{
nethercote7ac7f7b2004-11-02 12:36:02 +0000825 UInt i;
sewardjde4a1d02002-03-22 01:27:54 +0000826 for (i = 0; i < VG_N_ARENAS; i++)
nethercote885dd912004-08-03 23:14:00 +0000827 sanity_check_malloc_arena ( i );
sewardjde4a1d02002-03-22 01:27:54 +0000828}
829
sewardjde4a1d02002-03-22 01:27:54 +0000830
nethercote2d5b8162004-08-11 09:40:52 +0000831/*------------------------------------------------------------*/
832/*--- Creating and deleting blocks. ---*/
833/*------------------------------------------------------------*/
834
835// Mark the bytes at b .. b+bszB-1 as not in use, and add them to the
836// relevant free list.
837
838static
nethercote7ac7f7b2004-11-02 12:36:02 +0000839void mkFreeBlock ( Arena* a, Block* b, SizeT bszB, UInt b_lno )
jsewardb1a26ae2004-03-14 03:06:37 +0000840{
nethercote7ac7f7b2004-11-02 12:36:02 +0000841 SizeT pszB = bszB_to_pszB(a, bszB);
nethercote2d5b8162004-08-11 09:40:52 +0000842 vg_assert(b_lno == pszB_to_listNo(pszB));
sewardjb5f6f512005-03-10 23:59:00 +0000843 //zzVALGRIND_MAKE_WRITABLE(b, bszB);
nethercote2d5b8162004-08-11 09:40:52 +0000844 // Set the size fields and indicate not-in-use.
845 set_bszB_lo(b, mk_free_bszB(bszB));
846 set_bszB_hi(b, mk_free_bszB(bszB));
847
848 // Add to the relevant list.
849 if (a->freelist[b_lno] == NULL) {
850 set_prev_b(b, b);
851 set_next_b(b, b);
852 a->freelist[b_lno] = b;
853 } else {
854 Block* b_prev = get_prev_b(a->freelist[b_lno]);
855 Block* b_next = a->freelist[b_lno];
856 set_next_b(b_prev, b);
857 set_prev_b(b_next, b);
858 set_next_b(b, b_next);
859 set_prev_b(b, b_prev);
860 }
861# ifdef DEBUG_MALLOC
862 (void)blockSane(a,b);
863# endif
864}
865
866// Mark the bytes at b .. b+bszB-1 as in use, and set up the block
867// appropriately.
868static
nethercote7ac7f7b2004-11-02 12:36:02 +0000869void mkInuseBlock ( Arena* a, Block* b, SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000870{
nethercote7ac7f7b2004-11-02 12:36:02 +0000871 UInt i;
nethercote2d5b8162004-08-11 09:40:52 +0000872 vg_assert(bszB >= min_useful_bszB(a));
sewardjb5f6f512005-03-10 23:59:00 +0000873 //zzVALGRIND_MAKE_WRITABLE(b, bszB);
nethercote2d5b8162004-08-11 09:40:52 +0000874 set_bszB_lo(b, mk_inuse_bszB(bszB));
875 set_bszB_hi(b, mk_inuse_bszB(bszB));
876 set_prev_b(b, NULL); // Take off freelist
877 set_next_b(b, NULL); // ditto
878 if (!a->clientmem) {
879 for (i = 0; i < a->rz_szB; i++) {
njn6e6588c2005-03-13 18:52:48 +0000880 set_rz_lo_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK));
881 set_rz_hi_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK));
nethercote2d5b8162004-08-11 09:40:52 +0000882 }
883 }
884# ifdef DEBUG_MALLOC
885 (void)blockSane(a,b);
886# endif
887}
888
889// Remove a block from a given list. Does no sanity checking.
890static
nethercote7ac7f7b2004-11-02 12:36:02 +0000891void unlinkBlock ( Arena* a, Block* b, UInt listno )
nethercote2d5b8162004-08-11 09:40:52 +0000892{
njn6e6588c2005-03-13 18:52:48 +0000893 vg_assert(listno < N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +0000894 if (get_prev_b(b) == b) {
895 // Only one element in the list; treat it specially.
896 vg_assert(get_next_b(b) == b);
897 a->freelist[listno] = NULL;
898 } else {
899 Block* b_prev = get_prev_b(b);
900 Block* b_next = get_next_b(b);
901 a->freelist[listno] = b_prev;
902 set_next_b(b_prev, b_next);
903 set_prev_b(b_next, b_prev);
904 swizzle ( a, listno );
905 }
906 set_prev_b(b, NULL);
907 set_next_b(b, NULL);
jsewardb1a26ae2004-03-14 03:06:37 +0000908}
909
910
sewardjde4a1d02002-03-22 01:27:54 +0000911/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000912/*--- Core-visible functions. ---*/
sewardjde4a1d02002-03-22 01:27:54 +0000913/*------------------------------------------------------------*/
914
nethercote2d5b8162004-08-11 09:40:52 +0000915// Align the request size.
916static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000917SizeT align_req_pszB ( SizeT req_pszB )
nethercote2d5b8162004-08-11 09:40:52 +0000918{
nethercote7ac7f7b2004-11-02 12:36:02 +0000919 SizeT n = VG_MIN_MALLOC_SZB-1;
nethercote2d5b8162004-08-11 09:40:52 +0000920 return ((req_pszB + n) & (~n));
921}
922
nethercote7ac7f7b2004-11-02 12:36:02 +0000923void* VG_(arena_malloc) ( ArenaId aid, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +0000924{
nethercote7ac7f7b2004-11-02 12:36:02 +0000925 SizeT req_bszB, frag_bszB, b_bszB;
926 UInt lno;
sewardjde4a1d02002-03-22 01:27:54 +0000927 Superblock* new_sb;
nethercote2d5b8162004-08-11 09:40:52 +0000928 Block* b = NULL;
sewardjde4a1d02002-03-22 01:27:54 +0000929 Arena* a;
jsewardb1a26ae2004-03-14 03:06:37 +0000930 void* v;
sewardjde4a1d02002-03-22 01:27:54 +0000931
932 VGP_PUSHCC(VgpMalloc);
933
934 ensure_mm_init();
935 a = arenaId_to_ArenaP(aid);
936
nethercote7ac7f7b2004-11-02 12:36:02 +0000937 vg_assert(req_pszB < MAX_PSZB);
nethercote2d5b8162004-08-11 09:40:52 +0000938 req_pszB = align_req_pszB(req_pszB);
939 req_bszB = pszB_to_bszB(a, req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +0000940
nethercote2d5b8162004-08-11 09:40:52 +0000941 // Scan through all the big-enough freelists for a block.
njn6e6588c2005-03-13 18:52:48 +0000942 for (lno = pszB_to_listNo(req_pszB); lno < N_MALLOC_LISTS; lno++) {
sewardjde4a1d02002-03-22 01:27:54 +0000943 b = a->freelist[lno];
nethercote2d5b8162004-08-11 09:40:52 +0000944 if (NULL == b) continue; // If this list is empty, try the next one.
sewardjde4a1d02002-03-22 01:27:54 +0000945 while (True) {
nethercote2d5b8162004-08-11 09:40:52 +0000946 b_bszB = mk_plain_bszB(get_bszB_lo(b));
947 if (b_bszB >= req_bszB) goto obtained_block; // success!
948 b = get_next_b(b);
949 if (b == a->freelist[lno]) break; // traversed entire freelist
sewardjde4a1d02002-03-22 01:27:54 +0000950 }
sewardjde4a1d02002-03-22 01:27:54 +0000951 }
952
nethercote2d5b8162004-08-11 09:40:52 +0000953 // If we reach here, no suitable block found, allocate a new superblock
njn6e6588c2005-03-13 18:52:48 +0000954 vg_assert(lno == N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +0000955 new_sb = newSuperblock(a, req_bszB);
956 if (NULL == new_sb) {
957 // Should only fail if for client, otherwise, should have aborted
958 // already.
959 vg_assert(VG_AR_CLIENT == aid);
960 return NULL;
sewardjde4a1d02002-03-22 01:27:54 +0000961 }
nethercote2d5b8162004-08-11 09:40:52 +0000962 new_sb->next = a->sblocks;
963 a->sblocks = new_sb;
964 b = (Block*)&new_sb->payload_bytes[0];
965 lno = pszB_to_listNo(bszB_to_pszB(a, new_sb->n_payload_bytes));
966 mkFreeBlock ( a, b, new_sb->n_payload_bytes, lno);
967 // fall through
sewardjde4a1d02002-03-22 01:27:54 +0000968
nethercote2d5b8162004-08-11 09:40:52 +0000969 obtained_block:
970 // Ok, we can allocate from b, which lives in list lno.
sewardjde4a1d02002-03-22 01:27:54 +0000971 vg_assert(b != NULL);
njn6e6588c2005-03-13 18:52:48 +0000972 vg_assert(lno < N_MALLOC_LISTS);
sewardjde4a1d02002-03-22 01:27:54 +0000973 vg_assert(a->freelist[lno] != NULL);
nethercote2d5b8162004-08-11 09:40:52 +0000974 b_bszB = mk_plain_bszB(get_bszB_lo(b));
975 // req_bszB is the size of the block we are after. b_bszB is the
976 // size of what we've actually got. */
977 vg_assert(b_bszB >= req_bszB);
sewardjde4a1d02002-03-22 01:27:54 +0000978
nethercote2d5b8162004-08-11 09:40:52 +0000979 // Could we split this block and still get a useful fragment?
980 frag_bszB = b_bszB - req_bszB;
981 if (frag_bszB >= min_useful_bszB(a)) {
982 // Yes, split block in two, put the fragment on the appropriate free
983 // list, and update b_bszB accordingly.
984 // printf( "split %dB into %dB and %dB\n", b_bszB, req_bszB, frag_bszB );
sewardjde4a1d02002-03-22 01:27:54 +0000985 unlinkBlock(a, b, lno);
nethercote2d5b8162004-08-11 09:40:52 +0000986 mkInuseBlock(a, b, req_bszB);
987 mkFreeBlock(a, &b[req_bszB], frag_bszB,
988 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
989 b_bszB = mk_plain_bszB(get_bszB_lo(b));
990 } else {
991 // No, mark as in use and use as-is.
992 unlinkBlock(a, b, lno);
993 mkInuseBlock(a, b, b_bszB);
sewardjde4a1d02002-03-22 01:27:54 +0000994 }
sewardjde4a1d02002-03-22 01:27:54 +0000995
nethercote2d5b8162004-08-11 09:40:52 +0000996 // Update stats
997 a->bytes_on_loan += bszB_to_pszB(a, b_bszB);
sewardjde4a1d02002-03-22 01:27:54 +0000998 if (a->bytes_on_loan > a->bytes_on_loan_max)
999 a->bytes_on_loan_max = a->bytes_on_loan;
1000
1001# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00001002 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001003# endif
1004
njn25e49d8e72002-09-23 09:36:25 +00001005 VGP_POPCC(VgpMalloc);
nethercote2d5b8162004-08-11 09:40:52 +00001006 v = get_block_payload(a, b);
1007 vg_assert( (((Addr)v) & (VG_MIN_MALLOC_SZB-1)) == 0 );
sewardjb5f6f512005-03-10 23:59:00 +00001008
1009 VALGRIND_MALLOCLIKE_BLOCK(v, req_pszB, 0, False);
jsewardb1a26ae2004-03-14 03:06:37 +00001010 return v;
sewardjde4a1d02002-03-22 01:27:54 +00001011}
1012
1013
// Return a block previously obtained from this arena to the free
// pool, coalescing it with a free successor and/or predecessor where
// possible so that adjacent free blocks never persist.
// free(NULL) is a no-op.
void VG_(arena_free) ( ArenaId aid, void* ptr )
{
   Superblock* sb;
   UByte*      sb_start;
   UByte*      sb_end;
   Block*      other;
   Block*      b;
   SizeT       b_bszB, b_pszB, other_bszB;
   UInt        b_listno;
   Arena*      a;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   if (ptr == NULL) {
      VGP_POPCC(VgpMalloc);
      return;
   }

   b = get_payload_block(a, ptr);

#  ifdef DEBUG_MALLOC
   vg_assert(blockSane(a, b));
#  endif

   // Stats: this payload is no longer on loan.
   a->bytes_on_loan -= bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(b)));

   sb       = findSb( a, b );
   sb_start = &sb->payload_bytes[0];
   sb_end   = &sb->payload_bytes[sb->n_payload_bytes - 1];

   // Put this chunk back on a list somewhere.
   b_bszB   = get_bszB_lo(b);
   b_pszB   = bszB_to_pszB(a, b_bszB);
   b_listno = pszB_to_listNo(b_pszB);
   mkFreeBlock( a, b, b_bszB, b_listno );

   // See if this block can be merged with its successor.
   // First test if we're far enough before the superblock's end to possibly
   // have a successor.
   // (Pointer arithmetic here advances byte-wise; cf. &b[req_bszB] in
   // VG_(arena_malloc) — presumably Block is a byte-sized type.)
   other = b + b_bszB;
   if (other+min_useful_bszB(a)-1 <= (Block*)sb_end) {
      // Ok, we have a successor, merge if it's not in use.
      other_bszB = get_bszB_lo(other);
      if (!is_inuse_bszB(other_bszB)) {
         // VG_(printf)( "merge-successor\n");
         other_bszB = mk_plain_bszB(other_bszB);
#        ifdef DEBUG_MALLOC
         vg_assert(blockSane(a, other));
#        endif
         // Take both halves off their lists, then re-free the union.
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other, pszB_to_listNo(bszB_to_pszB(a,other_bszB)) );
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
      }
   } else {
      // Not enough space for successor: check that b is the last block
      // ie. there are no unused bytes at the end of the Superblock.
      vg_assert(other-1 == (Block*)sb_end);
   }

   // Then see if this block can be merged with its predecessor.
   // First test if we're far enough after the superblock's start to possibly
   // have a predecessor.
   if (b >= (Block*)sb_start + min_useful_bszB(a)) {
      // Ok, we have a predecessor, merge if it's not in use.
      other = get_predecessor_block( b );
      other_bszB = get_bszB_lo(other);
      if (!is_inuse_bszB(other_bszB)) {
         // VG_(printf)( "merge-predecessor\n");
         other_bszB = mk_plain_bszB(other_bszB);
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other, pszB_to_listNo(bszB_to_pszB(a, other_bszB)) );
         // The merged block now starts at the predecessor.
         b = other;
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
      }
   } else {
      // Not enough space for predecessor: check that b is the first block,
      // ie. there are no unused bytes at the start of the Superblock.
      vg_assert((Block*)sb_start == b);
   }

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   VALGRIND_FREELIKE_BLOCK(ptr, 0);

   VGP_POPCC(VgpMalloc);
}
1109
1110
1111/*
1112 The idea for malloc_aligned() is to allocate a big block, base, and
1113 then split it into two parts: frag, which is returned to the the
1114 free pool, and align, which is the bit we're really after. Here's
1115 a picture. L and H denote the block lower and upper overheads, in
nethercote2d5b8162004-08-11 09:40:52 +00001116 bytes. The details are gruesome. Note it is slightly complicated
sewardjde4a1d02002-03-22 01:27:54 +00001117 because the initial request to generate base may return a bigger
1118 block than we asked for, so it is important to distinguish the base
1119 request size and the base actual size.
1120
1121 frag_b align_b
1122 | |
1123 | frag_p | align_p
1124 | | | |
1125 v v v v
1126
1127 +---+ +---+---+ +---+
1128 | L |----------------| H | L |---------------| H |
1129 +---+ +---+---+ +---+
1130
1131 ^ ^ ^
1132 | | :
1133 | base_p this addr must be aligned
1134 |
1135 base_b
1136
1137 . . . . . . .
nethercote2d5b8162004-08-11 09:40:52 +00001138 <------ frag_bszB -------> . . .
1139 . <------------- base_pszB_act -----------> .
sewardjde4a1d02002-03-22 01:27:54 +00001140 . . . . . . .
1141
1142*/
njn083f3022005-03-13 18:33:02 +00001143static
1144void* arena_malloc_aligned ( ArenaId aid, SizeT req_alignB, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001145{
nethercote7ac7f7b2004-11-02 12:36:02 +00001146 SizeT base_pszB_req, base_pszB_act, frag_bszB;
nethercote2d5b8162004-08-11 09:40:52 +00001147 Block *base_b, *align_b;
1148 UByte *base_p, *align_p;
nethercote7ac7f7b2004-11-02 12:36:02 +00001149 SizeT saved_bytes_on_loan;
sewardjde4a1d02002-03-22 01:27:54 +00001150 Arena* a;
1151
njn25e49d8e72002-09-23 09:36:25 +00001152 VGP_PUSHCC(VgpMalloc);
1153
sewardjde4a1d02002-03-22 01:27:54 +00001154 ensure_mm_init();
1155 a = arenaId_to_ArenaP(aid);
1156
nethercote7ac7f7b2004-11-02 12:36:02 +00001157 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00001158
nethercote2d5b8162004-08-11 09:40:52 +00001159 // Check that the requested alignment seems reasonable; that is, is
1160 // a power of 2.
1161 if (req_alignB < VG_MIN_MALLOC_SZB
1162 || req_alignB > 1048576
1163 || VG_(log2)( VG_(clo_alignment) ) == -1 /* not a power of 2 */) {
njn083f3022005-03-13 18:33:02 +00001164 VG_(printf)("arena_malloc_aligned(%p, %d, %d)\nbad alignment",
nethercote2d5b8162004-08-11 09:40:52 +00001165 a, req_alignB, req_pszB );
njn083f3022005-03-13 18:33:02 +00001166 VG_(core_panic)("arena_malloc_aligned");
nethercote2d5b8162004-08-11 09:40:52 +00001167 /*NOTREACHED*/
sewardjde4a1d02002-03-22 01:27:54 +00001168 }
nethercote2d5b8162004-08-11 09:40:52 +00001169 // Paranoid
1170 vg_assert(req_alignB % VG_MIN_MALLOC_SZB == 0);
sewardjde4a1d02002-03-22 01:27:54 +00001171
1172 /* Required payload size for the aligned chunk. */
nethercote2d5b8162004-08-11 09:40:52 +00001173 req_pszB = align_req_pszB(req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001174
nethercote2d5b8162004-08-11 09:40:52 +00001175 /* Payload size to request for the big block that we will split up. */
1176 base_pszB_req = req_pszB + min_useful_bszB(a) + req_alignB;
sewardjde4a1d02002-03-22 01:27:54 +00001177
1178 /* Payload ptr for the block we are going to split. Note this
1179 changes a->bytes_on_loan; we save and restore it ourselves. */
1180 saved_bytes_on_loan = a->bytes_on_loan;
nethercote2d5b8162004-08-11 09:40:52 +00001181 base_p = VG_(arena_malloc) ( aid, base_pszB_req );
sewardjde4a1d02002-03-22 01:27:54 +00001182 a->bytes_on_loan = saved_bytes_on_loan;
1183
1184 /* Block ptr for the block we are going to split. */
nethercote2d5b8162004-08-11 09:40:52 +00001185 base_b = get_payload_block ( a, base_p );
sewardjde4a1d02002-03-22 01:27:54 +00001186
1187 /* Pointer to the payload of the aligned block we are going to
1188 return. This has to be suitably aligned. */
nethercote2d5b8162004-08-11 09:40:52 +00001189 align_p = align_upwards ( base_b + 2 * overhead_szB_lo(a)
1190 + overhead_szB_hi(a),
sewardjde4a1d02002-03-22 01:27:54 +00001191 req_alignB );
nethercote2d5b8162004-08-11 09:40:52 +00001192 align_b = get_payload_block(a, align_p);
sewardjde4a1d02002-03-22 01:27:54 +00001193
1194 /* The block size of the fragment we will create. This must be big
1195 enough to actually create a fragment. */
nethercote2d5b8162004-08-11 09:40:52 +00001196 frag_bszB = align_b - base_b;
1197
1198 vg_assert(frag_bszB >= min_useful_bszB(a));
sewardjde4a1d02002-03-22 01:27:54 +00001199
1200 /* The actual payload size of the block we are going to split. */
nethercote2d5b8162004-08-11 09:40:52 +00001201 base_pszB_act = bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(base_b)));
sewardjde4a1d02002-03-22 01:27:54 +00001202
nethercote2d5b8162004-08-11 09:40:52 +00001203 /* Create the fragment block, and put it back on the relevant free list. */
1204 mkFreeBlock ( a, base_b, frag_bszB,
1205 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)) );
sewardjde4a1d02002-03-22 01:27:54 +00001206
1207 /* Create the aligned block. */
nethercote2d5b8162004-08-11 09:40:52 +00001208 mkInuseBlock ( a, align_b,
1209 base_p + base_pszB_act
1210 + overhead_szB_hi(a) - (UByte*)align_b );
sewardjde4a1d02002-03-22 01:27:54 +00001211
1212 /* Final sanity checks. */
nethercote2d5b8162004-08-11 09:40:52 +00001213 vg_assert( is_inuse_bszB(get_bszB_lo(get_payload_block(a, align_p))) );
sewardjde4a1d02002-03-22 01:27:54 +00001214
nethercote2d5b8162004-08-11 09:40:52 +00001215 vg_assert(req_pszB
sewardjde4a1d02002-03-22 01:27:54 +00001216 <=
nethercote2d5b8162004-08-11 09:40:52 +00001217 bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(
1218 get_payload_block(a, align_p))))
sewardjde4a1d02002-03-22 01:27:54 +00001219 );
1220
1221 a->bytes_on_loan
nethercote2d5b8162004-08-11 09:40:52 +00001222 += bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(
1223 get_payload_block(a, align_p))));
sewardjde4a1d02002-03-22 01:27:54 +00001224 if (a->bytes_on_loan > a->bytes_on_loan_max)
1225 a->bytes_on_loan_max = a->bytes_on_loan;
1226
1227# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00001228 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001229# endif
1230
njn25e49d8e72002-09-23 09:36:25 +00001231 VGP_POPCC(VgpMalloc);
1232
nethercote2d5b8162004-08-11 09:40:52 +00001233 vg_assert( (((Addr)align_p) % req_alignB) == 0 );
sewardjb5f6f512005-03-10 23:59:00 +00001234
1235 VALGRIND_MALLOCLIKE_BLOCK(align_p, req_pszB, 0, False);
1236
nethercote2d5b8162004-08-11 09:40:52 +00001237 return align_p;
1238}
1239
1240
nethercote7ac7f7b2004-11-02 12:36:02 +00001241SizeT VG_(arena_payload_szB) ( ArenaId aid, void* ptr )
nethercote2d5b8162004-08-11 09:40:52 +00001242{
1243 Arena* a = arenaId_to_ArenaP(aid);
1244 Block* b = get_payload_block(a, ptr);
1245 return bszB_to_pszB(a, get_bszB_lo(b));
sewardjde4a1d02002-03-22 01:27:54 +00001246}
1247
1248
1249/*------------------------------------------------------------*/
1250/*--- Services layered on top of malloc/free. ---*/
1251/*------------------------------------------------------------*/
1252
njn828022a2005-03-13 14:56:31 +00001253void* VG_(arena_calloc) ( ArenaId aid, SizeT nmemb, SizeT bytes_per_memb )
sewardjde4a1d02002-03-22 01:27:54 +00001254{
nethercote7ac7f7b2004-11-02 12:36:02 +00001255 SizeT size;
sewardjde4a1d02002-03-22 01:27:54 +00001256 UChar* p;
njn25e49d8e72002-09-23 09:36:25 +00001257
1258 VGP_PUSHCC(VgpMalloc);
1259
njn926ed472005-03-11 04:44:10 +00001260 size = nmemb * bytes_per_memb;
1261 vg_assert(size >= nmemb && size >= bytes_per_memb);// check against overflow
njn3e884182003-04-15 13:03:23 +00001262
njn828022a2005-03-13 14:56:31 +00001263 p = VG_(arena_malloc) ( aid, size );
njn3e884182003-04-15 13:03:23 +00001264
njn926ed472005-03-11 04:44:10 +00001265 VG_(memset)(p, 0, size);
sewardjb5f6f512005-03-10 23:59:00 +00001266
njn926ed472005-03-11 04:44:10 +00001267 VALGRIND_MALLOCLIKE_BLOCK(p, size, 0, True);
njn25e49d8e72002-09-23 09:36:25 +00001268
1269 VGP_POPCC(VgpMalloc);
1270
sewardjde4a1d02002-03-22 01:27:54 +00001271 return p;
1272}
1273
1274
njn828022a2005-03-13 14:56:31 +00001275void* VG_(arena_realloc) ( ArenaId aid, void* ptr, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001276{
1277 Arena* a;
nethercote7ac7f7b2004-11-02 12:36:02 +00001278 SizeT old_bszB, old_pszB;
sewardjb5f6f512005-03-10 23:59:00 +00001279 UChar *p_new;
nethercote2d5b8162004-08-11 09:40:52 +00001280 Block* b;
sewardjde4a1d02002-03-22 01:27:54 +00001281
njn25e49d8e72002-09-23 09:36:25 +00001282 VGP_PUSHCC(VgpMalloc);
1283
sewardjde4a1d02002-03-22 01:27:54 +00001284 ensure_mm_init();
1285 a = arenaId_to_ArenaP(aid);
1286
nethercote7ac7f7b2004-11-02 12:36:02 +00001287 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00001288
nethercote2d5b8162004-08-11 09:40:52 +00001289 b = get_payload_block(a, ptr);
1290 vg_assert(blockSane(a, b));
sewardjde4a1d02002-03-22 01:27:54 +00001291
nethercote2d5b8162004-08-11 09:40:52 +00001292 old_bszB = get_bszB_lo(b);
1293 vg_assert(is_inuse_bszB(old_bszB));
1294 old_bszB = mk_plain_bszB(old_bszB);
1295 old_pszB = bszB_to_pszB(a, old_bszB);
sewardjde4a1d02002-03-22 01:27:54 +00001296
njn25e49d8e72002-09-23 09:36:25 +00001297 if (req_pszB <= old_pszB) {
1298 VGP_POPCC(VgpMalloc);
1299 return ptr;
1300 }
sewardjde4a1d02002-03-22 01:27:54 +00001301
njn828022a2005-03-13 14:56:31 +00001302 p_new = VG_(arena_malloc) ( aid, req_pszB );
1303
sewardjb5f6f512005-03-10 23:59:00 +00001304 VG_(memcpy)(p_new, ptr, old_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001305
sewardjb5f6f512005-03-10 23:59:00 +00001306 VG_(arena_free)(aid, ptr);
njn25e49d8e72002-09-23 09:36:25 +00001307
1308 VGP_POPCC(VgpMalloc);
sewardjde4a1d02002-03-22 01:27:54 +00001309 return p_new;
1310}
1311
1312
1313/*------------------------------------------------------------*/
nethercote996901a2004-08-03 13:29:09 +00001314/*--- Tool-visible functions. ---*/
njn25e49d8e72002-09-23 09:36:25 +00001315/*------------------------------------------------------------*/
1316
nethercote2d5b8162004-08-11 09:40:52 +00001317// All just wrappers to avoid exposing arenas to tools.
njn25e49d8e72002-09-23 09:36:25 +00001318
nethercote7ac7f7b2004-11-02 12:36:02 +00001319void* VG_(malloc) ( SizeT nbytes )
njn25e49d8e72002-09-23 09:36:25 +00001320{
nethercote60f5b822004-01-26 17:24:42 +00001321 return VG_(arena_malloc) ( VG_AR_TOOL, nbytes );
njn25e49d8e72002-09-23 09:36:25 +00001322}
1323
1324void VG_(free) ( void* ptr )
1325{
nethercote60f5b822004-01-26 17:24:42 +00001326 VG_(arena_free) ( VG_AR_TOOL, ptr );
njn25e49d8e72002-09-23 09:36:25 +00001327}
1328
njn926ed472005-03-11 04:44:10 +00001329void* VG_(calloc) ( SizeT nmemb, SizeT bytes_per_memb )
njn25e49d8e72002-09-23 09:36:25 +00001330{
njn828022a2005-03-13 14:56:31 +00001331 return VG_(arena_calloc) ( VG_AR_TOOL, nmemb, bytes_per_memb );
njn25e49d8e72002-09-23 09:36:25 +00001332}
1333
nethercote7ac7f7b2004-11-02 12:36:02 +00001334void* VG_(realloc) ( void* ptr, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001335{
njn828022a2005-03-13 14:56:31 +00001336 return VG_(arena_realloc) ( VG_AR_TOOL, ptr, size );
njn25e49d8e72002-09-23 09:36:25 +00001337}
1338
nethercote7ac7f7b2004-11-02 12:36:02 +00001339void* VG_(cli_malloc) ( SizeT align, SizeT nbytes )
njn3e884182003-04-15 13:03:23 +00001340{
njnc7c31612005-03-13 18:53:34 +00001341 // 'align' should be valid (ie. big enough and a power of two) by now.
1342 // arena_malloc_aligned() will abort if it's not.
nethercote2d5b8162004-08-11 09:40:52 +00001343 if (VG_MIN_MALLOC_SZB == align)
njn083f3022005-03-13 18:33:02 +00001344 return VG_(arena_malloc) ( VG_AR_CLIENT, nbytes );
njn3e884182003-04-15 13:03:23 +00001345 else
njn083f3022005-03-13 18:33:02 +00001346 return arena_malloc_aligned ( VG_AR_CLIENT, align, nbytes );
njn3e884182003-04-15 13:03:23 +00001347}
1348
1349void VG_(cli_free) ( void* p )
1350{
1351 VG_(arena_free) ( VG_AR_CLIENT, p );
1352}
1353
1354
nethercote7ac7f7b2004-11-02 12:36:02 +00001355Bool VG_(addr_is_in_block)( Addr a, Addr start, SizeT size )
njn3e884182003-04-15 13:03:23 +00001356{
1357 return (start - VG_(vg_malloc_redzone_szB) <= a
1358 && a < start + size + VG_(vg_malloc_redzone_szB));
1359}
1360
1361
njn25e49d8e72002-09-23 09:36:25 +00001362/*------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +00001363/*--- The original test driver machinery. ---*/
1364/*------------------------------------------------------------*/
1365
#if 0
/* NOTE(review): this entire section is compiled out (#if 0).  It is
   the original standalone test driver, kept for reference only; it
   refers to entry points (vg_malloc, vg_free, vg_memalign,
   mallocSanityCheck, show_arena_stats) that do not appear elsewhere
   in this file, so it would not compile if re-enabled as-is. */

#if 1
#define N_TEST_TRANSACTIONS 100000000
#define N_TEST_ARR 200000
#define M_TEST_MALLOC 1000
#else
#define N_TEST_TRANSACTIONS 500000
#define N_TEST_ARR 30000
#define M_TEST_MALLOC 500
#endif


void* test_arr[N_TEST_ARR];

int main ( int argc, char** argv )
{
   Int i, j, k, nbytes, qq;
   unsigned char* chp;
   Arena* a = &arena[VG_AR_CORE];
   srandom(1);
   for (i = 0; i < N_TEST_ARR; i++)
      test_arr[i] = NULL;

   // Randomly free a slot if occupied, else fill it with a fresh
   // allocation of random size (occasionally scaled or zero).
   for (i = 0; i < N_TEST_TRANSACTIONS; i++) {
      if (i % 50000 == 0) mallocSanityCheck(a);
      j = random() % N_TEST_ARR;
      if (test_arr[j]) {
         vg_free(a, test_arr[j]);
         test_arr[j] = NULL;
      } else {
         nbytes = 1 + random() % M_TEST_MALLOC;
         qq = random()%64;
         if (qq == 32)
            nbytes *= 17;
         else if (qq == 33)
            nbytes = 0;
         test_arr[j]
            = (i % 17) == 0
                 ? vg_memalign(a, nbytes, 1<< (3+(random()%10)))
                 : vg_malloc( a, nbytes );
         chp = test_arr[j];
         for (k = 0; k < nbytes; k++)
            chp[k] = (unsigned char)(k + 99);
      }
   }

   // Drain everything and run one final sanity check.
   for (i = 0; i < N_TEST_ARR; i++) {
      if (test_arr[i]) {
         vg_free(a, test_arr[i]);
         test_arr[i] = NULL;
      }
   }
   mallocSanityCheck(a);

   fprintf(stderr, "ALL DONE\n");

   show_arena_stats(a);
   fprintf(stderr, "%d max useful, %d bytes mmap'd (%4.1f%%), %d useful\n",
           a->bytes_on_loan_max,
           a->bytes_mmaped,
           100.0 * (double)a->bytes_on_loan_max / (double)a->bytes_mmaped,
           a->bytes_on_loan );

   return 0;
}
#endif /* 0 */
1434
1435
1436/*--------------------------------------------------------------------*/
1437/*--- end vg_malloc2.c ---*/
1438/*--------------------------------------------------------------------*/