blob: f2908a6a3ab0d4af91adba4b4828e64d3c2d5ba5 [file] [log] [blame]
sewardjde4a1d02002-03-22 01:27:54 +00001
2/*--------------------------------------------------------------------*/
3/*--- An implementation of malloc/free which doesn't use sbrk. ---*/
njn717cde52005-05-10 02:47:21 +00004/*--- m_mallocfree.c ---*/
sewardjde4a1d02002-03-22 01:27:54 +00005/*--------------------------------------------------------------------*/
6
7/*
njnb9c427c2004-12-01 14:14:42 +00008 This file is part of Valgrind, a dynamic binary instrumentation
9 framework.
sewardjde4a1d02002-03-22 01:27:54 +000010
njn53612422005-03-12 16:22:54 +000011 Copyright (C) 2000-2005 Julian Seward
sewardjde4a1d02002-03-22 01:27:54 +000012 jseward@acm.org
sewardjde4a1d02002-03-22 01:27:54 +000013
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
njn25e49d8e72002-09-23 09:36:25 +000029 The GNU General Public License is contained in the file COPYING.
sewardjde4a1d02002-03-22 01:27:54 +000030*/
31
32
nethercotef1e5e152004-09-01 23:58:16 +000033#include "core.h"
sewardj55f9d1a2005-04-25 11:11:44 +000034#include "pub_core_aspacemgr.h"
35
sewardjb5f6f512005-03-10 23:59:00 +000036//zz#include "memcheck/memcheck.h"
sewardjde4a1d02002-03-22 01:27:54 +000037
nethercote2d5b8162004-08-11 09:40:52 +000038//#define DEBUG_MALLOC // turn on heavyweight debugging machinery
39//#define VERBOSE_MALLOC // make verbose, esp. in debugging machinery
40
41/*------------------------------------------------------------*/
42/*--- Main types ---*/
43/*------------------------------------------------------------*/
44
njn6e6588c2005-03-13 18:52:48 +000045#define N_MALLOC_LISTS 16 // do not change this
nethercote2d5b8162004-08-11 09:40:52 +000046
nethercote7ac7f7b2004-11-02 12:36:02 +000047// The amount you can ask for is limited only by sizeof(SizeT)...
48#define MAX_PSZB (~((SizeT)0x0))
nethercote2d5b8162004-08-11 09:40:52 +000049
50typedef UChar UByte;
51
52/* Block layout:
53
nethercote7ac7f7b2004-11-02 12:36:02 +000054 this block total szB (sizeof(SizeT) bytes)
nethercote2d5b8162004-08-11 09:40:52 +000055 freelist previous ptr (sizeof(void*) bytes)
56 red zone bytes (depends on .rz_szB field of Arena)
57 (payload bytes)
58 red zone bytes (depends on .rz_szB field of Arena)
59 freelist next ptr (sizeof(void*) bytes)
nethercote7ac7f7b2004-11-02 12:36:02 +000060 this block total szB (sizeof(SizeT) bytes)
nethercote2d5b8162004-08-11 09:40:52 +000061
62 Total size in bytes (bszB) and payload size in bytes (pszB)
63 are related by:
64
nethercote7ac7f7b2004-11-02 12:36:02 +000065 bszB == pszB + 2*sizeof(SizeT) + 2*sizeof(void*) + 2*a->rz_szB
nethercote2d5b8162004-08-11 09:40:52 +000066
   Furthermore, both size fields in the block have their least-significant
   bit set if the block is not in use, and unset if it is in use.
69 (The bottom 3 or so bits are always free for this because of alignment.)
70 A block size of zero is not possible, because a block always has at
71 least two SizeTs and two pointers of overhead.
nethercote2d5b8162004-08-11 09:40:52 +000072
73 Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned. This is
74 achieved by ensuring that Superblocks are VG_MIN_MALLOC_SZB-aligned
75 (see newSuperblock() for how), and that the lengths of the following
76 things are a multiple of VG_MIN_MALLOC_SZB:
77 - Superblock admin section lengths (due to elastic padding)
78 - Block admin section (low and high) lengths (due to elastic redzones)
79 - Block payload lengths (due to req_pszB rounding up)
80*/
// A Block: the unit of allocation/free within a Superblock.  See the
// layout comment above for the on-disk... er, in-memory format.
typedef
   struct {
      // No fields are actually used in this struct, because a Block has
      // loads of variable sized fields and so can't be accessed
      // meaningfully with normal fields.  So we use access functions all
      // the time.  This struct gives us a type to use, though.  Also, we
      // make sizeof(Block) 1 byte so that we can do arithmetic with the
      // Block* type in increments of 1!
      UByte dummy;
   }
   Block;
92
93// A superblock. 'padding' is never used, it just ensures that if the
94// entire Superblock is aligned to VG_MIN_MALLOC_SZB, then payload_bytes[]
95// will be too. It can add small amounts of padding unnecessarily -- eg.
96// 8-bytes on 32-bit machines with an 8-byte VG_MIN_MALLOC_SZB -- because
97// it's too hard to make a constant expression that works perfectly in all
98// cases.
99// payload_bytes[] is made a single big Block when the Superblock is
100// created, and then can be split and the splittings remerged, but Blocks
101// always cover its entire length -- there's never any unused bytes at the
102// end, for example.
typedef
   struct _Superblock {
      struct _Superblock* next;   // next superblock in the arena's chain
      SizeT n_payload_bytes;      // length of payload_bytes[]
      // Elastic padding; rounds the admin section up to a multiple of
      // VG_MIN_MALLOC_SZB so payload_bytes[] is aligned (see comment above).
      UByte padding[ VG_MIN_MALLOC_SZB -
                        ((sizeof(struct _Superblock*) + sizeof(SizeT)) %
                         VG_MIN_MALLOC_SZB) ];
      UByte payload_bytes[0];     // carved into Blocks; Blocks always cover
                                  // its entire length
   }
   Superblock;
113
// An arena.  'freelist' is a circular, doubly-linked list.  'rz_szB' is
// elastic, in that it can be bigger than asked-for to ensure alignment.
typedef
   struct {
      Char* name;                 // label used in stats/debug messages
      Bool clientmem;             // Allocates in the client address space?
      SizeT rz_szB;               // Red zone size in bytes
      SizeT min_sblock_szB;       // Minimum superblock size in bytes
      Block* freelist[N_MALLOC_LISTS];   // one circular list per size class
      Superblock* sblocks;        // all superblocks owned by this arena
      // Stats only.
      SizeT bytes_on_loan;        // payload bytes currently allocated
      SizeT bytes_mmaped;         // total bytes mmap'd for this arena
      SizeT bytes_on_loan_max;    // high-water mark of bytes_on_loan
   }
   Arena;
130
131
132/*------------------------------------------------------------*/
133/*--- Low-level functions for working with Blocks. ---*/
134/*------------------------------------------------------------*/
135
nethercote7ac7f7b2004-11-02 12:36:02 +0000136#define SIZE_T_0x1 ((SizeT)0x1)
137
nethercote2d5b8162004-08-11 09:40:52 +0000138// Mark a bszB as in-use, and not in-use.
139static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000140SizeT mk_inuse_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000141{
142 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000143 return bszB & (~SIZE_T_0x1);
nethercote2d5b8162004-08-11 09:40:52 +0000144}
145static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000146SizeT mk_free_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000147{
148 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000149 return bszB | SIZE_T_0x1;
nethercote2d5b8162004-08-11 09:40:52 +0000150}
151
152// Remove the in-use/not-in-use attribute from a bszB, leaving just
153// the size.
154static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000155SizeT mk_plain_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000156{
157 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000158 return bszB & (~SIZE_T_0x1);
nethercote2d5b8162004-08-11 09:40:52 +0000159}
160
161// Does this bszB have the in-use attribute?
162static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000163Bool is_inuse_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000164{
165 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000166 return (0 != (bszB & SIZE_T_0x1)) ? False : True;
nethercote2d5b8162004-08-11 09:40:52 +0000167}
168
169
170// Set and get the lower size field of a block.
171static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000172void set_bszB_lo ( Block* b, SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000173{
nethercote7ac7f7b2004-11-02 12:36:02 +0000174 *(SizeT*)&b[0] = bszB;
nethercote2d5b8162004-08-11 09:40:52 +0000175}
176static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000177SizeT get_bszB_lo ( Block* b )
nethercote2d5b8162004-08-11 09:40:52 +0000178{
nethercote7ac7f7b2004-11-02 12:36:02 +0000179 return *(SizeT*)&b[0];
nethercote2d5b8162004-08-11 09:40:52 +0000180}
181
182// Get the address of the last byte in a block
183static __inline__
184UByte* last_byte ( Block* b )
185{
186 UByte* b2 = (UByte*)b;
187 return &b2[mk_plain_bszB(get_bszB_lo(b)) - 1];
188}
189
190// Set and get the upper size field of a block.
191static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000192void set_bszB_hi ( Block* b, SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000193{
194 UByte* b2 = (UByte*)b;
195 UByte* lb = last_byte(b);
196 vg_assert(lb == &b2[mk_plain_bszB(bszB) - 1]);
nethercote7ac7f7b2004-11-02 12:36:02 +0000197 *(SizeT*)&lb[-sizeof(SizeT) + 1] = bszB;
nethercote2d5b8162004-08-11 09:40:52 +0000198}
199static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000200SizeT get_bszB_hi ( Block* b )
nethercote2d5b8162004-08-11 09:40:52 +0000201{
202 UByte* lb = last_byte(b);
nethercote7ac7f7b2004-11-02 12:36:02 +0000203 return *(SizeT*)&lb[-sizeof(SizeT) + 1];
nethercote2d5b8162004-08-11 09:40:52 +0000204}
205
206
// Return the lower, upper and total overhead in bytes for a block.
// These are determined purely by which arena the block lives in.
static __inline__
SizeT overhead_szB_lo ( Arena* a )
{
   // lower size field + freelist prev ptr + lower red zone
   return sizeof(SizeT) + sizeof(void*) + a->rz_szB;
}
static __inline__
SizeT overhead_szB_hi ( Arena* a )
{
   // upper red zone + freelist next ptr + upper size field
   return a->rz_szB + sizeof(void*) + sizeof(SizeT);
}
static __inline__
SizeT overhead_szB ( Arena* a )
{
   return overhead_szB_lo(a) + overhead_szB_hi(a);
}
224
nethercote2d5b8162004-08-11 09:40:52 +0000225// Given the addr of a block, return the addr of its payload.
226static __inline__
227UByte* get_block_payload ( Arena* a, Block* b )
228{
229 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000230 return & b2[ overhead_szB_lo(a) ];
nethercote2d5b8162004-08-11 09:40:52 +0000231}
232// Given the addr of a block's payload, return the addr of the block itself.
233static __inline__
234Block* get_payload_block ( Arena* a, UByte* payload )
235{
nethercote7ac7f7b2004-11-02 12:36:02 +0000236 return (Block*)&payload[ -overhead_szB_lo(a) ];
nethercote2d5b8162004-08-11 09:40:52 +0000237}
238
239
240// Set and get the next and previous link fields of a block.
241static __inline__
242void set_prev_b ( Block* b, Block* prev_p )
243{
244 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000245 *(Block**)&b2[sizeof(SizeT)] = prev_p;
nethercote2d5b8162004-08-11 09:40:52 +0000246}
247static __inline__
248void set_next_b ( Block* b, Block* next_p )
249{
250 UByte* lb = last_byte(b);
nethercote7ac7f7b2004-11-02 12:36:02 +0000251 *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1] = next_p;
nethercote2d5b8162004-08-11 09:40:52 +0000252}
253static __inline__
254Block* get_prev_b ( Block* b )
255{
256 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000257 return *(Block**)&b2[sizeof(SizeT)];
nethercote2d5b8162004-08-11 09:40:52 +0000258}
259static __inline__
260Block* get_next_b ( Block* b )
261{
262 UByte* lb = last_byte(b);
nethercote7ac7f7b2004-11-02 12:36:02 +0000263 return *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1];
nethercote2d5b8162004-08-11 09:40:52 +0000264}
265
266
267// Get the block immediately preceding this one in the Superblock.
268static __inline__
269Block* get_predecessor_block ( Block* b )
270{
271 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000272 SizeT bszB = mk_plain_bszB( (*(SizeT*)&b2[-sizeof(SizeT)]) );
nethercote2d5b8162004-08-11 09:40:52 +0000273 return (Block*)&b2[-bszB];
274}
275
276// Read and write the lower and upper red-zone bytes of a block.
277static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000278void set_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
nethercote2d5b8162004-08-11 09:40:52 +0000279{
280 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000281 b2[sizeof(SizeT) + sizeof(void*) + rz_byteno] = v;
nethercote2d5b8162004-08-11 09:40:52 +0000282}
283static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000284void set_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
nethercote2d5b8162004-08-11 09:40:52 +0000285{
286 UByte* lb = last_byte(b);
nethercote7ac7f7b2004-11-02 12:36:02 +0000287 lb[-sizeof(SizeT) - sizeof(void*) - rz_byteno] = v;
nethercote2d5b8162004-08-11 09:40:52 +0000288}
289static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000290UByte get_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno )
nethercote2d5b8162004-08-11 09:40:52 +0000291{
292 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000293 return b2[sizeof(SizeT) + sizeof(void*) + rz_byteno];
nethercote2d5b8162004-08-11 09:40:52 +0000294}
295static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000296UByte get_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno )
nethercote2d5b8162004-08-11 09:40:52 +0000297{
298 UByte* lb = last_byte(b);
nethercote7ac7f7b2004-11-02 12:36:02 +0000299 return lb[-sizeof(SizeT) - sizeof(void*) - rz_byteno];
nethercote2d5b8162004-08-11 09:40:52 +0000300}
301
302
// Return the minimum bszB for a block in this arena.  Can have zero-length
// payloads, so it's the size of the admin bytes.
static __inline__
SizeT min_useful_bszB ( Arena* a )
{
   // A zero-length payload is legal, so the admin overhead is the floor.
   return overhead_szB(a);
}
310
// Convert payload size <--> block size (both in bytes).
static __inline__
SizeT pszB_to_bszB ( Arena* a, SizeT pszB )
{
   return pszB + overhead_szB(a);
}
static __inline__
SizeT bszB_to_pszB ( Arena* a, SizeT bszB )
{
   // A block can never be smaller than its own admin overhead.
   vg_assert(bszB >= overhead_szB(a));
   return bszB - overhead_szB(a);
}
323
324
325/*------------------------------------------------------------*/
326/*--- Arena management ---*/
327/*------------------------------------------------------------*/
328
329#define CORE_ARENA_MIN_SZB 1048576
330
331// The arena structures themselves.
332static Arena vg_arena[VG_N_ARENAS];
333
// Functions external to this module identify arenas using ArenaIds,
// not Arena*s.  This fn converts the former to the latter.
static Arena* arenaId_to_ArenaP ( ArenaId arena )
{
   // Bounds-check the id before indexing the static arena table.
   vg_assert(arena >= 0 && arena < VG_N_ARENAS);
   return & vg_arena[arena];
}
341
342// Initialise an arena. rz_szB is the minimum redzone size; it might be
njn30490552005-03-13 06:30:42 +0000343// made bigger to ensure that VG_MIN_MALLOC_SZB is observed.
nethercote2d5b8162004-08-11 09:40:52 +0000344static
njn0e742df2004-11-30 13:26:29 +0000345void arena_init ( ArenaId aid, Char* name, SizeT rz_szB, SizeT min_sblock_szB )
nethercote2d5b8162004-08-11 09:40:52 +0000346{
nethercote7ac7f7b2004-11-02 12:36:02 +0000347 SizeT i;
nethercote2d5b8162004-08-11 09:40:52 +0000348 Arena* a = arenaId_to_ArenaP(aid);
349
nethercote7ac7f7b2004-11-02 12:36:02 +0000350 vg_assert(rz_szB < 128); // ensure reasonable size
nethercote73b526f2004-10-31 18:48:21 +0000351 vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
nethercote2d5b8162004-08-11 09:40:52 +0000352 a->name = name;
353 a->clientmem = ( VG_AR_CLIENT == aid ? True : False );
354
355 // The size of the low and high admin sections in a block must be a
njn30490552005-03-13 06:30:42 +0000356 // multiple of VG_MIN_MALLOC_SZB. So we round up the asked-for
nethercote2d5b8162004-08-11 09:40:52 +0000357 // redzone size if necessary to achieve this.
358 a->rz_szB = rz_szB;
359 while (0 != overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++;
360 vg_assert(overhead_szB_lo(a) == overhead_szB_hi(a));
361
362 a->min_sblock_szB = min_sblock_szB;
njn6e6588c2005-03-13 18:52:48 +0000363 for (i = 0; i < N_MALLOC_LISTS; i++) a->freelist[i] = NULL;
nethercote2d5b8162004-08-11 09:40:52 +0000364 a->sblocks = NULL;
365 a->bytes_on_loan = 0;
366 a->bytes_mmaped = 0;
367 a->bytes_on_loan_max = 0;
368}
369
370/* Print vital stats for an arena. */
371void VG_(print_all_arena_stats) ( void )
372{
nethercote7ac7f7b2004-11-02 12:36:02 +0000373 UInt i;
nethercote2d5b8162004-08-11 09:40:52 +0000374 for (i = 0; i < VG_N_ARENAS; i++) {
375 Arena* a = arenaId_to_ArenaP(i);
376 VG_(message)(Vg_DebugMsg,
njn669ef072005-03-13 05:46:57 +0000377 "%8s: %8d mmap'd, %8d/%8d max/curr",
nethercote2d5b8162004-08-11 09:40:52 +0000378 a->name, a->bytes_mmaped, a->bytes_on_loan_max, a->bytes_on_loan
379 );
380 }
381}
382
njn8a97c6d2005-03-31 04:37:24 +0000383static Bool init_done = False;
384static SizeT client_malloc_redzone_szB = 8; // default: be paranoid
385
// Nb: this must be called before the client arena is initialised, ie.
// before any memory is allocated.
void VG_(set_client_malloc_redzone_szB)(SizeT rz_szB)
{
   // Too late: ensure_mm_init() has already baked the redzone size into
   // the client arena.
   if (init_done) {
      VG_(printf)(
         "\nTool error:\n"
         "%s cannot be called after the first allocation.\n",
         __PRETTY_FUNCTION__);
      VG_(exit)(1);
   }
   // This limit is no special figure, just something not too big
   if (rz_szB > 128) {
      VG_(printf)(
         "\nTool error:\n"
         " %s passed a too-big value (%llu)",
         __PRETTY_FUNCTION__, (ULong)rz_szB);
      VG_(exit)(1);
   }
   // Picked up by ensure_mm_init() when it creates VG_AR_CLIENT.
   client_malloc_redzone_szB = rz_szB;
}
407
/* This library is self-initialising, as it makes this more self-contained,
   less coupled with the outside world.  Hence VG_(arena_malloc)() and
   VG_(arena_free)() below always call ensure_mm_init() to ensure things are
   correctly initialised. */
static
void ensure_mm_init ( void )
{
   // Idempotent: only the first call does any work.
   if (init_done) {
      return;
   }

   /* Use checked red zones (of various sizes) for our internal stuff,
      and an unchecked zone of arbitrary size for the client.  Of
      course the client's red zone can be checked by the tool, eg.
      by using addressibility maps, but not by the mechanism implemented
      here, which merely checks at the time of freeing that the red
      zone bytes are unchanged.

      Nb: redzone sizes are *minimums*;  they could be made bigger to ensure
      alignment.  Eg. on 32-bit machines, 4 becomes 8, and 12 becomes 16;
      but on 64-bit machines 4 stays as 4, and 12 stays as 12 --- the extra
      4 bytes in both are accounted for by the larger prev/next ptr.
   */
   arena_init ( VG_AR_CORE, "core", 4, CORE_ARENA_MIN_SZB );
   arena_init ( VG_AR_TOOL, "tool", 4, 1048576 );
   arena_init ( VG_AR_SYMTAB, "symtab", 4, 1048576 );
   // Client redzone size may have been overridden by the tool via
   // VG_(set_client_malloc_redzone_szB), which must run before this point.
   arena_init ( VG_AR_CLIENT, "client", client_malloc_redzone_szB, 1048576 );
   arena_init ( VG_AR_DEMANGLE, "demangle", 12/*paranoid*/, 65536 );
   arena_init ( VG_AR_EXECTXT, "exectxt", 4, 65536 );
   arena_init ( VG_AR_ERRORS, "errors", 4, 65536 );

   init_done = True;
#  ifdef DEBUG_MALLOC
   VG_(sanity_check_malloc_all)();
#  endif
}
444
445
446/*------------------------------------------------------------*/
447/*--- Superblock management ---*/
448/*------------------------------------------------------------*/
449
450// Align ptr p upwards to an align-sized boundary.
451static
nethercote7ac7f7b2004-11-02 12:36:02 +0000452void* align_upwards ( void* p, SizeT align )
nethercote2d5b8162004-08-11 09:40:52 +0000453{
454 Addr a = (Addr)p;
455 if ((a % align) == 0) return (void*)a;
456 return (void*)(a - (a % align) + align);
457}
458
459// If not enough memory available, either aborts (for non-client memory)
460// or returns 0 (for client memory).
461static
nethercote7ac7f7b2004-11-02 12:36:02 +0000462Superblock* newSuperblock ( Arena* a, SizeT cszB )
nethercote2d5b8162004-08-11 09:40:52 +0000463{
464 // The extra VG_MIN_MALLOC_SZB bytes are for possible alignment up.
465 static UByte bootstrap_superblock[CORE_ARENA_MIN_SZB+VG_MIN_MALLOC_SZB];
sewardj548be6d2005-02-16 01:31:37 +0000466 static Bool called_before = True; //False;
nethercote2d5b8162004-08-11 09:40:52 +0000467 Superblock* sb;
468
469 // Take into account admin bytes in the Superblock.
470 cszB += sizeof(Superblock);
471
472 if (cszB < a->min_sblock_szB) cszB = a->min_sblock_szB;
nethercote73b526f2004-10-31 18:48:21 +0000473 while ((cszB % VKI_PAGE_SIZE) > 0) cszB++;
nethercote2d5b8162004-08-11 09:40:52 +0000474
475 if (!called_before) {
476 // First time we're called -- use the special static bootstrap
477 // superblock (see comment at top of main() for details).
478 called_before = True;
479 vg_assert(a == arenaId_to_ArenaP(VG_AR_CORE));
480 vg_assert(CORE_ARENA_MIN_SZB >= cszB);
481 // Ensure sb is suitably aligned.
482 sb = (Superblock*)align_upwards( bootstrap_superblock,
483 VG_MIN_MALLOC_SZB );
484 } else if (a->clientmem) {
485 // client allocation -- return 0 to client if it fails
486 sb = (Superblock *)
sewardj215776c2005-03-16 12:11:12 +0000487 VG_(get_memory_from_mmap_for_client)
488 (0, cszB, VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC, 0);
nethercote2d5b8162004-08-11 09:40:52 +0000489 if (NULL == sb)
490 return 0;
491 } else {
492 // non-client allocation -- aborts if it fails
493 sb = VG_(get_memory_from_mmap) ( cszB, "newSuperblock" );
494 }
495 vg_assert(NULL != sb);
sewardjb5f6f512005-03-10 23:59:00 +0000496 //zzVALGRIND_MAKE_WRITABLE(sb, cszB);
nethercote2d5b8162004-08-11 09:40:52 +0000497 vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
498 sb->n_payload_bytes = cszB - sizeof(Superblock);
499 a->bytes_mmaped += cszB;
500 if (0)
501 VG_(message)(Vg_DebugMsg, "newSuperblock, %d payload bytes",
502 sb->n_payload_bytes);
503 return sb;
504}
505
506// Find the superblock containing the given chunk.
507static
508Superblock* findSb ( Arena* a, Block* b )
509{
510 Superblock* sb;
511 for (sb = a->sblocks; sb; sb = sb->next)
512 if ((Block*)&sb->payload_bytes[0] <= b
513 && b < (Block*)&sb->payload_bytes[sb->n_payload_bytes])
514 return sb;
515 VG_(printf)("findSb: can't find pointer %p in arena `%s'\n", b, a->name );
516 VG_(core_panic)("findSb: VG_(arena_free)() in wrong arena?");
517 return NULL; /*NOTREACHED*/
518}
519
sewardjde4a1d02002-03-22 01:27:54 +0000520
fitzhardinge98abfc72003-12-16 02:05:15 +0000521/*------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +0000522/*--- Functions for working with freelists. ---*/
523/*------------------------------------------------------------*/
524
nethercote2d5b8162004-08-11 09:40:52 +0000525// Nb: Determination of which freelist a block lives on is based on the
526// payload size, not block size.
sewardjde4a1d02002-03-22 01:27:54 +0000527
nethercote2d5b8162004-08-11 09:40:52 +0000528// Convert a payload size in bytes to a freelist number.
sewardjde4a1d02002-03-22 01:27:54 +0000529static
nethercote7ac7f7b2004-11-02 12:36:02 +0000530UInt pszB_to_listNo ( SizeT pszB )
sewardjde4a1d02002-03-22 01:27:54 +0000531{
nethercote2d5b8162004-08-11 09:40:52 +0000532 vg_assert(0 == pszB % VG_MIN_MALLOC_SZB);
533 pszB /= VG_MIN_MALLOC_SZB;
534 if (pszB <= 2) return 0;
535 if (pszB <= 3) return 1;
536 if (pszB <= 4) return 2;
537 if (pszB <= 5) return 3;
538 if (pszB <= 6) return 4;
539 if (pszB <= 7) return 5;
540 if (pszB <= 8) return 6;
541 if (pszB <= 9) return 7;
542 if (pszB <= 10) return 8;
543 if (pszB <= 11) return 9;
544 if (pszB <= 12) return 10;
545 if (pszB <= 16) return 11;
546 if (pszB <= 32) return 12;
547 if (pszB <= 64) return 13;
548 if (pszB <= 128) return 14;
sewardjde4a1d02002-03-22 01:27:54 +0000549 return 15;
550}
551
nethercote2d5b8162004-08-11 09:40:52 +0000552// What is the minimum payload size for a given list?
sewardjde4a1d02002-03-22 01:27:54 +0000553static
nethercote7ac7f7b2004-11-02 12:36:02 +0000554SizeT listNo_to_pszB_min ( UInt listNo )
sewardjde4a1d02002-03-22 01:27:54 +0000555{
nethercote7ac7f7b2004-11-02 12:36:02 +0000556 SizeT pszB = 0;
njn6e6588c2005-03-13 18:52:48 +0000557 vg_assert(listNo <= N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +0000558 while (pszB_to_listNo(pszB) < listNo) pszB += VG_MIN_MALLOC_SZB;
559 return pszB;
sewardjde4a1d02002-03-22 01:27:54 +0000560}
561
nethercote2d5b8162004-08-11 09:40:52 +0000562// What is the maximum payload size for a given list?
sewardjde4a1d02002-03-22 01:27:54 +0000563static
nethercote7ac7f7b2004-11-02 12:36:02 +0000564SizeT listNo_to_pszB_max ( UInt listNo )
sewardjde4a1d02002-03-22 01:27:54 +0000565{
njn6e6588c2005-03-13 18:52:48 +0000566 vg_assert(listNo <= N_MALLOC_LISTS);
567 if (listNo == N_MALLOC_LISTS-1) {
nethercote2d5b8162004-08-11 09:40:52 +0000568 return MAX_PSZB;
sewardjde4a1d02002-03-22 01:27:54 +0000569 } else {
nethercote2d5b8162004-08-11 09:40:52 +0000570 return listNo_to_pszB_min(listNo+1) - 1;
sewardjde4a1d02002-03-22 01:27:54 +0000571 }
572}
573
574
575/* A nasty hack to try and reduce fragmentation. Try and replace
576 a->freelist[lno] with another block on the same list but with a
577 lower address, with the idea of attempting to recycle the same
578 blocks rather than cruise through the address space. */
sewardjde4a1d02002-03-22 01:27:54 +0000579static
nethercote7ac7f7b2004-11-02 12:36:02 +0000580void swizzle ( Arena* a, UInt lno )
sewardjde4a1d02002-03-22 01:27:54 +0000581{
nethercote2d5b8162004-08-11 09:40:52 +0000582 Block* p_best;
583 Block* pp;
584 Block* pn;
nethercote7ac7f7b2004-11-02 12:36:02 +0000585 UInt i;
sewardjde4a1d02002-03-22 01:27:54 +0000586
587 p_best = a->freelist[lno];
588 if (p_best == NULL) return;
589
590 pn = pp = p_best;
591 for (i = 0; i < 20; i++) {
nethercote2d5b8162004-08-11 09:40:52 +0000592 pn = get_next_b(pn);
593 pp = get_prev_b(pp);
sewardjde4a1d02002-03-22 01:27:54 +0000594 if (pn < p_best) p_best = pn;
595 if (pp < p_best) p_best = pp;
596 }
597 if (p_best < a->freelist[lno]) {
nethercote2d5b8162004-08-11 09:40:52 +0000598# ifdef VERBOSE_MALLOC
599 VG_(printf)("retreat by %d\n", a->freelist[lno] - p_best);
sewardjde4a1d02002-03-22 01:27:54 +0000600# endif
601 a->freelist[lno] = p_best;
602 }
603}
604
605
606/*------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +0000607/*--- Sanity-check/debugging machinery. ---*/
608/*------------------------------------------------------------*/
609
njn6e6588c2005-03-13 18:52:48 +0000610#define REDZONE_LO_MASK 0x31
611#define REDZONE_HI_MASK 0x7c
nethercote2d5b8162004-08-11 09:40:52 +0000612
// Do some crude sanity checks on a Block.  Returns False (after naming
// the failing check) on the first problem found.
static
Bool blockSane ( Arena* a, Block* b )
{
#  define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
   UInt i;
   // The two redundant size fields must agree.
   if (get_bszB_lo(b) != get_bszB_hi(b))
      {BLEAT("sizes");return False;}
   // Redzone contents are only defined for in-use blocks in non-client
   // arenas (the client's redzones are not filled by this module).
   if (!a->clientmem && is_inuse_bszB(get_bszB_lo(b))) {
      for (i = 0; i < a->rz_szB; i++) {
         // Expected fill pattern is derived from the block address XOR'd
         // with a per-end mask; see REDZONE_LO_MASK/REDZONE_HI_MASK.
         if (get_rz_lo_byte(a, b, i) !=
             (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK))
            {BLEAT("redzone-lo");return False;}
         if (get_rz_hi_byte(a, b, i) !=
             (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK))
            {BLEAT("redzone-hi");return False;}
      }
   }
   return True;
#  undef BLEAT
}
634
nethercote2d5b8162004-08-11 09:40:52 +0000635// Print superblocks (only for debugging).
sewardjde4a1d02002-03-22 01:27:54 +0000636static
637void ppSuperblocks ( Arena* a )
638{
nethercote7ac7f7b2004-11-02 12:36:02 +0000639 UInt i, blockno;
640 SizeT b_bszB;
nethercote2d5b8162004-08-11 09:40:52 +0000641 Block* b;
sewardjde4a1d02002-03-22 01:27:54 +0000642 Superblock* sb = a->sblocks;
643 blockno = 1;
644
645 while (sb) {
646 VG_(printf)( "\n" );
nethercote2d5b8162004-08-11 09:40:52 +0000647 VG_(printf)( "superblock %d at %p, sb->n_pl_bs = %d, next = %p\n",
648 blockno++, sb, sb->n_payload_bytes, sb->next );
649 for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
650 b = (Block*)&sb->payload_bytes[i];
651 b_bszB = get_bszB_lo(b);
652 VG_(printf)( " block at %d, bszB %d: ", i, mk_plain_bszB(b_bszB) );
653 VG_(printf)( "%s, ", is_inuse_bszB(b_bszB) ? "inuse" : "free");
654 VG_(printf)( "%s\n", blockSane(a, b) ? "ok" : "BAD" );
sewardjde4a1d02002-03-22 01:27:54 +0000655 }
nethercote2d5b8162004-08-11 09:40:52 +0000656 vg_assert(i == sb->n_payload_bytes); // no overshoot at end of Sb
sewardjde4a1d02002-03-22 01:27:54 +0000657 sb = sb->next;
658 }
659 VG_(printf)( "end of superblocks\n\n" );
660}
661
nethercote2d5b8162004-08-11 09:40:52 +0000662// Sanity check both the superblocks and the chains.
nethercote885dd912004-08-03 23:14:00 +0000663static void sanity_check_malloc_arena ( ArenaId aid )
sewardjde4a1d02002-03-22 01:27:54 +0000664{
nethercote7ac7f7b2004-11-02 12:36:02 +0000665 UInt i, superblockctr, blockctr_sb, blockctr_li;
666 UInt blockctr_sb_free, listno;
667 SizeT b_bszB, b_pszB, list_min_pszB, list_max_pszB;
sewardjde4a1d02002-03-22 01:27:54 +0000668 Superblock* sb;
669 Bool thisFree, lastWasFree;
nethercote2d5b8162004-08-11 09:40:52 +0000670 Block* b;
671 Block* b_prev;
nethercote7ac7f7b2004-11-02 12:36:02 +0000672 SizeT arena_bytes_on_loan;
sewardjde4a1d02002-03-22 01:27:54 +0000673 Arena* a;
674
nethercote885dd912004-08-03 23:14:00 +0000675# define BOMB VG_(core_panic)("sanity_check_malloc_arena")
sewardjde4a1d02002-03-22 01:27:54 +0000676
677 a = arenaId_to_ArenaP(aid);
678
nethercote2d5b8162004-08-11 09:40:52 +0000679 // First, traverse all the superblocks, inspecting the Blocks in each.
sewardjde4a1d02002-03-22 01:27:54 +0000680 superblockctr = blockctr_sb = blockctr_sb_free = 0;
681 arena_bytes_on_loan = 0;
682 sb = a->sblocks;
683 while (sb) {
684 lastWasFree = False;
685 superblockctr++;
nethercote2d5b8162004-08-11 09:40:52 +0000686 for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
sewardjde4a1d02002-03-22 01:27:54 +0000687 blockctr_sb++;
nethercote2d5b8162004-08-11 09:40:52 +0000688 b = (Block*)&sb->payload_bytes[i];
689 b_bszB = get_bszB_lo(b);
sewardjde4a1d02002-03-22 01:27:54 +0000690 if (!blockSane(a, b)) {
nethercote2d5b8162004-08-11 09:40:52 +0000691 VG_(printf)("sanity_check_malloc_arena: sb %p, block %d (bszB %d): "
692 " BAD\n", sb, i, b_bszB );
sewardjde4a1d02002-03-22 01:27:54 +0000693 BOMB;
694 }
nethercote2d5b8162004-08-11 09:40:52 +0000695 thisFree = !is_inuse_bszB(b_bszB);
sewardjde4a1d02002-03-22 01:27:54 +0000696 if (thisFree && lastWasFree) {
nethercote2d5b8162004-08-11 09:40:52 +0000697 VG_(printf)("sanity_check_malloc_arena: sb %p, block %d (bszB %d): "
njn25e49d8e72002-09-23 09:36:25 +0000698 "UNMERGED FREES\n",
nethercote2d5b8162004-08-11 09:40:52 +0000699 sb, i, b_bszB );
sewardjde4a1d02002-03-22 01:27:54 +0000700 BOMB;
701 }
sewardjde4a1d02002-03-22 01:27:54 +0000702 if (thisFree) blockctr_sb_free++;
703 if (!thisFree)
nethercote2d5b8162004-08-11 09:40:52 +0000704 arena_bytes_on_loan += bszB_to_pszB(a, b_bszB);
705 lastWasFree = thisFree;
sewardjde4a1d02002-03-22 01:27:54 +0000706 }
nethercote2d5b8162004-08-11 09:40:52 +0000707 if (i > sb->n_payload_bytes) {
nethercote885dd912004-08-03 23:14:00 +0000708 VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
sewardjde4a1d02002-03-22 01:27:54 +0000709 "overshoots end\n", sb);
710 BOMB;
711 }
712 sb = sb->next;
713 }
714
715 if (arena_bytes_on_loan != a->bytes_on_loan) {
nethercote2d5b8162004-08-11 09:40:52 +0000716# ifdef VERBOSE_MALLOC
717 VG_(printf)( "sanity_check_malloc_arena: a->bytes_on_loan %d, "
718 "arena_bytes_on_loan %d: "
719 "MISMATCH\n", a->bytes_on_loan, arena_bytes_on_loan);
720# endif
sewardjde4a1d02002-03-22 01:27:54 +0000721 ppSuperblocks(a);
722 BOMB;
723 }
724
725 /* Second, traverse each list, checking that the back pointers make
726 sense, counting blocks encountered, and checking that each block
727 is an appropriate size for this list. */
728 blockctr_li = 0;
njn6e6588c2005-03-13 18:52:48 +0000729 for (listno = 0; listno < N_MALLOC_LISTS; listno++) {
nethercote2d5b8162004-08-11 09:40:52 +0000730 list_min_pszB = listNo_to_pszB_min(listno);
731 list_max_pszB = listNo_to_pszB_max(listno);
sewardjde4a1d02002-03-22 01:27:54 +0000732 b = a->freelist[listno];
733 if (b == NULL) continue;
734 while (True) {
735 b_prev = b;
nethercote2d5b8162004-08-11 09:40:52 +0000736 b = get_next_b(b);
737 if (get_prev_b(b) != b_prev) {
nethercote885dd912004-08-03 23:14:00 +0000738 VG_(printf)( "sanity_check_malloc_arena: list %d at %p: "
sewardjde4a1d02002-03-22 01:27:54 +0000739 "BAD LINKAGE\n",
740 listno, b );
741 BOMB;
742 }
nethercote2d5b8162004-08-11 09:40:52 +0000743 b_pszB = bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(b)));
744 if (b_pszB < list_min_pszB || b_pszB > list_max_pszB) {
sewardjde4a1d02002-03-22 01:27:54 +0000745 VG_(printf)(
nethercote885dd912004-08-03 23:14:00 +0000746 "sanity_check_malloc_arena: list %d at %p: "
nethercote2d5b8162004-08-11 09:40:52 +0000747 "WRONG CHAIN SIZE %dB (%dB, %dB)\n",
748 listno, b, b_pszB, list_min_pszB, list_max_pszB );
sewardjde4a1d02002-03-22 01:27:54 +0000749 BOMB;
750 }
751 blockctr_li++;
752 if (b == a->freelist[listno]) break;
753 }
754 }
755
756 if (blockctr_sb_free != blockctr_li) {
nethercote2d5b8162004-08-11 09:40:52 +0000757# ifdef VERBOSE_MALLOC
758 VG_(printf)( "sanity_check_malloc_arena: BLOCK COUNT MISMATCH "
759 "(via sbs %d, via lists %d)\n",
760 blockctr_sb_free, blockctr_li );
761# endif
sewardjde4a1d02002-03-22 01:27:54 +0000762 ppSuperblocks(a);
763 BOMB;
764 }
765
nethercote885dd912004-08-03 23:14:00 +0000766 if (VG_(clo_verbosity) > 2)
767 VG_(message)(Vg_DebugMsg,
njn669ef072005-03-13 05:46:57 +0000768 "%8s: %2d sbs, %5d bs, %2d/%-2d free bs, "
nethercote885dd912004-08-03 23:14:00 +0000769 "%7d mmap, %7d loan",
770 a->name,
771 superblockctr,
772 blockctr_sb, blockctr_sb_free, blockctr_li,
773 a->bytes_mmaped, a->bytes_on_loan);
sewardjde4a1d02002-03-22 01:27:54 +0000774# undef BOMB
775}
776
777
nethercote885dd912004-08-03 23:14:00 +0000778void VG_(sanity_check_malloc_all) ( void )
sewardjde4a1d02002-03-22 01:27:54 +0000779{
nethercote7ac7f7b2004-11-02 12:36:02 +0000780 UInt i;
sewardjde4a1d02002-03-22 01:27:54 +0000781 for (i = 0; i < VG_N_ARENAS; i++)
nethercote885dd912004-08-03 23:14:00 +0000782 sanity_check_malloc_arena ( i );
sewardjde4a1d02002-03-22 01:27:54 +0000783}
784
sewardjde4a1d02002-03-22 01:27:54 +0000785
nethercote2d5b8162004-08-11 09:40:52 +0000786/*------------------------------------------------------------*/
787/*--- Creating and deleting blocks. ---*/
788/*------------------------------------------------------------*/
789
790// Mark the bytes at b .. b+bszB-1 as not in use, and add them to the
791// relevant free list.
792
793static
nethercote7ac7f7b2004-11-02 12:36:02 +0000794void mkFreeBlock ( Arena* a, Block* b, SizeT bszB, UInt b_lno )
jsewardb1a26ae2004-03-14 03:06:37 +0000795{
nethercote7ac7f7b2004-11-02 12:36:02 +0000796 SizeT pszB = bszB_to_pszB(a, bszB);
nethercote2d5b8162004-08-11 09:40:52 +0000797 vg_assert(b_lno == pszB_to_listNo(pszB));
sewardjb5f6f512005-03-10 23:59:00 +0000798 //zzVALGRIND_MAKE_WRITABLE(b, bszB);
nethercote2d5b8162004-08-11 09:40:52 +0000799 // Set the size fields and indicate not-in-use.
800 set_bszB_lo(b, mk_free_bszB(bszB));
801 set_bszB_hi(b, mk_free_bszB(bszB));
802
803 // Add to the relevant list.
804 if (a->freelist[b_lno] == NULL) {
805 set_prev_b(b, b);
806 set_next_b(b, b);
807 a->freelist[b_lno] = b;
808 } else {
809 Block* b_prev = get_prev_b(a->freelist[b_lno]);
810 Block* b_next = a->freelist[b_lno];
811 set_next_b(b_prev, b);
812 set_prev_b(b_next, b);
813 set_next_b(b, b_next);
814 set_prev_b(b, b_prev);
815 }
816# ifdef DEBUG_MALLOC
817 (void)blockSane(a,b);
818# endif
819}
820
821// Mark the bytes at b .. b+bszB-1 as in use, and set up the block
822// appropriately.
823static
nethercote7ac7f7b2004-11-02 12:36:02 +0000824void mkInuseBlock ( Arena* a, Block* b, SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000825{
nethercote7ac7f7b2004-11-02 12:36:02 +0000826 UInt i;
nethercote2d5b8162004-08-11 09:40:52 +0000827 vg_assert(bszB >= min_useful_bszB(a));
sewardjb5f6f512005-03-10 23:59:00 +0000828 //zzVALGRIND_MAKE_WRITABLE(b, bszB);
nethercote2d5b8162004-08-11 09:40:52 +0000829 set_bszB_lo(b, mk_inuse_bszB(bszB));
830 set_bszB_hi(b, mk_inuse_bszB(bszB));
831 set_prev_b(b, NULL); // Take off freelist
832 set_next_b(b, NULL); // ditto
833 if (!a->clientmem) {
834 for (i = 0; i < a->rz_szB; i++) {
njn6e6588c2005-03-13 18:52:48 +0000835 set_rz_lo_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK));
836 set_rz_hi_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK));
nethercote2d5b8162004-08-11 09:40:52 +0000837 }
838 }
839# ifdef DEBUG_MALLOC
840 (void)blockSane(a,b);
841# endif
842}
843
844// Remove a block from a given list. Does no sanity checking.
845static
nethercote7ac7f7b2004-11-02 12:36:02 +0000846void unlinkBlock ( Arena* a, Block* b, UInt listno )
nethercote2d5b8162004-08-11 09:40:52 +0000847{
njn6e6588c2005-03-13 18:52:48 +0000848 vg_assert(listno < N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +0000849 if (get_prev_b(b) == b) {
850 // Only one element in the list; treat it specially.
851 vg_assert(get_next_b(b) == b);
852 a->freelist[listno] = NULL;
853 } else {
854 Block* b_prev = get_prev_b(b);
855 Block* b_next = get_next_b(b);
856 a->freelist[listno] = b_prev;
857 set_next_b(b_prev, b_next);
858 set_prev_b(b_next, b_prev);
859 swizzle ( a, listno );
860 }
861 set_prev_b(b, NULL);
862 set_next_b(b, NULL);
jsewardb1a26ae2004-03-14 03:06:37 +0000863}
864
865
sewardjde4a1d02002-03-22 01:27:54 +0000866/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000867/*--- Core-visible functions. ---*/
sewardjde4a1d02002-03-22 01:27:54 +0000868/*------------------------------------------------------------*/
869
nethercote2d5b8162004-08-11 09:40:52 +0000870// Align the request size.
871static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000872SizeT align_req_pszB ( SizeT req_pszB )
nethercote2d5b8162004-08-11 09:40:52 +0000873{
nethercote7ac7f7b2004-11-02 12:36:02 +0000874 SizeT n = VG_MIN_MALLOC_SZB-1;
nethercote2d5b8162004-08-11 09:40:52 +0000875 return ((req_pszB + n) & (~n));
876}
877
nethercote7ac7f7b2004-11-02 12:36:02 +0000878void* VG_(arena_malloc) ( ArenaId aid, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +0000879{
nethercote7ac7f7b2004-11-02 12:36:02 +0000880 SizeT req_bszB, frag_bszB, b_bszB;
881 UInt lno;
sewardjde4a1d02002-03-22 01:27:54 +0000882 Superblock* new_sb;
nethercote2d5b8162004-08-11 09:40:52 +0000883 Block* b = NULL;
sewardjde4a1d02002-03-22 01:27:54 +0000884 Arena* a;
jsewardb1a26ae2004-03-14 03:06:37 +0000885 void* v;
sewardjde4a1d02002-03-22 01:27:54 +0000886
887 VGP_PUSHCC(VgpMalloc);
888
889 ensure_mm_init();
890 a = arenaId_to_ArenaP(aid);
891
nethercote7ac7f7b2004-11-02 12:36:02 +0000892 vg_assert(req_pszB < MAX_PSZB);
nethercote2d5b8162004-08-11 09:40:52 +0000893 req_pszB = align_req_pszB(req_pszB);
894 req_bszB = pszB_to_bszB(a, req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +0000895
nethercote2d5b8162004-08-11 09:40:52 +0000896 // Scan through all the big-enough freelists for a block.
njn6e6588c2005-03-13 18:52:48 +0000897 for (lno = pszB_to_listNo(req_pszB); lno < N_MALLOC_LISTS; lno++) {
sewardjde4a1d02002-03-22 01:27:54 +0000898 b = a->freelist[lno];
nethercote2d5b8162004-08-11 09:40:52 +0000899 if (NULL == b) continue; // If this list is empty, try the next one.
sewardjde4a1d02002-03-22 01:27:54 +0000900 while (True) {
nethercote2d5b8162004-08-11 09:40:52 +0000901 b_bszB = mk_plain_bszB(get_bszB_lo(b));
902 if (b_bszB >= req_bszB) goto obtained_block; // success!
903 b = get_next_b(b);
904 if (b == a->freelist[lno]) break; // traversed entire freelist
sewardjde4a1d02002-03-22 01:27:54 +0000905 }
sewardjde4a1d02002-03-22 01:27:54 +0000906 }
907
nethercote2d5b8162004-08-11 09:40:52 +0000908 // If we reach here, no suitable block found, allocate a new superblock
njn6e6588c2005-03-13 18:52:48 +0000909 vg_assert(lno == N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +0000910 new_sb = newSuperblock(a, req_bszB);
911 if (NULL == new_sb) {
912 // Should only fail if for client, otherwise, should have aborted
913 // already.
914 vg_assert(VG_AR_CLIENT == aid);
915 return NULL;
sewardjde4a1d02002-03-22 01:27:54 +0000916 }
nethercote2d5b8162004-08-11 09:40:52 +0000917 new_sb->next = a->sblocks;
918 a->sblocks = new_sb;
919 b = (Block*)&new_sb->payload_bytes[0];
920 lno = pszB_to_listNo(bszB_to_pszB(a, new_sb->n_payload_bytes));
921 mkFreeBlock ( a, b, new_sb->n_payload_bytes, lno);
922 // fall through
sewardjde4a1d02002-03-22 01:27:54 +0000923
nethercote2d5b8162004-08-11 09:40:52 +0000924 obtained_block:
925 // Ok, we can allocate from b, which lives in list lno.
sewardjde4a1d02002-03-22 01:27:54 +0000926 vg_assert(b != NULL);
njn6e6588c2005-03-13 18:52:48 +0000927 vg_assert(lno < N_MALLOC_LISTS);
sewardjde4a1d02002-03-22 01:27:54 +0000928 vg_assert(a->freelist[lno] != NULL);
nethercote2d5b8162004-08-11 09:40:52 +0000929 b_bszB = mk_plain_bszB(get_bszB_lo(b));
930 // req_bszB is the size of the block we are after. b_bszB is the
931 // size of what we've actually got. */
932 vg_assert(b_bszB >= req_bszB);
sewardjde4a1d02002-03-22 01:27:54 +0000933
nethercote2d5b8162004-08-11 09:40:52 +0000934 // Could we split this block and still get a useful fragment?
935 frag_bszB = b_bszB - req_bszB;
936 if (frag_bszB >= min_useful_bszB(a)) {
937 // Yes, split block in two, put the fragment on the appropriate free
938 // list, and update b_bszB accordingly.
939 // printf( "split %dB into %dB and %dB\n", b_bszB, req_bszB, frag_bszB );
sewardjde4a1d02002-03-22 01:27:54 +0000940 unlinkBlock(a, b, lno);
nethercote2d5b8162004-08-11 09:40:52 +0000941 mkInuseBlock(a, b, req_bszB);
942 mkFreeBlock(a, &b[req_bszB], frag_bszB,
943 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
944 b_bszB = mk_plain_bszB(get_bszB_lo(b));
945 } else {
946 // No, mark as in use and use as-is.
947 unlinkBlock(a, b, lno);
948 mkInuseBlock(a, b, b_bszB);
sewardjde4a1d02002-03-22 01:27:54 +0000949 }
sewardjde4a1d02002-03-22 01:27:54 +0000950
nethercote2d5b8162004-08-11 09:40:52 +0000951 // Update stats
952 a->bytes_on_loan += bszB_to_pszB(a, b_bszB);
sewardjde4a1d02002-03-22 01:27:54 +0000953 if (a->bytes_on_loan > a->bytes_on_loan_max)
954 a->bytes_on_loan_max = a->bytes_on_loan;
955
956# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +0000957 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +0000958# endif
959
njn25e49d8e72002-09-23 09:36:25 +0000960 VGP_POPCC(VgpMalloc);
nethercote2d5b8162004-08-11 09:40:52 +0000961 v = get_block_payload(a, b);
962 vg_assert( (((Addr)v) & (VG_MIN_MALLOC_SZB-1)) == 0 );
sewardjb5f6f512005-03-10 23:59:00 +0000963
964 VALGRIND_MALLOCLIKE_BLOCK(v, req_pszB, 0, False);
jsewardb1a26ae2004-03-14 03:06:37 +0000965 return v;
sewardjde4a1d02002-03-22 01:27:54 +0000966}
967
968
njn25e49d8e72002-09-23 09:36:25 +0000969void VG_(arena_free) ( ArenaId aid, void* ptr )
sewardjde4a1d02002-03-22 01:27:54 +0000970{
971 Superblock* sb;
nethercote2d5b8162004-08-11 09:40:52 +0000972 UByte* sb_start;
973 UByte* sb_end;
974 Block* other;
975 Block* b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000976 SizeT b_bszB, b_pszB, other_bszB;
977 UInt b_listno;
sewardjde4a1d02002-03-22 01:27:54 +0000978 Arena* a;
979
980 VGP_PUSHCC(VgpMalloc);
981
982 ensure_mm_init();
983 a = arenaId_to_ArenaP(aid);
984
njn25e49d8e72002-09-23 09:36:25 +0000985 if (ptr == NULL) {
986 VGP_POPCC(VgpMalloc);
987 return;
988 }
989
nethercote2d5b8162004-08-11 09:40:52 +0000990 b = get_payload_block(a, ptr);
sewardjde4a1d02002-03-22 01:27:54 +0000991
992# ifdef DEBUG_MALLOC
nethercote2d5b8162004-08-11 09:40:52 +0000993 vg_assert(blockSane(a, b));
sewardjde4a1d02002-03-22 01:27:54 +0000994# endif
995
nethercote2d5b8162004-08-11 09:40:52 +0000996 a->bytes_on_loan -= bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(b)));
sewardjde4a1d02002-03-22 01:27:54 +0000997
nethercote2d5b8162004-08-11 09:40:52 +0000998 sb = findSb( a, b );
999 sb_start = &sb->payload_bytes[0];
1000 sb_end = &sb->payload_bytes[sb->n_payload_bytes - 1];
sewardjde4a1d02002-03-22 01:27:54 +00001001
nethercote2d5b8162004-08-11 09:40:52 +00001002 // Put this chunk back on a list somewhere.
1003 b_bszB = get_bszB_lo(b);
1004 b_pszB = bszB_to_pszB(a, b_bszB);
1005 b_listno = pszB_to_listNo(b_pszB);
1006 mkFreeBlock( a, b, b_bszB, b_listno );
sewardjde4a1d02002-03-22 01:27:54 +00001007
nethercote2d5b8162004-08-11 09:40:52 +00001008 // See if this block can be merged with its successor.
1009 // First test if we're far enough before the superblock's end to possibly
1010 // have a successor.
1011 other = b + b_bszB;
1012 if (other+min_useful_bszB(a)-1 <= (Block*)sb_end) {
1013 // Ok, we have a successor, merge if it's not in use.
1014 other_bszB = get_bszB_lo(other);
1015 if (!is_inuse_bszB(other_bszB)) {
1016 // VG_(printf)( "merge-successor\n");
1017 other_bszB = mk_plain_bszB(other_bszB);
sewardjde4a1d02002-03-22 01:27:54 +00001018# ifdef DEBUG_MALLOC
1019 vg_assert(blockSane(a, other));
1020# endif
nethercote2d5b8162004-08-11 09:40:52 +00001021 unlinkBlock( a, b, b_listno );
1022 unlinkBlock( a, other, pszB_to_listNo(bszB_to_pszB(a,other_bszB)) );
1023 b_bszB += other_bszB;
1024 b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
1025 mkFreeBlock( a, b, b_bszB, b_listno );
sewardjde4a1d02002-03-22 01:27:54 +00001026 }
nethercote2d5b8162004-08-11 09:40:52 +00001027 } else {
1028 // Not enough space for successor: check that b is the last block
1029 // ie. there are no unused bytes at the end of the Superblock.
1030 vg_assert(other-1 == (Block*)sb_end);
sewardjde4a1d02002-03-22 01:27:54 +00001031 }
1032
nethercote2d5b8162004-08-11 09:40:52 +00001033 // Then see if this block can be merged with its predecessor.
1034 // First test if we're far enough after the superblock's start to possibly
1035 // have a predecessor.
1036 if (b >= (Block*)sb_start + min_useful_bszB(a)) {
1037 // Ok, we have a predecessor, merge if it's not in use.
1038 other = get_predecessor_block( b );
1039 other_bszB = get_bszB_lo(other);
1040 if (!is_inuse_bszB(other_bszB)) {
1041 // VG_(printf)( "merge-predecessor\n");
1042 other_bszB = mk_plain_bszB(other_bszB);
1043 unlinkBlock( a, b, b_listno );
1044 unlinkBlock( a, other, pszB_to_listNo(bszB_to_pszB(a, other_bszB)) );
1045 b = other;
1046 b_bszB += other_bszB;
1047 b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
1048 mkFreeBlock( a, b, b_bszB, b_listno );
sewardjde4a1d02002-03-22 01:27:54 +00001049 }
nethercote2d5b8162004-08-11 09:40:52 +00001050 } else {
1051 // Not enough space for predecessor: check that b is the first block,
1052 // ie. there are no unused bytes at the start of the Superblock.
1053 vg_assert((Block*)sb_start == b);
sewardjde4a1d02002-03-22 01:27:54 +00001054 }
1055
1056# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00001057 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001058# endif
1059
sewardjb5f6f512005-03-10 23:59:00 +00001060 VALGRIND_FREELIKE_BLOCK(ptr, 0);
1061
njn25e49d8e72002-09-23 09:36:25 +00001062 VGP_POPCC(VgpMalloc);
sewardjde4a1d02002-03-22 01:27:54 +00001063}
1064
1065
1066/*
1067 The idea for malloc_aligned() is to allocate a big block, base, and
1068 then split it into two parts: frag, which is returned to the the
1069 free pool, and align, which is the bit we're really after. Here's
1070 a picture. L and H denote the block lower and upper overheads, in
nethercote2d5b8162004-08-11 09:40:52 +00001071 bytes. The details are gruesome. Note it is slightly complicated
sewardjde4a1d02002-03-22 01:27:54 +00001072 because the initial request to generate base may return a bigger
1073 block than we asked for, so it is important to distinguish the base
1074 request size and the base actual size.
1075
1076 frag_b align_b
1077 | |
1078 | frag_p | align_p
1079 | | | |
1080 v v v v
1081
1082 +---+ +---+---+ +---+
1083 | L |----------------| H | L |---------------| H |
1084 +---+ +---+---+ +---+
1085
1086 ^ ^ ^
1087 | | :
1088 | base_p this addr must be aligned
1089 |
1090 base_b
1091
1092 . . . . . . .
nethercote2d5b8162004-08-11 09:40:52 +00001093 <------ frag_bszB -------> . . .
1094 . <------------- base_pszB_act -----------> .
sewardjde4a1d02002-03-22 01:27:54 +00001095 . . . . . . .
1096
1097*/
njn717cde52005-05-10 02:47:21 +00001098void* VG_(arena_memalign) ( ArenaId aid, SizeT req_alignB, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001099{
nethercote7ac7f7b2004-11-02 12:36:02 +00001100 SizeT base_pszB_req, base_pszB_act, frag_bszB;
nethercote2d5b8162004-08-11 09:40:52 +00001101 Block *base_b, *align_b;
1102 UByte *base_p, *align_p;
nethercote7ac7f7b2004-11-02 12:36:02 +00001103 SizeT saved_bytes_on_loan;
sewardjde4a1d02002-03-22 01:27:54 +00001104 Arena* a;
1105
njn25e49d8e72002-09-23 09:36:25 +00001106 VGP_PUSHCC(VgpMalloc);
1107
sewardjde4a1d02002-03-22 01:27:54 +00001108 ensure_mm_init();
1109 a = arenaId_to_ArenaP(aid);
1110
nethercote7ac7f7b2004-11-02 12:36:02 +00001111 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00001112
nethercote2d5b8162004-08-11 09:40:52 +00001113 // Check that the requested alignment seems reasonable; that is, is
1114 // a power of 2.
1115 if (req_alignB < VG_MIN_MALLOC_SZB
1116 || req_alignB > 1048576
njn717cde52005-05-10 02:47:21 +00001117 || VG_(log2)( req_alignB ) == -1 /* not a power of 2 */) {
1118 VG_(printf)("VG_(arena_memalign)(%p, %d, %d)\nbad alignment",
nethercote2d5b8162004-08-11 09:40:52 +00001119 a, req_alignB, req_pszB );
njn717cde52005-05-10 02:47:21 +00001120 VG_(core_panic)("VG_(arena_memalign)");
nethercote2d5b8162004-08-11 09:40:52 +00001121 /*NOTREACHED*/
sewardjde4a1d02002-03-22 01:27:54 +00001122 }
nethercote2d5b8162004-08-11 09:40:52 +00001123 // Paranoid
1124 vg_assert(req_alignB % VG_MIN_MALLOC_SZB == 0);
sewardjde4a1d02002-03-22 01:27:54 +00001125
1126 /* Required payload size for the aligned chunk. */
nethercote2d5b8162004-08-11 09:40:52 +00001127 req_pszB = align_req_pszB(req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001128
nethercote2d5b8162004-08-11 09:40:52 +00001129 /* Payload size to request for the big block that we will split up. */
1130 base_pszB_req = req_pszB + min_useful_bszB(a) + req_alignB;
sewardjde4a1d02002-03-22 01:27:54 +00001131
1132 /* Payload ptr for the block we are going to split. Note this
1133 changes a->bytes_on_loan; we save and restore it ourselves. */
1134 saved_bytes_on_loan = a->bytes_on_loan;
nethercote2d5b8162004-08-11 09:40:52 +00001135 base_p = VG_(arena_malloc) ( aid, base_pszB_req );
sewardjde4a1d02002-03-22 01:27:54 +00001136 a->bytes_on_loan = saved_bytes_on_loan;
1137
1138 /* Block ptr for the block we are going to split. */
nethercote2d5b8162004-08-11 09:40:52 +00001139 base_b = get_payload_block ( a, base_p );
sewardjde4a1d02002-03-22 01:27:54 +00001140
1141 /* Pointer to the payload of the aligned block we are going to
1142 return. This has to be suitably aligned. */
nethercote2d5b8162004-08-11 09:40:52 +00001143 align_p = align_upwards ( base_b + 2 * overhead_szB_lo(a)
1144 + overhead_szB_hi(a),
sewardjde4a1d02002-03-22 01:27:54 +00001145 req_alignB );
nethercote2d5b8162004-08-11 09:40:52 +00001146 align_b = get_payload_block(a, align_p);
sewardjde4a1d02002-03-22 01:27:54 +00001147
1148 /* The block size of the fragment we will create. This must be big
1149 enough to actually create a fragment. */
nethercote2d5b8162004-08-11 09:40:52 +00001150 frag_bszB = align_b - base_b;
1151
1152 vg_assert(frag_bszB >= min_useful_bszB(a));
sewardjde4a1d02002-03-22 01:27:54 +00001153
1154 /* The actual payload size of the block we are going to split. */
nethercote2d5b8162004-08-11 09:40:52 +00001155 base_pszB_act = bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(base_b)));
sewardjde4a1d02002-03-22 01:27:54 +00001156
nethercote2d5b8162004-08-11 09:40:52 +00001157 /* Create the fragment block, and put it back on the relevant free list. */
1158 mkFreeBlock ( a, base_b, frag_bszB,
1159 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)) );
sewardjde4a1d02002-03-22 01:27:54 +00001160
1161 /* Create the aligned block. */
nethercote2d5b8162004-08-11 09:40:52 +00001162 mkInuseBlock ( a, align_b,
1163 base_p + base_pszB_act
1164 + overhead_szB_hi(a) - (UByte*)align_b );
sewardjde4a1d02002-03-22 01:27:54 +00001165
1166 /* Final sanity checks. */
nethercote2d5b8162004-08-11 09:40:52 +00001167 vg_assert( is_inuse_bszB(get_bszB_lo(get_payload_block(a, align_p))) );
sewardjde4a1d02002-03-22 01:27:54 +00001168
nethercote2d5b8162004-08-11 09:40:52 +00001169 vg_assert(req_pszB
sewardjde4a1d02002-03-22 01:27:54 +00001170 <=
nethercote2d5b8162004-08-11 09:40:52 +00001171 bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(
1172 get_payload_block(a, align_p))))
sewardjde4a1d02002-03-22 01:27:54 +00001173 );
1174
1175 a->bytes_on_loan
nethercote2d5b8162004-08-11 09:40:52 +00001176 += bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(
1177 get_payload_block(a, align_p))));
sewardjde4a1d02002-03-22 01:27:54 +00001178 if (a->bytes_on_loan > a->bytes_on_loan_max)
1179 a->bytes_on_loan_max = a->bytes_on_loan;
1180
1181# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00001182 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001183# endif
1184
njn25e49d8e72002-09-23 09:36:25 +00001185 VGP_POPCC(VgpMalloc);
1186
nethercote2d5b8162004-08-11 09:40:52 +00001187 vg_assert( (((Addr)align_p) % req_alignB) == 0 );
sewardjb5f6f512005-03-10 23:59:00 +00001188
1189 VALGRIND_MALLOCLIKE_BLOCK(align_p, req_pszB, 0, False);
1190
nethercote2d5b8162004-08-11 09:40:52 +00001191 return align_p;
1192}
1193
1194
nethercote7ac7f7b2004-11-02 12:36:02 +00001195SizeT VG_(arena_payload_szB) ( ArenaId aid, void* ptr )
nethercote2d5b8162004-08-11 09:40:52 +00001196{
1197 Arena* a = arenaId_to_ArenaP(aid);
1198 Block* b = get_payload_block(a, ptr);
1199 return bszB_to_pszB(a, get_bszB_lo(b));
sewardjde4a1d02002-03-22 01:27:54 +00001200}
1201
1202
1203/*------------------------------------------------------------*/
1204/*--- Services layered on top of malloc/free. ---*/
1205/*------------------------------------------------------------*/
1206
njn828022a2005-03-13 14:56:31 +00001207void* VG_(arena_calloc) ( ArenaId aid, SizeT nmemb, SizeT bytes_per_memb )
sewardjde4a1d02002-03-22 01:27:54 +00001208{
nethercote7ac7f7b2004-11-02 12:36:02 +00001209 SizeT size;
sewardjde4a1d02002-03-22 01:27:54 +00001210 UChar* p;
njn25e49d8e72002-09-23 09:36:25 +00001211
1212 VGP_PUSHCC(VgpMalloc);
1213
njn926ed472005-03-11 04:44:10 +00001214 size = nmemb * bytes_per_memb;
1215 vg_assert(size >= nmemb && size >= bytes_per_memb);// check against overflow
njn3e884182003-04-15 13:03:23 +00001216
njn828022a2005-03-13 14:56:31 +00001217 p = VG_(arena_malloc) ( aid, size );
njn3e884182003-04-15 13:03:23 +00001218
njn926ed472005-03-11 04:44:10 +00001219 VG_(memset)(p, 0, size);
sewardjb5f6f512005-03-10 23:59:00 +00001220
njn926ed472005-03-11 04:44:10 +00001221 VALGRIND_MALLOCLIKE_BLOCK(p, size, 0, True);
njn25e49d8e72002-09-23 09:36:25 +00001222
1223 VGP_POPCC(VgpMalloc);
1224
sewardjde4a1d02002-03-22 01:27:54 +00001225 return p;
1226}
1227
1228
njn828022a2005-03-13 14:56:31 +00001229void* VG_(arena_realloc) ( ArenaId aid, void* ptr, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001230{
1231 Arena* a;
nethercote7ac7f7b2004-11-02 12:36:02 +00001232 SizeT old_bszB, old_pszB;
sewardjb5f6f512005-03-10 23:59:00 +00001233 UChar *p_new;
nethercote2d5b8162004-08-11 09:40:52 +00001234 Block* b;
sewardjde4a1d02002-03-22 01:27:54 +00001235
njn25e49d8e72002-09-23 09:36:25 +00001236 VGP_PUSHCC(VgpMalloc);
1237
sewardjde4a1d02002-03-22 01:27:54 +00001238 ensure_mm_init();
1239 a = arenaId_to_ArenaP(aid);
1240
nethercote7ac7f7b2004-11-02 12:36:02 +00001241 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00001242
nethercote2d5b8162004-08-11 09:40:52 +00001243 b = get_payload_block(a, ptr);
1244 vg_assert(blockSane(a, b));
sewardjde4a1d02002-03-22 01:27:54 +00001245
nethercote2d5b8162004-08-11 09:40:52 +00001246 old_bszB = get_bszB_lo(b);
1247 vg_assert(is_inuse_bszB(old_bszB));
1248 old_bszB = mk_plain_bszB(old_bszB);
1249 old_pszB = bszB_to_pszB(a, old_bszB);
sewardjde4a1d02002-03-22 01:27:54 +00001250
njn25e49d8e72002-09-23 09:36:25 +00001251 if (req_pszB <= old_pszB) {
1252 VGP_POPCC(VgpMalloc);
1253 return ptr;
1254 }
sewardjde4a1d02002-03-22 01:27:54 +00001255
njn828022a2005-03-13 14:56:31 +00001256 p_new = VG_(arena_malloc) ( aid, req_pszB );
1257
sewardjb5f6f512005-03-10 23:59:00 +00001258 VG_(memcpy)(p_new, ptr, old_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001259
sewardjb5f6f512005-03-10 23:59:00 +00001260 VG_(arena_free)(aid, ptr);
njn25e49d8e72002-09-23 09:36:25 +00001261
1262 VGP_POPCC(VgpMalloc);
sewardjde4a1d02002-03-22 01:27:54 +00001263 return p_new;
1264}
1265
1266
1267/*------------------------------------------------------------*/
nethercote996901a2004-08-03 13:29:09 +00001268/*--- Tool-visible functions. ---*/
njn25e49d8e72002-09-23 09:36:25 +00001269/*------------------------------------------------------------*/
1270
nethercote2d5b8162004-08-11 09:40:52 +00001271// All just wrappers to avoid exposing arenas to tools.
njn25e49d8e72002-09-23 09:36:25 +00001272
nethercote7ac7f7b2004-11-02 12:36:02 +00001273void* VG_(malloc) ( SizeT nbytes )
njn25e49d8e72002-09-23 09:36:25 +00001274{
nethercote60f5b822004-01-26 17:24:42 +00001275 return VG_(arena_malloc) ( VG_AR_TOOL, nbytes );
njn25e49d8e72002-09-23 09:36:25 +00001276}
1277
1278void VG_(free) ( void* ptr )
1279{
nethercote60f5b822004-01-26 17:24:42 +00001280 VG_(arena_free) ( VG_AR_TOOL, ptr );
njn25e49d8e72002-09-23 09:36:25 +00001281}
1282
njn926ed472005-03-11 04:44:10 +00001283void* VG_(calloc) ( SizeT nmemb, SizeT bytes_per_memb )
njn25e49d8e72002-09-23 09:36:25 +00001284{
njn828022a2005-03-13 14:56:31 +00001285 return VG_(arena_calloc) ( VG_AR_TOOL, nmemb, bytes_per_memb );
njn25e49d8e72002-09-23 09:36:25 +00001286}
1287
nethercote7ac7f7b2004-11-02 12:36:02 +00001288void* VG_(realloc) ( void* ptr, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001289{
njn828022a2005-03-13 14:56:31 +00001290 return VG_(arena_realloc) ( VG_AR_TOOL, ptr, size );
njn25e49d8e72002-09-23 09:36:25 +00001291}
1292
sewardjde4a1d02002-03-22 01:27:54 +00001293/*--------------------------------------------------------------------*/
njn717cde52005-05-10 02:47:21 +00001294/*--- end ---*/
sewardjde4a1d02002-03-22 01:27:54 +00001295/*--------------------------------------------------------------------*/