
/*--------------------------------------------------------------------*/
/*--- An implementation of malloc/free which doesn't use sbrk.     ---*/
/*---                                               m_mallocfree.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2005 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_core_basics.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcmman.h"
#include "pub_core_libcprint.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_profile.h"
#include "pub_core_tooliface.h"
#include "valgrind.h"

//zz#include "memcheck/memcheck.h"

//#define DEBUG_MALLOC      // turn on heavyweight debugging machinery
//#define VERBOSE_MALLOC    // make verbose, esp. in debugging machinery

/*------------------------------------------------------------*/
/*--- Main types                                           ---*/
/*------------------------------------------------------------*/

#define N_MALLOC_LISTS 18   // do not change this

// The amount you can ask for is limited only by sizeof(SizeT)...
#define MAX_PSZB (~((SizeT)0x0))

typedef UChar UByte;

/* Block layout:

      this block total szB     (sizeof(SizeT) bytes)
      freelist previous ptr    (sizeof(void*) bytes)
      red zone bytes           (depends on .rz_szB field of Arena)
      (payload bytes)
      red zone bytes           (depends on .rz_szB field of Arena)
      freelist next ptr        (sizeof(void*) bytes)
      this block total szB     (sizeof(SizeT) bytes)

   Total size in bytes (bszB) and payload size in bytes (pszB)
   are related by:

      bszB == pszB + 2*sizeof(SizeT) + 2*sizeof(void*) + 2*a->rz_szB

   Furthermore, both size fields in the block have their least-significant
   bit set if the block is not in use, and unset if it is in use.
   (The bottom 3 or so bits are always free for this because of alignment.)
   A block size of zero is not possible, because a block always has at
   least two SizeTs and two pointers of overhead.

   Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned.  This is
   achieved by ensuring that Superblocks are VG_MIN_MALLOC_SZB-aligned
   (see newSuperblock() for how), and that the lengths of the following
   things are a multiple of VG_MIN_MALLOC_SZB:
   - Superblock admin section lengths (due to elastic padding)
   - Block admin section (low and high) lengths (due to elastic redzones)
   - Block payload lengths (due to req_pszB rounding up)
*/
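
/* Worked example of the size relation above -- a sketch only, assuming
   a 32-bit build (sizeof(SizeT) == sizeof(void*) == 4) and an arena
   whose rz_szB has been elastically rounded up to 8 (see arena_init):

      bszB == pszB + 2*4 + 2*4 + 2*8 == pszB + 32

   so a 40-byte payload occupies a 72-byte block.  With the low-bit
   tagging, that block's two size fields hold 72 while it is in use
   and 73 while it sits on a freelist. */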
typedef
   struct {
      // No fields are actually used in this struct, because a Block has
      // many variable sized fields and so can't be accessed
      // meaningfully with normal fields.  So we use access functions all
      // the time.  This struct gives us a type to use, though.  Also, we
      // make sizeof(Block) 1 byte so that we can do arithmetic with the
      // Block* type in increments of 1!
      UByte dummy;
   }
   Block;

// A superblock.  'padding' is never used, it just ensures that if the
// entire Superblock is aligned to VG_MIN_MALLOC_SZB, then payload_bytes[]
// will be too.  It can add small amounts of padding unnecessarily -- eg.
// 8 bytes on 32-bit machines with an 8-byte VG_MIN_MALLOC_SZB -- because
// it's too hard to make a constant expression that works perfectly in all
// cases.
// payload_bytes[] is made a single big Block when the Superblock is
// created, and then can be split and the splittings remerged, but Blocks
// always cover its entire length -- there are never any unused bytes at
// the end, for example.
typedef
   struct _Superblock {
      struct _Superblock* next;
      SizeT n_payload_bytes;
      UByte padding[ VG_MIN_MALLOC_SZB -
                        ((sizeof(struct _Superblock*) + sizeof(SizeT)) %
                         VG_MIN_MALLOC_SZB) ];
      UByte payload_bytes[0];
   }
   Superblock;
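
/* Sketch of the padding arithmetic, assuming a 32-bit build with
   VG_MIN_MALLOC_SZB == 8: sizeof(next) + sizeof(n_payload_bytes) is
   4 + 4 == 8, so the admin fields already end on an 8-byte boundary,
   yet padding[] still gets 8 - (8 % 8) == 8 bytes -- the harmless
   over-padding mentioned in the comment above. */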

// An arena.  Each 'freelist' entry is a circular, doubly-linked list.
// 'rz_szB' is elastic, in that it can be bigger than asked-for to ensure
// alignment.
typedef
   struct {
      Char*       name;
      Bool        clientmem;        // Allocates in the client address space?
      SizeT       rz_szB;           // Red zone size in bytes
      SizeT       min_sblock_szB;   // Minimum superblock size in bytes
      Block*      freelist[N_MALLOC_LISTS];
      Superblock* sblocks;
      // Stats only.
      SizeT bytes_on_loan;
      SizeT bytes_mmaped;
      SizeT bytes_on_loan_max;
   }
   Arena;


/*------------------------------------------------------------*/
/*--- Low-level functions for working with Blocks.         ---*/
/*------------------------------------------------------------*/

#define SIZE_T_0x1 ((SizeT)0x1)

// Mark a bszB as in-use or not-in-use.
static __inline__
SizeT mk_inuse_bszB ( SizeT bszB )
{
   vg_assert(bszB != 0);
   return bszB & (~SIZE_T_0x1);
}
static __inline__
SizeT mk_free_bszB ( SizeT bszB )
{
   vg_assert(bszB != 0);
   return bszB | SIZE_T_0x1;
}

// Remove the in-use/not-in-use attribute from a bszB, leaving just
// the size.
static __inline__
SizeT mk_plain_bszB ( SizeT bszB )
{
   vg_assert(bszB != 0);
   return bszB & (~SIZE_T_0x1);
}

// Does this bszB have the in-use attribute?
static __inline__
Bool is_inuse_bszB ( SizeT bszB )
{
   vg_assert(bszB != 0);
   return (0 != (bszB & SIZE_T_0x1)) ? False : True;
}
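
/* Round-trip sketch of the tagging, for a plain size of 0x48:
   mk_free_bszB(0x48) == 0x49 and mk_inuse_bszB(0x48) == 0x48, so
   is_inuse_bszB(0x49) == False and is_inuse_bszB(0x48) == True, and
   mk_plain_bszB recovers 0x48 from either form. */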


// Set and get the lower size field of a block.
static __inline__
void set_bszB_lo ( Block* b, SizeT bszB )
{
   *(SizeT*)&b[0] = bszB;
}
static __inline__
SizeT get_bszB_lo ( Block* b )
{
   return *(SizeT*)&b[0];
}

// Get the address of the last byte in a block
static __inline__
UByte* last_byte ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return &b2[mk_plain_bszB(get_bszB_lo(b)) - 1];
}

// Set and get the upper size field of a block.
static __inline__
void set_bszB_hi ( Block* b, SizeT bszB )
{
   UByte* b2 = (UByte*)b;
   UByte* lb = last_byte(b);
   vg_assert(lb == &b2[mk_plain_bszB(bszB) - 1]);
   *(SizeT*)&lb[-sizeof(SizeT) + 1] = bszB;
}
static __inline__
SizeT get_bszB_hi ( Block* b )
{
   UByte* lb = last_byte(b);
   return *(SizeT*)&lb[-sizeof(SizeT) + 1];
}


// Return the lower, upper and total overhead in bytes for a block.
// These are determined purely by which arena the block lives in.
static __inline__
SizeT overhead_szB_lo ( Arena* a )
{
   return sizeof(SizeT) + sizeof(void*) + a->rz_szB;
}
static __inline__
SizeT overhead_szB_hi ( Arena* a )
{
   return a->rz_szB + sizeof(void*) + sizeof(SizeT);
}
static __inline__
SizeT overhead_szB ( Arena* a )
{
   return overhead_szB_lo(a) + overhead_szB_hi(a);
}

// Given the addr of a block, return the addr of its payload.
static __inline__
UByte* get_block_payload ( Arena* a, Block* b )
{
   UByte* b2 = (UByte*)b;
   return & b2[ overhead_szB_lo(a) ];
}
// Given the addr of a block's payload, return the addr of the block itself.
static __inline__
Block* get_payload_block ( Arena* a, UByte* payload )
{
   return (Block*)&payload[ -overhead_szB_lo(a) ];
}
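
/* Since overhead_szB_lo(a) is a per-arena constant, these two are exact
   inverses: get_payload_block(a, get_block_payload(a, b)) == b for any
   Block* b.  Eg. with the 32-bit figures sketched earlier (rz_szB == 8),
   the payload starts 4 + 4 + 8 == 16 bytes into the block. */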


// Set and get the next and previous link fields of a block.
static __inline__
void set_prev_b ( Block* b, Block* prev_p )
{
   UByte* b2 = (UByte*)b;
   *(Block**)&b2[sizeof(SizeT)] = prev_p;
}
static __inline__
void set_next_b ( Block* b, Block* next_p )
{
   UByte* lb = last_byte(b);
   *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1] = next_p;
}
static __inline__
Block* get_prev_b ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return *(Block**)&b2[sizeof(SizeT)];
}
static __inline__
Block* get_next_b ( Block* b )
{
   UByte* lb = last_byte(b);
   return *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1];
}


// Get the block immediately preceding this one in the Superblock.
static __inline__
Block* get_predecessor_block ( Block* b )
{
   UByte* b2 = (UByte*)b;
   SizeT bszB = mk_plain_bszB( (*(SizeT*)&b2[-sizeof(SizeT)]) );
   return (Block*)&b2[-bszB];
}

// Read and write the lower and upper red-zone bytes of a block.
static __inline__
void set_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
{
   UByte* b2 = (UByte*)b;
   b2[sizeof(SizeT) + sizeof(void*) + rz_byteno] = v;
}
static __inline__
void set_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
{
   UByte* lb = last_byte(b);
   lb[-sizeof(SizeT) - sizeof(void*) - rz_byteno] = v;
}
static __inline__
UByte get_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno )
{
   UByte* b2 = (UByte*)b;
   return b2[sizeof(SizeT) + sizeof(void*) + rz_byteno];
}
static __inline__
UByte get_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno )
{
   UByte* lb = last_byte(b);
   return lb[-sizeof(SizeT) - sizeof(void*) - rz_byteno];
}


// Return the minimum bszB for a block in this arena.  Blocks can have
// zero-length payloads, so it's just the size of the admin bytes.
static __inline__
SizeT min_useful_bszB ( Arena* a )
{
   return overhead_szB(a);
}

// Convert payload size <--> block size (both in bytes).
static __inline__
SizeT pszB_to_bszB ( Arena* a, SizeT pszB )
{
   return pszB + overhead_szB(a);
}
static __inline__
SizeT bszB_to_pszB ( Arena* a, SizeT bszB )
{
   vg_assert(bszB >= overhead_szB(a));
   return bszB - overhead_szB(a);
}


/*------------------------------------------------------------*/
/*--- Arena management                                     ---*/
/*------------------------------------------------------------*/

#define CORE_ARENA_MIN_SZB 1048576

// The arena structures themselves.
static Arena vg_arena[VG_N_ARENAS];

// Functions external to this module identify arenas using ArenaIds,
// not Arena*s.  This fn converts the former to the latter.
static Arena* arenaId_to_ArenaP ( ArenaId arena )
{
   vg_assert(arena >= 0 && arena < VG_N_ARENAS);
   return & vg_arena[arena];
}

// Initialise an arena.  rz_szB is the minimum redzone size; it might be
// made bigger to ensure that VG_MIN_MALLOC_SZB is observed.
static
void arena_init ( ArenaId aid, Char* name, SizeT rz_szB, SizeT min_sblock_szB )
{
   SizeT i;
   Arena* a = arenaId_to_ArenaP(aid);

   vg_assert(rz_szB < 128);   // ensure reasonable size
   vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
   a->name = name;
   a->clientmem = ( VG_AR_CLIENT == aid ? True : False );

   // The size of the low and high admin sections in a block must be a
   // multiple of VG_MIN_MALLOC_SZB.  So we round up the asked-for
   // redzone size if necessary to achieve this.
   a->rz_szB = rz_szB;
   while (0 != overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++;
   vg_assert(overhead_szB_lo(a) == overhead_szB_hi(a));

   a->min_sblock_szB = min_sblock_szB;
   for (i = 0; i < N_MALLOC_LISTS; i++) a->freelist[i] = NULL;
   a->sblocks = NULL;
   a->bytes_on_loan = 0;
   a->bytes_mmaped = 0;
   a->bytes_on_loan_max = 0;
}
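
/* Eg. (a 32-bit sketch with VG_MIN_MALLOC_SZB == 8): asking for
   rz_szB == 4 gives overhead_szB_lo(a) == 4 + 4 + 4 == 12, not a
   multiple of 8, so the loop above bumps rz_szB to 8 and the overhead
   to 16 -- the "4 becomes 8" case mentioned in ensure_mm_init(). */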

/* Print vital stats for an arena. */
void VG_(print_all_arena_stats) ( void )
{
   UInt i;
   for (i = 0; i < VG_N_ARENAS; i++) {
      Arena* a = arenaId_to_ArenaP(i);
      VG_(message)(Vg_DebugMsg,
         "%8s: %8d mmap'd, %8d/%8d max/curr",
         a->name, a->bytes_mmaped, a->bytes_on_loan_max, a->bytes_on_loan
      );
   }
}

/* This library is self-initialising, which makes it more self-contained
   and less coupled to the outside world.  Hence VG_(arena_malloc)() and
   VG_(arena_free)() below always call ensure_mm_init() to ensure things
   are correctly initialised. */
static
void ensure_mm_init ( void )
{
   static Bool  init_done = False;
   static SizeT client_redzone_szB = 8;   // default: be paranoid

   if (init_done) {
      // This assertion ensures that a tool cannot try to change the client
      // redzone size with VG_(needs_malloc_replacement)() after this module
      // has done its first allocation.
      if (VG_(needs).malloc_replacement)
         vg_assert(client_redzone_szB == VG_(tdict).tool_client_redzone_szB);
      return;
   }

   if (VG_(needs).malloc_replacement) {
      client_redzone_szB = VG_(tdict).tool_client_redzone_szB;
      // 128 is no special figure, just something not too big
      if (client_redzone_szB > 128) {
         VG_(printf)( "\nTool error:\n"
                      "  specified redzone size is too big (%llu)\n",
                      (ULong)client_redzone_szB);
         VG_(exit)(1);
      }
   }

   /* Use checked red zones (of various sizes) for our internal stuff,
      and an unchecked zone of arbitrary size for the client.  Of
      course the client's red zone can be checked by the tool, eg.
      by using addressability maps, but not by the mechanism implemented
      here, which merely checks at the time of freeing that the red
      zone bytes are unchanged.

      Nb: redzone sizes are *minimums*; they could be made bigger to ensure
      alignment.  Eg. on 32-bit machines, 4 becomes 8, and 12 becomes 16;
      but on 64-bit machines 4 stays as 4, and 12 stays as 12 --- the extra
      4 bytes in both are accounted for by the larger prev/next ptr.
   */
   arena_init ( VG_AR_CORE,     "core",     4,                  CORE_ARENA_MIN_SZB );
   arena_init ( VG_AR_TOOL,     "tool",     4,                  1048576 );
   arena_init ( VG_AR_SYMTAB,   "symtab",   4,                  1048576 );
   arena_init ( VG_AR_CLIENT,   "client",   client_redzone_szB, 1048576 );
   arena_init ( VG_AR_DEMANGLE, "demangle", 12/*paranoid*/,     65536 );
   arena_init ( VG_AR_EXECTXT,  "exectxt",  4,                  65536 );
   arena_init ( VG_AR_ERRORS,   "errors",   4,                  65536 );

   init_done = True;
#  ifdef DEBUG_MALLOC
   VG_(sanity_check_malloc_all)();
#  endif
}


/*------------------------------------------------------------*/
/*--- Superblock management                                ---*/
/*------------------------------------------------------------*/

// Align ptr p upwards to an align-sized boundary.
static
void* align_upwards ( void* p, SizeT align )
{
   Addr a = (Addr)p;
   if ((a % align) == 0) return (void*)a;
   return (void*)(a - (a % align) + align);
}
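
/* Eg. align_upwards((void*)0x1234, 0x10) == (void*)0x1240, while an
   already-aligned (void*)0x1230 is returned unchanged. */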

// If not enough memory is available, either aborts (for non-client memory)
// or returns 0 (for client memory).
static
Superblock* newSuperblock ( Arena* a, SizeT cszB )
{
   // The extra VG_MIN_MALLOC_SZB bytes are for possible alignment up.
   static UByte bootstrap_superblock[CORE_ARENA_MIN_SZB+VG_MIN_MALLOC_SZB];
   static Bool called_before = True; //False;
   Superblock* sb;

   // Take into account admin bytes in the Superblock.
   cszB += sizeof(Superblock);

   if (cszB < a->min_sblock_szB) cszB = a->min_sblock_szB;
   while ((cszB % VKI_PAGE_SIZE) > 0) cszB++;

   if (!called_before) {
      // First time we're called -- use the special static bootstrap
      // superblock (see comment at top of main() for details).
      called_before = True;
      vg_assert(a == arenaId_to_ArenaP(VG_AR_CORE));
      vg_assert(CORE_ARENA_MIN_SZB >= cszB);
      // Ensure sb is suitably aligned.
      sb = (Superblock*)align_upwards( bootstrap_superblock,
                                       VG_MIN_MALLOC_SZB );
   } else if (a->clientmem) {
      // client allocation -- return 0 to client if it fails
      sb = (Superblock *)
           VG_(get_memory_from_mmap_for_client)
              (0, cszB, VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC, 0);
      if (NULL == sb)
         return 0;
   } else {
      // non-client allocation -- aborts if it fails
      sb = VG_(get_memory_from_mmap) ( cszB, "newSuperblock" );
   }
   vg_assert(NULL != sb);
   //zzVALGRIND_MAKE_WRITABLE(sb, cszB);
   vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
   sb->n_payload_bytes = cszB - sizeof(Superblock);
   a->bytes_mmaped += cszB;
   if (0)
      VG_(message)(Vg_DebugMsg, "newSuperblock, %d payload bytes",
                   sb->n_payload_bytes);
   return sb;
}

// Find the superblock containing the given chunk.
static
Superblock* findSb ( Arena* a, Block* b )
{
   Superblock* sb;
   for (sb = a->sblocks; sb; sb = sb->next)
      if ((Block*)&sb->payload_bytes[0] <= b
          && b < (Block*)&sb->payload_bytes[sb->n_payload_bytes])
         return sb;
   VG_(printf)("findSb: can't find pointer %p in arena '%s'\n", b, a->name );
   VG_(core_panic)("findSb: VG_(arena_free)() in wrong arena?");
   return NULL; /*NOTREACHED*/
}


/*------------------------------------------------------------*/
/*--- Functions for working with freelists.                ---*/
/*------------------------------------------------------------*/

// Nb: Determination of which freelist a block lives on is based on the
// payload size, not block size.

// Convert a payload size in bytes to a freelist number.
static
UInt pszB_to_listNo ( SizeT pszB )
{
   vg_assert(0 == pszB % VG_MIN_MALLOC_SZB);
   pszB /= VG_MIN_MALLOC_SZB;

   // The first 13 lists hold blocks of size VG_MIN_MALLOC_SZB * list_num.
   // The final 5 hold bigger blocks.
   if (pszB <= 12)  return pszB;
   if (pszB <= 16)  return 13;
   if (pszB <= 32)  return 14;
   if (pszB <= 64)  return 15;
   if (pszB <= 128) return 16;
   return 17;
}
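
/* Eg. with VG_MIN_MALLOC_SZB == 8: a 64-byte payload is 8 units and so
   goes on list 8; a 104-byte payload is 13 units and goes on list 13;
   a 2000-byte payload is 250 units and lands on the catch-all list 17. */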

// What is the minimum payload size for a given list?
static
SizeT listNo_to_pszB_min ( UInt listNo )
{
   SizeT pszB = 0;
   vg_assert(listNo <= N_MALLOC_LISTS);
   while (pszB_to_listNo(pszB) < listNo) pszB += VG_MIN_MALLOC_SZB;
   return pszB;
}

// What is the maximum payload size for a given list?
static
SizeT listNo_to_pszB_max ( UInt listNo )
{
   vg_assert(listNo <= N_MALLOC_LISTS);
   if (listNo == N_MALLOC_LISTS-1) {
      return MAX_PSZB;
   } else {
      return listNo_to_pszB_min(listNo+1) - 1;
   }
}


/* A nasty hack to try to reduce fragmentation.  Try to replace
   a->freelist[lno] with another block on the same list but with a
   lower address, with the idea of attempting to recycle the same
   blocks rather than cruise through the address space. */
static
void swizzle ( Arena* a, UInt lno )
{
   Block* p_best;
   Block* pp;
   Block* pn;
   UInt   i;

   p_best = a->freelist[lno];
   if (p_best == NULL) return;

   pn = pp = p_best;
   for (i = 0; i < 20; i++) {
      pn = get_next_b(pn);
      pp = get_prev_b(pp);
      if (pn < p_best) p_best = pn;
      if (pp < p_best) p_best = pp;
   }
   if (p_best < a->freelist[lno]) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)("retreat by %d\n", a->freelist[lno] - p_best);
#     endif
      a->freelist[lno] = p_best;
   }
}


/*------------------------------------------------------------*/
/*--- Sanity-check/debugging machinery.                    ---*/
/*------------------------------------------------------------*/

#define REDZONE_LO_MASK 0x31
#define REDZONE_HI_MASK 0x7c

// Do some crude sanity checks on a Block.
static
Bool blockSane ( Arena* a, Block* b )
{
#  define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
   UInt i;
   if (get_bszB_lo(b) != get_bszB_hi(b))
      {BLEAT("sizes");return False;}
   if (!a->clientmem && is_inuse_bszB(get_bszB_lo(b))) {
      for (i = 0; i < a->rz_szB; i++) {
         if (get_rz_lo_byte(a, b, i) !=
             (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK))
            {BLEAT("redzone-lo");return False;}
         if (get_rz_hi_byte(a, b, i) !=
             (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK))
            {BLEAT("redzone-hi");return False;}
      }
   }
   return True;
#  undef BLEAT
}

// Print superblocks (only for debugging).
static
void ppSuperblocks ( Arena* a )
{
   UInt i, blockno;
   SizeT b_bszB;
   Block* b;
   Superblock* sb = a->sblocks;
   blockno = 1;

   while (sb) {
      VG_(printf)( "\n" );
      VG_(printf)( "superblock %d at %p, sb->n_pl_bs = %d, next = %p\n",
                   blockno++, sb, sb->n_payload_bytes, sb->next );
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         b = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_lo(b);
         VG_(printf)( "   block at %d, bszB %d: ", i, mk_plain_bszB(b_bszB) );
         VG_(printf)( "%s, ", is_inuse_bszB(b_bszB) ? "inuse" : "free");
         VG_(printf)( "%s\n", blockSane(a, b) ? "ok" : "BAD" );
      }
      vg_assert(i == sb->n_payload_bytes);   // no overshoot at end of Sb
      sb = sb->next;
   }
   VG_(printf)( "end of superblocks\n\n" );
}

// Sanity check both the superblocks and the chains.
static void sanity_check_malloc_arena ( ArenaId aid )
{
   UInt i, superblockctr, blockctr_sb, blockctr_li;
   UInt blockctr_sb_free, listno;
   SizeT b_bszB, b_pszB, list_min_pszB, list_max_pszB;
   Superblock* sb;
   Bool thisFree, lastWasFree;
   Block* b;
   Block* b_prev;
   SizeT arena_bytes_on_loan;
   Arena* a;

#  define BOMB VG_(core_panic)("sanity_check_malloc_arena")

   a = arenaId_to_ArenaP(aid);

   // First, traverse all the superblocks, inspecting the Blocks in each.
   superblockctr = blockctr_sb = blockctr_sb_free = 0;
   arena_bytes_on_loan = 0;
   sb = a->sblocks;
   while (sb) {
      lastWasFree = False;
      superblockctr++;
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         blockctr_sb++;
         b = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_lo(b);
         if (!blockSane(a, b)) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d (bszB %d): "
                        " BAD\n", sb, i, b_bszB );
            BOMB;
         }
         thisFree = !is_inuse_bszB(b_bszB);
         if (thisFree && lastWasFree) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d (bszB %d): "
                        "UNMERGED FREES\n",
                        sb, i, b_bszB );
            BOMB;
         }
         if (thisFree) blockctr_sb_free++;
         if (!thisFree)
            arena_bytes_on_loan += bszB_to_pszB(a, b_bszB);
         lastWasFree = thisFree;
      }
      if (i > sb->n_payload_bytes) {
         VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
                      "overshoots end\n", sb);
         BOMB;
      }
      sb = sb->next;
   }

   if (arena_bytes_on_loan != a->bytes_on_loan) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: a->bytes_on_loan %d, "
                   "arena_bytes_on_loan %d: "
                   "MISMATCH\n", a->bytes_on_loan, arena_bytes_on_loan);
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   /* Second, traverse each list, checking that the back pointers make
      sense, counting blocks encountered, and checking that each block
      is an appropriate size for this list. */
   blockctr_li = 0;
   for (listno = 0; listno < N_MALLOC_LISTS; listno++) {
      list_min_pszB = listNo_to_pszB_min(listno);
      list_max_pszB = listNo_to_pszB_max(listno);
      b = a->freelist[listno];
      if (b == NULL) continue;
      while (True) {
         b_prev = b;
         b = get_next_b(b);
         if (get_prev_b(b) != b_prev) {
            VG_(printf)( "sanity_check_malloc_arena: list %d at %p: "
                         "BAD LINKAGE\n",
                         listno, b );
            BOMB;
         }
         b_pszB = bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(b)));
         if (b_pszB < list_min_pszB || b_pszB > list_max_pszB) {
            VG_(printf)(
               "sanity_check_malloc_arena: list %d at %p: "
               "WRONG CHAIN SIZE %dB (%dB, %dB)\n",
               listno, b, b_pszB, list_min_pszB, list_max_pszB );
            BOMB;
         }
         blockctr_li++;
         if (b == a->freelist[listno]) break;
      }
   }

   if (blockctr_sb_free != blockctr_li) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: BLOCK COUNT MISMATCH "
                   "(via sbs %d, via lists %d)\n",
                   blockctr_sb_free, blockctr_li );
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   if (VG_(clo_verbosity) > 2)
      VG_(message)(Vg_DebugMsg,
                   "%8s: %2d sbs, %5d bs, %2d/%-2d free bs, "
                   "%7d mmap, %7d loan",
                   a->name,
                   superblockctr,
                   blockctr_sb, blockctr_sb_free, blockctr_li,
                   a->bytes_mmaped, a->bytes_on_loan);
#  undef BOMB
}


void VG_(sanity_check_malloc_all) ( void )
{
   UInt i;
   for (i = 0; i < VG_N_ARENAS; i++)
      sanity_check_malloc_arena ( i );
}


/*------------------------------------------------------------*/
/*--- Creating and deleting blocks.                        ---*/
/*------------------------------------------------------------*/

// Mark the bytes at b .. b+bszB-1 as not in use, and add them to the
// relevant free list.

static
void mkFreeBlock ( Arena* a, Block* b, SizeT bszB, UInt b_lno )
{
   SizeT pszB = bszB_to_pszB(a, bszB);
   vg_assert(b_lno == pszB_to_listNo(pszB));
   //zzVALGRIND_MAKE_WRITABLE(b, bszB);
   // Set the size fields and indicate not-in-use.
   set_bszB_lo(b, mk_free_bszB(bszB));
   set_bszB_hi(b, mk_free_bszB(bszB));

   // Add to the relevant list.
   if (a->freelist[b_lno] == NULL) {
      set_prev_b(b, b);
      set_next_b(b, b);
      a->freelist[b_lno] = b;
   } else {
      Block* b_prev = get_prev_b(a->freelist[b_lno]);
      Block* b_next = a->freelist[b_lno];
      set_next_b(b_prev, b);
      set_prev_b(b_next, b);
      set_next_b(b, b_next);
      set_prev_b(b, b_prev);
   }
#  ifdef DEBUG_MALLOC
   (void)blockSane(a,b);
#  endif
}
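
/* The lists are circular, so the insertion above splices b in just
   "before" the list head, ie. at the tail:

      before:  head <-> n1 <-> ... <-> tail <-> (back to head)
      after:   head <-> n1 <-> ... <-> tail <-> b <-> (back to head)

   A one-element list is the special case where b's prev and next
   pointers both point at b itself. */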

// Mark the bytes at b .. b+bszB-1 as in use, and set up the block
// appropriately.
static
void mkInuseBlock ( Arena* a, Block* b, SizeT bszB )
{
   UInt i;
   vg_assert(bszB >= min_useful_bszB(a));
   //zzVALGRIND_MAKE_WRITABLE(b, bszB);
   set_bszB_lo(b, mk_inuse_bszB(bszB));
   set_bszB_hi(b, mk_inuse_bszB(bszB));
   set_prev_b(b, NULL);   // Take off freelist
   set_next_b(b, NULL);   // ditto
   if (!a->clientmem) {
      for (i = 0; i < a->rz_szB; i++) {
         set_rz_lo_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK));
         set_rz_hi_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK));
      }
   }
#  ifdef DEBUG_MALLOC
   (void)blockSane(a,b);
#  endif
}

// Remove a block from a given list.  Does no sanity checking.
static
void unlinkBlock ( Arena* a, Block* b, UInt listno )
{
   vg_assert(listno < N_MALLOC_LISTS);
   if (get_prev_b(b) == b) {
      // Only one element in the list; treat it specially.
      vg_assert(get_next_b(b) == b);
      a->freelist[listno] = NULL;
   } else {
      Block* b_prev = get_prev_b(b);
      Block* b_next = get_next_b(b);
      a->freelist[listno] = b_prev;
      set_next_b(b_prev, b_next);
      set_prev_b(b_next, b_prev);
      swizzle ( a, listno );
   }
   set_prev_b(b, NULL);
   set_next_b(b, NULL);
}


/*------------------------------------------------------------*/
/*--- Core-visible functions.                              ---*/
/*------------------------------------------------------------*/

// Align the request size.
static __inline__
SizeT align_req_pszB ( SizeT req_pszB )
{
   SizeT n = VG_MIN_MALLOC_SZB-1;
   return ((req_pszB + n) & (~n));
}

void* VG_(arena_malloc) ( ArenaId aid, SizeT req_pszB )
{
   SizeT req_bszB, frag_bszB, b_bszB;
   UInt lno;
   Superblock* new_sb;
   Block* b = NULL;
   Arena* a;
   void* v;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   vg_assert(req_pszB < MAX_PSZB);
   req_pszB = align_req_pszB(req_pszB);
   req_bszB = pszB_to_bszB(a, req_pszB);

   // Scan through all the big-enough freelists for a block.
   for (lno = pszB_to_listNo(req_pszB); lno < N_MALLOC_LISTS; lno++) {
      b = a->freelist[lno];
      if (NULL == b) continue;   // If this list is empty, try the next one.
      while (True) {
         b_bszB = mk_plain_bszB(get_bszB_lo(b));
         if (b_bszB >= req_bszB) goto obtained_block;   // success!
         b = get_next_b(b);
         if (b == a->freelist[lno]) break;   // traversed entire freelist
      }
   }

   // If we reach here, no suitable block found, allocate a new superblock
   vg_assert(lno == N_MALLOC_LISTS);
   new_sb = newSuperblock(a, req_bszB);
   if (NULL == new_sb) {
      // Should only fail for a client allocation; otherwise we should
      // have aborted already.
      vg_assert(VG_AR_CLIENT == aid);
      VGP_POPCC(VgpMalloc);
      return NULL;
   }
   new_sb->next = a->sblocks;
   a->sblocks = new_sb;
   b = (Block*)&new_sb->payload_bytes[0];
   lno = pszB_to_listNo(bszB_to_pszB(a, new_sb->n_payload_bytes));
   mkFreeBlock ( a, b, new_sb->n_payload_bytes, lno);
   // fall through

  obtained_block:
   // Ok, we can allocate from b, which lives in list lno.
   vg_assert(b != NULL);
   vg_assert(lno < N_MALLOC_LISTS);
   vg_assert(a->freelist[lno] != NULL);
   b_bszB = mk_plain_bszB(get_bszB_lo(b));
   // req_bszB is the size of the block we are after.  b_bszB is the
   // size of what we've actually got.
   vg_assert(b_bszB >= req_bszB);

   // Could we split this block and still get a useful fragment?
   frag_bszB = b_bszB - req_bszB;
   if (frag_bszB >= min_useful_bszB(a)) {
      // Yes, split block in two, put the fragment on the appropriate free
      // list, and update b_bszB accordingly.
      // printf( "split %dB into %dB and %dB\n", b_bszB, req_bszB, frag_bszB );
      unlinkBlock(a, b, lno);
      mkInuseBlock(a, b, req_bszB);
      mkFreeBlock(a, &b[req_bszB], frag_bszB,
                  pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
      b_bszB = mk_plain_bszB(get_bszB_lo(b));
   } else {
      // No, mark as in use and use as-is.
      unlinkBlock(a, b, lno);
      mkInuseBlock(a, b, b_bszB);
   }

   // Update stats
   a->bytes_on_loan += bszB_to_pszB(a, b_bszB);
   if (a->bytes_on_loan > a->bytes_on_loan_max)
      a->bytes_on_loan_max = a->bytes_on_loan;

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   VGP_POPCC(VgpMalloc);
   v = get_block_payload(a, b);
   vg_assert( (((Addr)v) & (VG_MIN_MALLOC_SZB-1)) == 0 );

   VALGRIND_MALLOCLIKE_BLOCK(v, req_pszB, 0, False);
   return v;
}
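
/* Worked split example (reusing the 32-bit sketch where the per-block
   overhead is 32 bytes): a request for 40 payload bytes needs
   req_bszB == 72.  A free block with b_bszB == 136 leaves a fragment
   of 136 - 72 == 64 bytes, which is >= min_useful_bszB, so it is split
   into an in-use 72-byte block and a free 64-byte block; a 96-byte
   free block would instead be handed out whole, since its 24-byte
   fragment would be too small to be a Block. */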


void VG_(arena_free) ( ArenaId aid, void* ptr )
{
   Superblock* sb;
   UByte* sb_start;
   UByte* sb_end;
   Block* other;
   Block* b;
   SizeT b_bszB, b_pszB, other_bszB;
   UInt b_listno;
   Arena* a;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   if (ptr == NULL) {
      VGP_POPCC(VgpMalloc);
      return;
   }

   b = get_payload_block(a, ptr);

#  ifdef DEBUG_MALLOC
   vg_assert(blockSane(a, b));
#  endif

   a->bytes_on_loan -= bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(b)));

   sb       = findSb( a, b );
   sb_start = &sb->payload_bytes[0];
   sb_end   = &sb->payload_bytes[sb->n_payload_bytes - 1];

   // Put this chunk back on a list somewhere.
   b_bszB   = get_bszB_lo(b);
   b_pszB   = bszB_to_pszB(a, b_bszB);
   b_listno = pszB_to_listNo(b_pszB);
   mkFreeBlock( a, b, b_bszB, b_listno );

   // See if this block can be merged with its successor.
   // First test if we're far enough before the superblock's end to possibly
   // have a successor.
   other = b + b_bszB;
   if (other+min_useful_bszB(a)-1 <= (Block*)sb_end) {
      // Ok, we have a successor, merge if it's not in use.
      other_bszB = get_bszB_lo(other);
      if (!is_inuse_bszB(other_bszB)) {
         // VG_(printf)( "merge-successor\n");
         other_bszB = mk_plain_bszB(other_bszB);
#        ifdef DEBUG_MALLOC
         vg_assert(blockSane(a, other));
#        endif
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other, pszB_to_listNo(bszB_to_pszB(a,other_bszB)) );
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
      }
   } else {
      // Not enough space for successor: check that b is the last block
      // ie. there are no unused bytes at the end of the Superblock.
      vg_assert(other-1 == (Block*)sb_end);
   }

   // Then see if this block can be merged with its predecessor.
   // First test if we're far enough after the superblock's start to possibly
   // have a predecessor.
   if (b >= (Block*)sb_start + min_useful_bszB(a)) {
      // Ok, we have a predecessor, merge if it's not in use.
      other = get_predecessor_block( b );
      other_bszB = get_bszB_lo(other);
      if (!is_inuse_bszB(other_bszB)) {
         // VG_(printf)( "merge-predecessor\n");
         other_bszB = mk_plain_bszB(other_bszB);
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other, pszB_to_listNo(bszB_to_pszB(a, other_bszB)) );
         b = other;
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
      }
   } else {
      // Not enough space for predecessor: check that b is the first block,
      // ie. there are no unused bytes at the start of the Superblock.
      vg_assert((Block*)sb_start == b);
   }

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   VALGRIND_FREELIKE_BLOCK(ptr, 0);

   VGP_POPCC(VgpMalloc);
}


/*
   The idea for malloc_aligned() is to allocate a big block, base, and
   then split it into two parts: frag, which is returned to the
   free pool, and align, which is the bit we're really after.  Here's
   a picture.  L and H denote the block lower and upper overheads, in
   bytes.  The details are gruesome.  Note it is slightly complicated
   because the initial request to generate base may return a bigger
   block than we asked for, so it is important to distinguish the base
   request size and the base actual size.

   frag_b                   align_b
   |                        |
   |    frag_p              |    align_p
   |    |                   |    |
   v    v                   v    v

   +---+                +---+---+               +---+
   | L |----------------| H | L |---------------| H |
   +---+                +---+---+               +---+

   ^    ^                        ^
   |    |                        :
   |    base_p                   this addr must be aligned
   |
   base_b

   .    .               .   .   .              .   .
   <------ frag_bszB ------->   .              .   .
   .    <------------- base_pszB_act --------->    .
   .    .               .   .   .              .   .

*/
sewardjde4a1d02002-03-22 01:27:54 +00001093{
nethercote7ac7f7b2004-11-02 12:36:02 +00001094 SizeT base_pszB_req, base_pszB_act, frag_bszB;
nethercote2d5b8162004-08-11 09:40:52 +00001095 Block *base_b, *align_b;
1096 UByte *base_p, *align_p;
nethercote7ac7f7b2004-11-02 12:36:02 +00001097 SizeT saved_bytes_on_loan;
sewardjde4a1d02002-03-22 01:27:54 +00001098 Arena* a;
1099
njn25e49d8e72002-09-23 09:36:25 +00001100 VGP_PUSHCC(VgpMalloc);
1101
sewardjde4a1d02002-03-22 01:27:54 +00001102 ensure_mm_init();
1103 a = arenaId_to_ArenaP(aid);
1104
nethercote7ac7f7b2004-11-02 12:36:02 +00001105 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00001106
nethercote2d5b8162004-08-11 09:40:52 +00001107 // Check that the requested alignment seems reasonable; that is, is
1108 // a power of 2.
1109 if (req_alignB < VG_MIN_MALLOC_SZB
1110 || req_alignB > 1048576
njn717cde52005-05-10 02:47:21 +00001111 || VG_(log2)( req_alignB ) == -1 /* not a power of 2 */) {
1112 VG_(printf)("VG_(arena_memalign)(%p, %d, %d)\nbad alignment",
nethercote2d5b8162004-08-11 09:40:52 +00001113 a, req_alignB, req_pszB );
njn717cde52005-05-10 02:47:21 +00001114 VG_(core_panic)("VG_(arena_memalign)");
nethercote2d5b8162004-08-11 09:40:52 +00001115 /*NOTREACHED*/
sewardjde4a1d02002-03-22 01:27:54 +00001116 }
nethercote2d5b8162004-08-11 09:40:52 +00001117 // Paranoid
1118 vg_assert(req_alignB % VG_MIN_MALLOC_SZB == 0);
sewardjde4a1d02002-03-22 01:27:54 +00001119
1120 /* Required payload size for the aligned chunk. */
nethercote2d5b8162004-08-11 09:40:52 +00001121 req_pszB = align_req_pszB(req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001122
nethercote2d5b8162004-08-11 09:40:52 +00001123 /* Payload size to request for the big block that we will split up. */
1124 base_pszB_req = req_pszB + min_useful_bszB(a) + req_alignB;
sewardjde4a1d02002-03-22 01:27:54 +00001125
1126 /* Payload ptr for the block we are going to split. Note this
1127 changes a->bytes_on_loan; we save and restore it ourselves. */
1128 saved_bytes_on_loan = a->bytes_on_loan;
nethercote2d5b8162004-08-11 09:40:52 +00001129 base_p = VG_(arena_malloc) ( aid, base_pszB_req );
sewardjde4a1d02002-03-22 01:27:54 +00001130 a->bytes_on_loan = saved_bytes_on_loan;
1131
1132 /* Block ptr for the block we are going to split. */
nethercote2d5b8162004-08-11 09:40:52 +00001133 base_b = get_payload_block ( a, base_p );
sewardjde4a1d02002-03-22 01:27:54 +00001134
1135 /* Pointer to the payload of the aligned block we are going to
1136 return. This has to be suitably aligned. */
nethercote2d5b8162004-08-11 09:40:52 +00001137 align_p = align_upwards ( base_b + 2 * overhead_szB_lo(a)
1138 + overhead_szB_hi(a),
sewardjde4a1d02002-03-22 01:27:54 +00001139 req_alignB );
nethercote2d5b8162004-08-11 09:40:52 +00001140 align_b = get_payload_block(a, align_p);
sewardjde4a1d02002-03-22 01:27:54 +00001141
1142 /* The block size of the fragment we will create. This must be big
1143 enough to actually create a fragment. */
nethercote2d5b8162004-08-11 09:40:52 +00001144 frag_bszB = align_b - base_b;
1145
1146 vg_assert(frag_bszB >= min_useful_bszB(a));
sewardjde4a1d02002-03-22 01:27:54 +00001147
1148 /* The actual payload size of the block we are going to split. */
nethercote2d5b8162004-08-11 09:40:52 +00001149 base_pszB_act = bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(base_b)));
sewardjde4a1d02002-03-22 01:27:54 +00001150
nethercote2d5b8162004-08-11 09:40:52 +00001151 /* Create the fragment block, and put it back on the relevant free list. */
1152 mkFreeBlock ( a, base_b, frag_bszB,
1153 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)) );
sewardjde4a1d02002-03-22 01:27:54 +00001154
1155 /* Create the aligned block. */
nethercote2d5b8162004-08-11 09:40:52 +00001156 mkInuseBlock ( a, align_b,
1157 base_p + base_pszB_act
1158 + overhead_szB_hi(a) - (UByte*)align_b );
sewardjde4a1d02002-03-22 01:27:54 +00001159
1160 /* Final sanity checks. */
nethercote2d5b8162004-08-11 09:40:52 +00001161 vg_assert( is_inuse_bszB(get_bszB_lo(get_payload_block(a, align_p))) );
sewardjde4a1d02002-03-22 01:27:54 +00001162
nethercote2d5b8162004-08-11 09:40:52 +00001163 vg_assert(req_pszB
sewardjde4a1d02002-03-22 01:27:54 +00001164 <=
nethercote2d5b8162004-08-11 09:40:52 +00001165 bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(
1166 get_payload_block(a, align_p))))
sewardjde4a1d02002-03-22 01:27:54 +00001167 );
1168
1169 a->bytes_on_loan
nethercote2d5b8162004-08-11 09:40:52 +00001170 += bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(
1171 get_payload_block(a, align_p))));
sewardjde4a1d02002-03-22 01:27:54 +00001172 if (a->bytes_on_loan > a->bytes_on_loan_max)
1173 a->bytes_on_loan_max = a->bytes_on_loan;
1174
1175# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00001176 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001177# endif
1178
njn25e49d8e72002-09-23 09:36:25 +00001179 VGP_POPCC(VgpMalloc);
1180
nethercote2d5b8162004-08-11 09:40:52 +00001181 vg_assert( (((Addr)align_p) % req_alignB) == 0 );
sewardjb5f6f512005-03-10 23:59:00 +00001182
1183 VALGRIND_MALLOCLIKE_BLOCK(align_p, req_pszB, 0, False);
1184
nethercote2d5b8162004-08-11 09:40:52 +00001185 return align_p;
1186}
1187
1188
nethercote7ac7f7b2004-11-02 12:36:02 +00001189SizeT VG_(arena_payload_szB) ( ArenaId aid, void* ptr )
nethercote2d5b8162004-08-11 09:40:52 +00001190{
1191 Arena* a = arenaId_to_ArenaP(aid);
1192 Block* b = get_payload_block(a, ptr);
1193 return bszB_to_pszB(a, get_bszB_lo(b));
sewardjde4a1d02002-03-22 01:27:54 +00001194}


/*------------------------------------------------------------*/
/*--- Services layered on top of malloc/free.              ---*/
/*------------------------------------------------------------*/

void* VG_(arena_calloc) ( ArenaId aid, SizeT nmemb, SizeT bytes_per_memb )
{
   SizeT  size;
   UChar* p;

   VGP_PUSHCC(VgpMalloc);

   size = nmemb * bytes_per_memb;
   vg_assert(size >= nmemb && size >= bytes_per_memb);   // crude check against overflow

   p = VG_(arena_malloc) ( aid, size );

   VG_(memset)(p, 0, size);

   VALGRIND_MALLOCLIKE_BLOCK(p, size, 0, True);

   VGP_POPCC(VgpMalloc);

   return p;
}


void* VG_(arena_realloc) ( ArenaId aid, void* ptr, SizeT req_pszB )
{
   Arena* a;
   SizeT  old_bszB, old_pszB;
   UChar  *p_new;
   Block* b;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   vg_assert(req_pszB < MAX_PSZB);

   b = get_payload_block(a, ptr);
   vg_assert(blockSane(a, b));

   old_bszB = get_bszB_lo(b);
   vg_assert(is_inuse_bszB(old_bszB));
   old_bszB = mk_plain_bszB(old_bszB);
   old_pszB = bszB_to_pszB(a, old_bszB);

   if (req_pszB <= old_pszB) {
      VGP_POPCC(VgpMalloc);
      return ptr;
   }

   p_new = VG_(arena_malloc) ( aid, req_pszB );

   VG_(memcpy)(p_new, ptr, old_pszB);

   VG_(arena_free)(aid, ptr);

   VGP_POPCC(VgpMalloc);
   return p_new;
}


/* Inline just for the wrapper VG_(strdup) below */
__inline__ Char* VG_(arena_strdup) ( ArenaId aid, const Char* s )
{
   Int   i;
   Int   len;
   Char* res;

   if (s == NULL)
      return NULL;

   len = VG_(strlen)(s) + 1;
   res = VG_(arena_malloc) (aid, len);

   for (i = 0; i < len; i++)
      res[i] = s[i];
   return res;
}


/*------------------------------------------------------------*/
/*--- Tool-visible functions.                              ---*/
/*------------------------------------------------------------*/

// All just wrappers to avoid exposing arenas to tools.

void* VG_(malloc) ( SizeT nbytes )
{
   return VG_(arena_malloc) ( VG_AR_TOOL, nbytes );
}

void  VG_(free) ( void* ptr )
{
   VG_(arena_free) ( VG_AR_TOOL, ptr );
}

void* VG_(calloc) ( SizeT nmemb, SizeT bytes_per_memb )
{
   return VG_(arena_calloc) ( VG_AR_TOOL, nmemb, bytes_per_memb );
}

void* VG_(realloc) ( void* ptr, SizeT size )
{
   return VG_(arena_realloc) ( VG_AR_TOOL, ptr, size );
}

Char* VG_(strdup) ( const Char* s )
{
   return VG_(arena_strdup) ( VG_AR_TOOL, s );
}

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/