blob: 82de6a80a3bb9b79a1a2b283bd67c38546c5615c [file] [log] [blame]
sewardjde4a1d02002-03-22 01:27:54 +00001
2/*--------------------------------------------------------------------*/
3/*--- An implementation of malloc/free which doesn't use sbrk. ---*/
njn717cde52005-05-10 02:47:21 +00004/*--- m_mallocfree.c ---*/
sewardjde4a1d02002-03-22 01:27:54 +00005/*--------------------------------------------------------------------*/
6
7/*
njnb9c427c2004-12-01 14:14:42 +00008 This file is part of Valgrind, a dynamic binary instrumentation
9 framework.
sewardjde4a1d02002-03-22 01:27:54 +000010
njn53612422005-03-12 16:22:54 +000011 Copyright (C) 2000-2005 Julian Seward
sewardjde4a1d02002-03-22 01:27:54 +000012 jseward@acm.org
sewardjde4a1d02002-03-22 01:27:54 +000013
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
njn25e49d8e72002-09-23 09:36:25 +000029 The GNU General Public License is contained in the file COPYING.
sewardjde4a1d02002-03-22 01:27:54 +000030*/
31
njnc7561b92005-06-19 01:24:32 +000032#include "pub_core_basics.h"
njn97405b22005-06-02 03:39:33 +000033#include "pub_core_libcbase.h"
njn132bfcc2005-06-04 19:16:06 +000034#include "pub_core_libcassert.h"
njne9befc62005-06-11 15:51:30 +000035#include "pub_core_libcmman.h"
njn36a20fa2005-06-03 03:08:39 +000036#include "pub_core_libcprint.h"
njnaf1d7df2005-06-11 01:31:52 +000037#include "pub_core_mallocfree.h"
njn20242342005-05-16 23:31:24 +000038#include "pub_core_options.h"
njn31513b42005-06-01 03:09:59 +000039#include "pub_core_profile.h"
njnfc51f8d2005-06-21 03:20:17 +000040#include "pub_core_tooliface.h"
njn296c24d2005-05-15 03:52:40 +000041#include "valgrind.h"
sewardj55f9d1a2005-04-25 11:11:44 +000042
sewardjb5f6f512005-03-10 23:59:00 +000043//zz#include "memcheck/memcheck.h"
sewardjde4a1d02002-03-22 01:27:54 +000044
nethercote2d5b8162004-08-11 09:40:52 +000045//#define DEBUG_MALLOC // turn on heavyweight debugging machinery
46//#define VERBOSE_MALLOC // make verbose, esp. in debugging machinery
47
48/*------------------------------------------------------------*/
49/*--- Main types ---*/
50/*------------------------------------------------------------*/
51
sewardj70e212d2005-05-19 10:54:01 +000052#define N_MALLOC_LISTS 18 // do not change this
nethercote2d5b8162004-08-11 09:40:52 +000053
nethercote7ac7f7b2004-11-02 12:36:02 +000054// The amount you can ask for is limited only by sizeof(SizeT)...
55#define MAX_PSZB (~((SizeT)0x0))
nethercote2d5b8162004-08-11 09:40:52 +000056
57typedef UChar UByte;
58
59/* Block layout:
60
nethercote7ac7f7b2004-11-02 12:36:02 +000061 this block total szB (sizeof(SizeT) bytes)
nethercote2d5b8162004-08-11 09:40:52 +000062 freelist previous ptr (sizeof(void*) bytes)
63 red zone bytes (depends on .rz_szB field of Arena)
64 (payload bytes)
65 red zone bytes (depends on .rz_szB field of Arena)
66 freelist next ptr (sizeof(void*) bytes)
nethercote7ac7f7b2004-11-02 12:36:02 +000067 this block total szB (sizeof(SizeT) bytes)
nethercote2d5b8162004-08-11 09:40:52 +000068
69 Total size in bytes (bszB) and payload size in bytes (pszB)
70 are related by:
71
nethercote7ac7f7b2004-11-02 12:36:02 +000072 bszB == pszB + 2*sizeof(SizeT) + 2*sizeof(void*) + 2*a->rz_szB
nethercote2d5b8162004-08-11 09:40:52 +000073
njn37517e82005-05-25 15:52:39 +000074 Furthermore, both size fields in the block have their least-significant
nethercote7ac7f7b2004-11-02 12:36:02 +000075 bit set if the block is not in use, and unset if it is in use.
76 (The bottom 3 or so bits are always free for this because of alignment.)
77 A block size of zero is not possible, because a block always has at
78 least two SizeTs and two pointers of overhead.
nethercote2d5b8162004-08-11 09:40:52 +000079
80 Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned. This is
81 achieved by ensuring that Superblocks are VG_MIN_MALLOC_SZB-aligned
82 (see newSuperblock() for how), and that the lengths of the following
83 things are a multiple of VG_MIN_MALLOC_SZB:
84 - Superblock admin section lengths (due to elastic padding)
85 - Block admin section (low and high) lengths (due to elastic redzones)
86 - Block payload lengths (due to req_pszB rounding up)
87*/
// An opaque handle for a malloc block.  Real blocks have the variable-size
// layout described above; this 1-byte struct only exists to give Block* a
// type, and to make pointer arithmetic on Block* step by single bytes.
typedef
   struct {
      // No fields are actually used in this struct, because a Block has
      // many variable sized fields and so can't be accessed
      // meaningfully with normal fields.  So we use access functions all
      // the time.  This struct gives us a type to use, though.  Also, we
      // make sizeof(Block) 1 byte so that we can do arithmetic with the
      // Block* type in increments of 1!
      UByte dummy;
   }
   Block;
99
// A superblock.  'padding' is never used, it just ensures that if the
// entire Superblock is aligned to VG_MIN_MALLOC_SZB, then payload_bytes[]
// will be too.  It can add small amounts of padding unnecessarily -- eg.
// 8-bytes on 32-bit machines with an 8-byte VG_MIN_MALLOC_SZB -- because
// it's too hard to make a constant expression that works perfectly in all
// cases.
// payload_bytes[] is made a single big Block when the Superblock is
// created, and then can be split and the splittings remerged, but Blocks
// always cover its entire length -- there's never any unused bytes at the
// end, for example.
typedef
   struct _Superblock {
      struct _Superblock* next;            // next superblock in the arena's list
      SizeT n_payload_bytes;               // usable bytes, ie. excluding this header
      // Pads the header up to a VG_MIN_MALLOC_SZB boundary (see above).
      UByte padding[ VG_MIN_MALLOC_SZB -
                        ((sizeof(struct _Superblock*) + sizeof(SizeT)) %
                         VG_MIN_MALLOC_SZB) ];
      UByte payload_bytes[0];              // the Blocks live here
   }
   Superblock;
120
// An arena.  'freelist' is a circular, doubly-linked list.  'rz_szB' is
// elastic, in that it can be bigger than asked-for to ensure alignment.
typedef
   struct {
      Char*       name;                      // arena name, used in stats and panics
      Bool        clientmem;                 // Allocates in the client address space?
      SizeT       rz_szB;                    // Red zone size in bytes
      SizeT       min_sblock_szB;            // Minimum superblock size in bytes
      Block*      freelist[N_MALLOC_LISTS];  // one circular list per size class
      Superblock* sblocks;                   // singly-linked list of superblocks
      // Stats only.
      SizeT bytes_on_loan;                   // payload bytes currently allocated
      SizeT bytes_mmaped;                    // total bytes obtained from mmap
      SizeT bytes_on_loan_max;               // high-water mark of bytes_on_loan
   }
   Arena;
137
138
139/*------------------------------------------------------------*/
140/*--- Low-level functions for working with Blocks. ---*/
141/*------------------------------------------------------------*/
142
nethercote7ac7f7b2004-11-02 12:36:02 +0000143#define SIZE_T_0x1 ((SizeT)0x1)
144
nethercote2d5b8162004-08-11 09:40:52 +0000145// Mark a bszB as in-use, and not in-use.
146static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000147SizeT mk_inuse_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000148{
149 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000150 return bszB & (~SIZE_T_0x1);
nethercote2d5b8162004-08-11 09:40:52 +0000151}
152static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000153SizeT mk_free_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000154{
155 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000156 return bszB | SIZE_T_0x1;
nethercote2d5b8162004-08-11 09:40:52 +0000157}
158
159// Remove the in-use/not-in-use attribute from a bszB, leaving just
160// the size.
161static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000162SizeT mk_plain_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000163{
164 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000165 return bszB & (~SIZE_T_0x1);
nethercote2d5b8162004-08-11 09:40:52 +0000166}
167
nethercote2d5b8162004-08-11 09:40:52 +0000168// Set and get the lower size field of a block.
169static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000170void set_bszB_lo ( Block* b, SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000171{
nethercote7ac7f7b2004-11-02 12:36:02 +0000172 *(SizeT*)&b[0] = bszB;
nethercote2d5b8162004-08-11 09:40:52 +0000173}
174static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000175SizeT get_bszB_lo ( Block* b )
nethercote2d5b8162004-08-11 09:40:52 +0000176{
nethercote7ac7f7b2004-11-02 12:36:02 +0000177 return *(SizeT*)&b[0];
nethercote2d5b8162004-08-11 09:40:52 +0000178}
179
njn472cc7c2005-07-17 17:20:30 +0000180// Does this block have the in-use attribute?
181static __inline__
182Bool is_inuse_block ( Block* b )
183{
184 SizeT bszB = get_bszB_lo(b);
185 vg_assert(bszB != 0);
186 return (0 != (bszB & SIZE_T_0x1)) ? False : True;
187}
188
nethercote2d5b8162004-08-11 09:40:52 +0000189// Get the address of the last byte in a block
190static __inline__
191UByte* last_byte ( Block* b )
192{
193 UByte* b2 = (UByte*)b;
194 return &b2[mk_plain_bszB(get_bszB_lo(b)) - 1];
195}
196
// Set and get the upper size field of a block.  The upper size field
// occupies the last sizeof(SizeT) bytes of the block, ie. it starts at
// last_byte(b) - sizeof(SizeT) + 1.
// NOTE(review): -sizeof(SizeT) is evaluated in unsigned arithmetic; the
// subscript relies on wraparound to produce a negative byte offset, which
// works on the flat address spaces Valgrind targets.
static __inline__
void set_bszB_hi ( Block* b, SizeT bszB )
{
   UByte* b2 = (UByte*)b;
   UByte* lb = last_byte(b);
   // last_byte() derives its answer from the *lower* size field, so this
   // checks the two size fields are consistent before writing.
   vg_assert(lb == &b2[mk_plain_bszB(bszB) - 1]);
   *(SizeT*)&lb[-sizeof(SizeT) + 1] = bszB;
}
static __inline__
SizeT get_bszB_hi ( Block* b )
{
   UByte* lb = last_byte(b);
   return *(SizeT*)&lb[-sizeof(SizeT) + 1];
}
212
njnd0e685c2005-07-17 17:55:42 +0000213// Get a block's size as stored, ie with the in-use/free attribute.
214static __inline__
215SizeT get_bszB_as_is ( Block* b )
216{
217 SizeT bszB_lo = get_bszB_lo(b);
218 SizeT bszB_hi = get_bszB_hi(b);
219 vg_assert(bszB_lo == bszB_hi);
220 return bszB_lo;
221}
222
223// Get a block's plain size, ie. remove the in-use/free attribute.
224static __inline__
225SizeT get_bszB ( Block* b )
226{
227 return mk_plain_bszB(get_bszB_as_is(b));
228}
nethercote2d5b8162004-08-11 09:40:52 +0000229
// Return the lower, upper and total overhead in bytes for a block.
// These are determined purely by which arena the block lives in.
// Lower overhead: size field + freelist-prev pointer + lower redzone.
static __inline__
SizeT overhead_szB_lo ( Arena* a )
{
   return sizeof(SizeT) + sizeof(void*) + a->rz_szB;
}
// Upper overhead: upper redzone + freelist-next pointer + size field.
static __inline__
SizeT overhead_szB_hi ( Arena* a )
{
   return a->rz_szB + sizeof(void*) + sizeof(SizeT);
}
static __inline__
SizeT overhead_szB ( Arena* a )
{
   return overhead_szB_lo(a) + overhead_szB_hi(a);
}
247
nethercote2d5b8162004-08-11 09:40:52 +0000248// Given the addr of a block, return the addr of its payload.
249static __inline__
250UByte* get_block_payload ( Arena* a, Block* b )
251{
252 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000253 return & b2[ overhead_szB_lo(a) ];
nethercote2d5b8162004-08-11 09:40:52 +0000254}
255// Given the addr of a block's payload, return the addr of the block itself.
256static __inline__
257Block* get_payload_block ( Arena* a, UByte* payload )
258{
nethercote7ac7f7b2004-11-02 12:36:02 +0000259 return (Block*)&payload[ -overhead_szB_lo(a) ];
nethercote2d5b8162004-08-11 09:40:52 +0000260}
261
262
// Set and get the next and previous link fields of a block.
// Per the layout at the top of the file: the prev pointer sits just after
// the lower size field, and the next pointer sits just before the upper
// size field (hence the -sizeof(SizeT) - sizeof(void*) + 1 offset from
// the last byte).
static __inline__
void set_prev_b ( Block* b, Block* prev_p )
{
   UByte* b2 = (UByte*)b;
   *(Block**)&b2[sizeof(SizeT)] = prev_p;
}
static __inline__
void set_next_b ( Block* b, Block* next_p )
{
   UByte* lb = last_byte(b);
   *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1] = next_p;
}
static __inline__
Block* get_prev_b ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return *(Block**)&b2[sizeof(SizeT)];
}
static __inline__
Block* get_next_b ( Block* b )
{
   UByte* lb = last_byte(b);
   return *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1];
}
288
289
// Get the block immediately preceding this one in the Superblock.
// The predecessor's upper size field occupies the sizeof(SizeT) bytes
// directly below this block's start, so reading it tells us how far back
// the predecessor begins.
static __inline__
Block* get_predecessor_block ( Block* b )
{
   UByte* b2 = (UByte*)b;
   SizeT bszB = mk_plain_bszB( (*(SizeT*)&b2[-sizeof(SizeT)]) );
   return (Block*)&b2[-bszB];
}
298
299// Read and write the lower and upper red-zone bytes of a block.
300static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000301void set_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
nethercote2d5b8162004-08-11 09:40:52 +0000302{
303 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000304 b2[sizeof(SizeT) + sizeof(void*) + rz_byteno] = v;
nethercote2d5b8162004-08-11 09:40:52 +0000305}
306static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000307void set_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
nethercote2d5b8162004-08-11 09:40:52 +0000308{
309 UByte* lb = last_byte(b);
nethercote7ac7f7b2004-11-02 12:36:02 +0000310 lb[-sizeof(SizeT) - sizeof(void*) - rz_byteno] = v;
nethercote2d5b8162004-08-11 09:40:52 +0000311}
312static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000313UByte get_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno )
nethercote2d5b8162004-08-11 09:40:52 +0000314{
315 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000316 return b2[sizeof(SizeT) + sizeof(void*) + rz_byteno];
nethercote2d5b8162004-08-11 09:40:52 +0000317}
318static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000319UByte get_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno )
nethercote2d5b8162004-08-11 09:40:52 +0000320{
321 UByte* lb = last_byte(b);
nethercote7ac7f7b2004-11-02 12:36:02 +0000322 return lb[-sizeof(SizeT) - sizeof(void*) - rz_byteno];
nethercote2d5b8162004-08-11 09:40:52 +0000323}
324
325
nethercote2d5b8162004-08-11 09:40:52 +0000326// Return the minimum bszB for a block in this arena. Can have zero-length
327// payloads, so it's the size of the admin bytes.
328static __inline__
njn0e742df2004-11-30 13:26:29 +0000329SizeT min_useful_bszB ( Arena* a )
nethercote2d5b8162004-08-11 09:40:52 +0000330{
331 return overhead_szB(a);
332}
333
334// Convert payload size <--> block size (both in bytes).
335static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000336SizeT pszB_to_bszB ( Arena* a, SizeT pszB )
nethercote2d5b8162004-08-11 09:40:52 +0000337{
nethercote2d5b8162004-08-11 09:40:52 +0000338 return pszB + overhead_szB(a);
339}
340static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000341SizeT bszB_to_pszB ( Arena* a, SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000342{
nethercote7ac7f7b2004-11-02 12:36:02 +0000343 vg_assert(bszB >= overhead_szB(a));
344 return bszB - overhead_szB(a);
nethercote2d5b8162004-08-11 09:40:52 +0000345}
346
347
348/*------------------------------------------------------------*/
349/*--- Arena management ---*/
350/*------------------------------------------------------------*/
351
// Minimum size of the core arena's bootstrap superblock (see
// newSuperblock); 1 MB.
#define CORE_ARENA_MIN_SZB    1048576

// The arena structures themselves.
static Arena vg_arena[VG_N_ARENAS];

// Functions external to this module identify arenas using ArenaIds,
// not Arena*s.  This fn converts the former to the latter.
static Arena* arenaId_to_ArenaP ( ArenaId arena )
{
   vg_assert(arena >= 0 && arena < VG_N_ARENAS);
   return & vg_arena[arena];
}
364
// Initialise an arena.  rz_szB is the minimum redzone size; it might be
// made bigger to ensure that VG_MIN_MALLOC_SZB is observed.
static
void arena_init ( ArenaId aid, Char* name, SizeT rz_szB, SizeT min_sblock_szB )
{
   SizeT  i;
   Arena* a = arenaId_to_ArenaP(aid);

   vg_assert(rz_szB < 128);   // ensure reasonable size
   vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
   a->name      = name;
   a->clientmem = ( VG_AR_CLIENT == aid ? True : False );

   // The size of the low and high admin sections in a block must be a
   // multiple of VG_MIN_MALLOC_SZB.  So we round up the asked-for
   // redzone size if necessary to achieve this.
   // (Nb: must set a->rz_szB before calling overhead_szB_lo(), which
   // reads it.)
   a->rz_szB = rz_szB;
   while (0 != overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++;
   vg_assert(overhead_szB_lo(a) == overhead_szB_hi(a));

   a->min_sblock_szB = min_sblock_szB;
   // Start with every freelist empty and no superblocks mapped.
   for (i = 0; i < N_MALLOC_LISTS; i++) a->freelist[i] = NULL;
   a->sblocks           = NULL;
   a->bytes_on_loan     = 0;
   a->bytes_mmaped      = 0;
   a->bytes_on_loan_max = 0;
}
392
393/* Print vital stats for an arena. */
394void VG_(print_all_arena_stats) ( void )
395{
nethercote7ac7f7b2004-11-02 12:36:02 +0000396 UInt i;
nethercote2d5b8162004-08-11 09:40:52 +0000397 for (i = 0; i < VG_N_ARENAS; i++) {
398 Arena* a = arenaId_to_ArenaP(i);
399 VG_(message)(Vg_DebugMsg,
njn669ef072005-03-13 05:46:57 +0000400 "%8s: %8d mmap'd, %8d/%8d max/curr",
nethercote2d5b8162004-08-11 09:40:52 +0000401 a->name, a->bytes_mmaped, a->bytes_on_loan_max, a->bytes_on_loan
402 );
403 }
404}
405
/* This library is self-initialising, as it makes this more self-contained,
   less coupled with the outside world.  Hence VG_(arena_malloc)() and
   VG_(arena_free)() below always call ensure_mm_init() to ensure things are
   correctly initialised.  */
static
void ensure_mm_init ( void )
{
   // One-shot initialisation flag, and the client redzone size chosen at
   // first init (remembered so later calls can re-check it).
   static Bool  init_done = False;
   static SizeT client_redzone_szB = 8;   // default: be paranoid

   if (init_done) {
      // This assertion ensures that a tool cannot try to change the client
      // redzone size with VG_(needs_malloc_replacement)() after this module
      // has done its first allocation.
      if (VG_(needs).malloc_replacement)
         vg_assert(client_redzone_szB == VG_(tdict).tool_client_redzone_szB);
      return;
   }

   // First call: honour the tool's requested client redzone size, within
   // a sanity bound.
   if (VG_(needs).malloc_replacement) {
      client_redzone_szB = VG_(tdict).tool_client_redzone_szB;
      // 128 is no special figure, just something not too big
      if (client_redzone_szB > 128) {
         VG_(printf)( "\nTool error:\n"
                      "  specified redzone size is too big (%llu)\n",
                      (ULong)client_redzone_szB);
         VG_(exit)(1);
      }
   }

   /* Use checked red zones (of various sizes) for our internal stuff,
      and an unchecked zone of arbitrary size for the client.  Of
      course the client's red zone can be checked by the tool, eg.
      by using addressibility maps, but not by the mechanism implemented
      here, which merely checks at the time of freeing that the red
      zone bytes are unchanged.

      Nb: redzone sizes are *minimums*;  they could be made bigger to ensure
      alignment.  Eg. on 32-bit machines, 4 becomes 8, and 12 becomes 16;
      but on 64-bit machines 4 stays as 4, and 12 stays as 12 --- the extra
      4 bytes in both are accounted for by the larger prev/next ptr.
   */
   arena_init ( VG_AR_CORE,      "core",     4,                 CORE_ARENA_MIN_SZB );
   arena_init ( VG_AR_TOOL,      "tool",     4,                 1048576 );
   arena_init ( VG_AR_SYMTAB,    "symtab",   4,                 1048576 );
   arena_init ( VG_AR_CLIENT,    "client",   client_redzone_szB, 1048576 );
   arena_init ( VG_AR_DEMANGLE,  "demangle", 12/*paranoid*/,    65536 );
   arena_init ( VG_AR_EXECTXT,   "exectxt",  4,                 65536 );
   arena_init ( VG_AR_ERRORS,    "errors",   4,                 65536 );

   init_done = True;
#  ifdef DEBUG_MALLOC
   VG_(sanity_check_malloc_all)();
#  endif
}
461
462
463/*------------------------------------------------------------*/
464/*--- Superblock management ---*/
465/*------------------------------------------------------------*/
466
467// Align ptr p upwards to an align-sized boundary.
468static
nethercote7ac7f7b2004-11-02 12:36:02 +0000469void* align_upwards ( void* p, SizeT align )
nethercote2d5b8162004-08-11 09:40:52 +0000470{
471 Addr a = (Addr)p;
472 if ((a % align) == 0) return (void*)a;
473 return (void*)(a - (a % align) + align);
474}
475
// Allocate a new Superblock of at least cszB payload bytes for arena 'a'.
// If not enough memory available, either aborts (for non-client memory)
// or returns 0 (for client memory).
static
Superblock* newSuperblock ( Arena* a, SizeT cszB )
{
   // The extra VG_MIN_MALLOC_SZB bytes are for possible alignment up.
   static UByte bootstrap_superblock[CORE_ARENA_MIN_SZB+VG_MIN_MALLOC_SZB];
   // NOTE(review): initialising this to True makes the bootstrap-superblock
   // branch below unreachable; the trailing "//False" suggests the value
   // was toggled (debugging leftover?).  Confirm which initial value is
   // intended before relying on the bootstrap path.
   static Bool called_before = True; //False;
   Superblock* sb;

   // Take into account admin bytes in the Superblock.
   cszB += sizeof(Superblock);

   // Round the request up to the arena minimum, then to a whole number
   // of pages.
   if (cszB < a->min_sblock_szB) cszB = a->min_sblock_szB;
   while ((cszB % VKI_PAGE_SIZE) > 0) cszB++;

   if (!called_before) {
      // First time we're called -- use the special static bootstrap
      // superblock (see comment at top of main() for details).
      called_before = True;
      vg_assert(a == arenaId_to_ArenaP(VG_AR_CORE));
      vg_assert(CORE_ARENA_MIN_SZB >= cszB);
      // Ensure sb is suitably aligned.
      sb = (Superblock*)align_upwards( bootstrap_superblock,
                                       VG_MIN_MALLOC_SZB );
   } else if (a->clientmem) {
      // client allocation -- return 0 to client if it fails
      sb = (Superblock*)VG_(get_memory_from_mmap_for_client)(cszB);
      if (NULL == sb)
         return 0;
   } else {
      // non-client allocation -- aborts if it fails
      sb = VG_(get_memory_from_mmap) ( cszB, "newSuperblock" );
   }
   vg_assert(NULL != sb);
   //zzVALGRIND_MAKE_WRITABLE(sb, cszB);
   vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
   sb->n_payload_bytes = cszB - sizeof(Superblock);
   a->bytes_mmaped += cszB;
   if (0)
      VG_(message)(Vg_DebugMsg, "newSuperblock, %d payload bytes",
                                sb->n_payload_bytes);
   return sb;
}
520
521// Find the superblock containing the given chunk.
522static
523Superblock* findSb ( Arena* a, Block* b )
524{
525 Superblock* sb;
526 for (sb = a->sblocks; sb; sb = sb->next)
527 if ((Block*)&sb->payload_bytes[0] <= b
528 && b < (Block*)&sb->payload_bytes[sb->n_payload_bytes])
529 return sb;
njn02bc4b82005-05-15 17:28:26 +0000530 VG_(printf)("findSb: can't find pointer %p in arena '%s'\n", b, a->name );
nethercote2d5b8162004-08-11 09:40:52 +0000531 VG_(core_panic)("findSb: VG_(arena_free)() in wrong arena?");
532 return NULL; /*NOTREACHED*/
533}
534
sewardjde4a1d02002-03-22 01:27:54 +0000535
fitzhardinge98abfc72003-12-16 02:05:15 +0000536/*------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +0000537/*--- Functions for working with freelists. ---*/
538/*------------------------------------------------------------*/
539
nethercote2d5b8162004-08-11 09:40:52 +0000540// Nb: Determination of which freelist a block lives on is based on the
541// payload size, not block size.
sewardjde4a1d02002-03-22 01:27:54 +0000542
nethercote2d5b8162004-08-11 09:40:52 +0000543// Convert a payload size in bytes to a freelist number.
sewardjde4a1d02002-03-22 01:27:54 +0000544static
nethercote7ac7f7b2004-11-02 12:36:02 +0000545UInt pszB_to_listNo ( SizeT pszB )
sewardjde4a1d02002-03-22 01:27:54 +0000546{
nethercote2d5b8162004-08-11 09:40:52 +0000547 vg_assert(0 == pszB % VG_MIN_MALLOC_SZB);
548 pszB /= VG_MIN_MALLOC_SZB;
njn61dcab82005-05-21 19:36:45 +0000549
550 // The first 13 lists hold blocks of size VG_MIN_MALLOC_SZB * list_num.
551 // The final 4 hold bigger blocks.
552 if (pszB <= 12) return pszB;
sewardj70e212d2005-05-19 10:54:01 +0000553 if (pszB <= 16) return 13;
554 if (pszB <= 32) return 14;
555 if (pszB <= 64) return 15;
556 if (pszB <= 128) return 16;
557 return 17;
sewardjde4a1d02002-03-22 01:27:54 +0000558}
559
// What is the minimum payload size for a given list?
// NOTE(review): the assert allows listNo == N_MALLOC_LISTS (one past the
// last list) because listNo_to_pszB_max() below calls this with listNo+1;
// confirm that is the only reason for "<=" rather than "<".
static
SizeT listNo_to_pszB_min ( UInt listNo )
{
   SizeT pszB = 0;
   vg_assert(listNo <= N_MALLOC_LISTS);
   // Walk payload sizes upwards until one maps to this list.
   while (pszB_to_listNo(pszB) < listNo) pszB += VG_MIN_MALLOC_SZB;
   return pszB;
}

// What is the maximum payload size for a given list?
static
SizeT listNo_to_pszB_max ( UInt listNo )
{
   vg_assert(listNo <= N_MALLOC_LISTS);
   if (listNo == N_MALLOC_LISTS-1) {
      // The last list is unbounded above.
      return MAX_PSZB;
   } else {
      // One less than the next list's minimum.
      return listNo_to_pszB_min(listNo+1) - 1;
   }
}
581
582
/* A nasty hack to try and reduce fragmentation.  Try and replace
   a->freelist[lno] with another block on the same list but with a
   lower address, with the idea of attempting to recycle the same
   blocks rather than cruise through the address space. */
static
void swizzle ( Arena* a, UInt lno )
{
   Block* p_best;
   Block* pp;
   Block* pn;
   UInt   i;

   p_best = a->freelist[lno];
   if (p_best == NULL) return;

   // Walk up to 20 steps in each direction around the circular list,
   // remembering the lowest-addressed block seen.
   pn = pp = p_best;
   for (i = 0; i < 20; i++) {
      pn = get_next_b(pn);
      pp = get_prev_b(pp);
      if (pn < p_best) p_best = pn;
      if (pp < p_best) p_best = pp;
   }
   if (p_best < a->freelist[lno]) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)("retreat by %d\n", a->freelist[lno] - p_best);
#     endif
      a->freelist[lno] = p_best;
   }
}
612
613
614/*------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +0000615/*--- Sanity-check/debugging machinery. ---*/
616/*------------------------------------------------------------*/
617
// XOR masks used to derive the expected redzone fill bytes from a block's
// address (see blockSane and the redzone-setting code elsewhere).
#define REDZONE_LO_MASK    0x31
#define REDZONE_HI_MASK    0x7c

// Do some crude sanity checks on a Block.
static
Bool blockSane ( Arena* a, Block* b )
{
#  define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
   UInt i;
   // The two stored size fields must agree.
   if (get_bszB_lo(b) != get_bszB_hi(b))
      {BLEAT("sizes");return False;}
   // Redzone contents are only checked for in-use blocks in non-client
   // arenas: client redzones are unchecked by this module, and free
   // blocks reuse the redzone bytes for other purposes.
   if (!a->clientmem && is_inuse_block(b)) {
      for (i = 0; i < a->rz_szB; i++) {
         if (get_rz_lo_byte(a, b, i) !=
            (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK))
               {BLEAT("redzone-lo");return False;}
         if (get_rz_hi_byte(a, b, i) !=
            (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK))
               {BLEAT("redzone-hi");return False;}
      }
   }
   return True;
#  undef BLEAT
}
642
// Print superblocks (only for debugging).
// NOTE(review): the %d specifiers are paired with SizeT/pointer-difference
// values; harmless on 32-bit, but imprecise on 64-bit targets.
static
void ppSuperblocks ( Arena* a )
{
   UInt i, blockno = 1;
   Superblock* sb = a->sblocks;
   SizeT b_bszB;

   while (sb) {
      VG_(printf)( "\n" );
      VG_(printf)( "superblock %d at %p, sb->n_pl_bs = %d, next = %p\n",
                   blockno++, sb, sb->n_payload_bytes, sb->next );
      // Step through the superblock block by block; each iteration
      // advances by the current block's (plain) size.
      for (i = 0; i < sb->n_payload_bytes; i += b_bszB) {
         Block* b = (Block*)&sb->payload_bytes[i];
         b_bszB   = get_bszB(b);
         VG_(printf)( " block at %d, bszB %d: ", i, b_bszB );
         VG_(printf)( "%s, ", is_inuse_block(b) ? "inuse" : "free");
         VG_(printf)( "%s\n", blockSane(a, b) ? "ok" : "BAD" );
      }
      vg_assert(i == sb->n_payload_bytes);   // no overshoot at end of Sb
      sb = sb->next;
   }
   VG_(printf)( "end of superblocks\n\n" );
}
667
nethercote2d5b8162004-08-11 09:40:52 +0000668// Sanity check both the superblocks and the chains.
nethercote885dd912004-08-03 23:14:00 +0000669static void sanity_check_malloc_arena ( ArenaId aid )
sewardjde4a1d02002-03-22 01:27:54 +0000670{
nethercote7ac7f7b2004-11-02 12:36:02 +0000671 UInt i, superblockctr, blockctr_sb, blockctr_li;
672 UInt blockctr_sb_free, listno;
673 SizeT b_bszB, b_pszB, list_min_pszB, list_max_pszB;
sewardjde4a1d02002-03-22 01:27:54 +0000674 Superblock* sb;
675 Bool thisFree, lastWasFree;
nethercote2d5b8162004-08-11 09:40:52 +0000676 Block* b;
677 Block* b_prev;
nethercote7ac7f7b2004-11-02 12:36:02 +0000678 SizeT arena_bytes_on_loan;
sewardjde4a1d02002-03-22 01:27:54 +0000679 Arena* a;
680
nethercote885dd912004-08-03 23:14:00 +0000681# define BOMB VG_(core_panic)("sanity_check_malloc_arena")
sewardjde4a1d02002-03-22 01:27:54 +0000682
683 a = arenaId_to_ArenaP(aid);
684
nethercote2d5b8162004-08-11 09:40:52 +0000685 // First, traverse all the superblocks, inspecting the Blocks in each.
sewardjde4a1d02002-03-22 01:27:54 +0000686 superblockctr = blockctr_sb = blockctr_sb_free = 0;
687 arena_bytes_on_loan = 0;
688 sb = a->sblocks;
689 while (sb) {
690 lastWasFree = False;
691 superblockctr++;
nethercote2d5b8162004-08-11 09:40:52 +0000692 for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
sewardjde4a1d02002-03-22 01:27:54 +0000693 blockctr_sb++;
nethercote2d5b8162004-08-11 09:40:52 +0000694 b = (Block*)&sb->payload_bytes[i];
njnd0e685c2005-07-17 17:55:42 +0000695 b_bszB = get_bszB_as_is(b);
sewardjde4a1d02002-03-22 01:27:54 +0000696 if (!blockSane(a, b)) {
nethercote2d5b8162004-08-11 09:40:52 +0000697 VG_(printf)("sanity_check_malloc_arena: sb %p, block %d (bszB %d): "
698 " BAD\n", sb, i, b_bszB );
sewardjde4a1d02002-03-22 01:27:54 +0000699 BOMB;
700 }
njn472cc7c2005-07-17 17:20:30 +0000701 thisFree = !is_inuse_block(b);
sewardjde4a1d02002-03-22 01:27:54 +0000702 if (thisFree && lastWasFree) {
nethercote2d5b8162004-08-11 09:40:52 +0000703 VG_(printf)("sanity_check_malloc_arena: sb %p, block %d (bszB %d): "
njn25e49d8e72002-09-23 09:36:25 +0000704 "UNMERGED FREES\n",
nethercote2d5b8162004-08-11 09:40:52 +0000705 sb, i, b_bszB );
sewardjde4a1d02002-03-22 01:27:54 +0000706 BOMB;
707 }
sewardjde4a1d02002-03-22 01:27:54 +0000708 if (thisFree) blockctr_sb_free++;
709 if (!thisFree)
nethercote2d5b8162004-08-11 09:40:52 +0000710 arena_bytes_on_loan += bszB_to_pszB(a, b_bszB);
711 lastWasFree = thisFree;
sewardjde4a1d02002-03-22 01:27:54 +0000712 }
nethercote2d5b8162004-08-11 09:40:52 +0000713 if (i > sb->n_payload_bytes) {
nethercote885dd912004-08-03 23:14:00 +0000714 VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
sewardjde4a1d02002-03-22 01:27:54 +0000715 "overshoots end\n", sb);
716 BOMB;
717 }
718 sb = sb->next;
719 }
720
721 if (arena_bytes_on_loan != a->bytes_on_loan) {
nethercote2d5b8162004-08-11 09:40:52 +0000722# ifdef VERBOSE_MALLOC
723 VG_(printf)( "sanity_check_malloc_arena: a->bytes_on_loan %d, "
724 "arena_bytes_on_loan %d: "
725 "MISMATCH\n", a->bytes_on_loan, arena_bytes_on_loan);
726# endif
sewardjde4a1d02002-03-22 01:27:54 +0000727 ppSuperblocks(a);
728 BOMB;
729 }
730
731 /* Second, traverse each list, checking that the back pointers make
732 sense, counting blocks encountered, and checking that each block
733 is an appropriate size for this list. */
734 blockctr_li = 0;
njn6e6588c2005-03-13 18:52:48 +0000735 for (listno = 0; listno < N_MALLOC_LISTS; listno++) {
nethercote2d5b8162004-08-11 09:40:52 +0000736 list_min_pszB = listNo_to_pszB_min(listno);
737 list_max_pszB = listNo_to_pszB_max(listno);
sewardjde4a1d02002-03-22 01:27:54 +0000738 b = a->freelist[listno];
739 if (b == NULL) continue;
740 while (True) {
741 b_prev = b;
nethercote2d5b8162004-08-11 09:40:52 +0000742 b = get_next_b(b);
743 if (get_prev_b(b) != b_prev) {
nethercote885dd912004-08-03 23:14:00 +0000744 VG_(printf)( "sanity_check_malloc_arena: list %d at %p: "
sewardjde4a1d02002-03-22 01:27:54 +0000745 "BAD LINKAGE\n",
746 listno, b );
747 BOMB;
748 }
njnd0e685c2005-07-17 17:55:42 +0000749 b_pszB = bszB_to_pszB(a, get_bszB(b));
nethercote2d5b8162004-08-11 09:40:52 +0000750 if (b_pszB < list_min_pszB || b_pszB > list_max_pszB) {
sewardjde4a1d02002-03-22 01:27:54 +0000751 VG_(printf)(
nethercote885dd912004-08-03 23:14:00 +0000752 "sanity_check_malloc_arena: list %d at %p: "
nethercote2d5b8162004-08-11 09:40:52 +0000753 "WRONG CHAIN SIZE %dB (%dB, %dB)\n",
754 listno, b, b_pszB, list_min_pszB, list_max_pszB );
sewardjde4a1d02002-03-22 01:27:54 +0000755 BOMB;
756 }
757 blockctr_li++;
758 if (b == a->freelist[listno]) break;
759 }
760 }
761
762 if (blockctr_sb_free != blockctr_li) {
nethercote2d5b8162004-08-11 09:40:52 +0000763# ifdef VERBOSE_MALLOC
764 VG_(printf)( "sanity_check_malloc_arena: BLOCK COUNT MISMATCH "
765 "(via sbs %d, via lists %d)\n",
766 blockctr_sb_free, blockctr_li );
767# endif
sewardjde4a1d02002-03-22 01:27:54 +0000768 ppSuperblocks(a);
769 BOMB;
770 }
771
nethercote885dd912004-08-03 23:14:00 +0000772 if (VG_(clo_verbosity) > 2)
773 VG_(message)(Vg_DebugMsg,
njn669ef072005-03-13 05:46:57 +0000774 "%8s: %2d sbs, %5d bs, %2d/%-2d free bs, "
nethercote885dd912004-08-03 23:14:00 +0000775 "%7d mmap, %7d loan",
776 a->name,
777 superblockctr,
778 blockctr_sb, blockctr_sb_free, blockctr_li,
779 a->bytes_mmaped, a->bytes_on_loan);
sewardjde4a1d02002-03-22 01:27:54 +0000780# undef BOMB
781}
782
783
nethercote885dd912004-08-03 23:14:00 +0000784void VG_(sanity_check_malloc_all) ( void )
sewardjde4a1d02002-03-22 01:27:54 +0000785{
nethercote7ac7f7b2004-11-02 12:36:02 +0000786 UInt i;
sewardjde4a1d02002-03-22 01:27:54 +0000787 for (i = 0; i < VG_N_ARENAS; i++)
nethercote885dd912004-08-03 23:14:00 +0000788 sanity_check_malloc_arena ( i );
sewardjde4a1d02002-03-22 01:27:54 +0000789}
790
sewardjde4a1d02002-03-22 01:27:54 +0000791
nethercote2d5b8162004-08-11 09:40:52 +0000792/*------------------------------------------------------------*/
793/*--- Creating and deleting blocks. ---*/
794/*------------------------------------------------------------*/
795
796// Mark the bytes at b .. b+bszB-1 as not in use, and add them to the
797// relevant free list.
798
799static
nethercote7ac7f7b2004-11-02 12:36:02 +0000800void mkFreeBlock ( Arena* a, Block* b, SizeT bszB, UInt b_lno )
jsewardb1a26ae2004-03-14 03:06:37 +0000801{
nethercote7ac7f7b2004-11-02 12:36:02 +0000802 SizeT pszB = bszB_to_pszB(a, bszB);
nethercote2d5b8162004-08-11 09:40:52 +0000803 vg_assert(b_lno == pszB_to_listNo(pszB));
sewardjb5f6f512005-03-10 23:59:00 +0000804 //zzVALGRIND_MAKE_WRITABLE(b, bszB);
nethercote2d5b8162004-08-11 09:40:52 +0000805 // Set the size fields and indicate not-in-use.
806 set_bszB_lo(b, mk_free_bszB(bszB));
807 set_bszB_hi(b, mk_free_bszB(bszB));
808
809 // Add to the relevant list.
810 if (a->freelist[b_lno] == NULL) {
811 set_prev_b(b, b);
812 set_next_b(b, b);
813 a->freelist[b_lno] = b;
814 } else {
815 Block* b_prev = get_prev_b(a->freelist[b_lno]);
816 Block* b_next = a->freelist[b_lno];
817 set_next_b(b_prev, b);
818 set_prev_b(b_next, b);
819 set_next_b(b, b_next);
820 set_prev_b(b, b_prev);
821 }
822# ifdef DEBUG_MALLOC
823 (void)blockSane(a,b);
824# endif
825}
826
827// Mark the bytes at b .. b+bszB-1 as in use, and set up the block
828// appropriately.
829static
nethercote7ac7f7b2004-11-02 12:36:02 +0000830void mkInuseBlock ( Arena* a, Block* b, SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000831{
nethercote7ac7f7b2004-11-02 12:36:02 +0000832 UInt i;
nethercote2d5b8162004-08-11 09:40:52 +0000833 vg_assert(bszB >= min_useful_bszB(a));
sewardjb5f6f512005-03-10 23:59:00 +0000834 //zzVALGRIND_MAKE_WRITABLE(b, bszB);
nethercote2d5b8162004-08-11 09:40:52 +0000835 set_bszB_lo(b, mk_inuse_bszB(bszB));
836 set_bszB_hi(b, mk_inuse_bszB(bszB));
837 set_prev_b(b, NULL); // Take off freelist
838 set_next_b(b, NULL); // ditto
839 if (!a->clientmem) {
840 for (i = 0; i < a->rz_szB; i++) {
njn6e6588c2005-03-13 18:52:48 +0000841 set_rz_lo_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK));
842 set_rz_hi_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK));
nethercote2d5b8162004-08-11 09:40:52 +0000843 }
844 }
845# ifdef DEBUG_MALLOC
846 (void)blockSane(a,b);
847# endif
848}
849
850// Remove a block from a given list. Does no sanity checking.
851static
nethercote7ac7f7b2004-11-02 12:36:02 +0000852void unlinkBlock ( Arena* a, Block* b, UInt listno )
nethercote2d5b8162004-08-11 09:40:52 +0000853{
njn6e6588c2005-03-13 18:52:48 +0000854 vg_assert(listno < N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +0000855 if (get_prev_b(b) == b) {
856 // Only one element in the list; treat it specially.
857 vg_assert(get_next_b(b) == b);
858 a->freelist[listno] = NULL;
859 } else {
860 Block* b_prev = get_prev_b(b);
861 Block* b_next = get_next_b(b);
862 a->freelist[listno] = b_prev;
863 set_next_b(b_prev, b_next);
864 set_prev_b(b_next, b_prev);
865 swizzle ( a, listno );
866 }
867 set_prev_b(b, NULL);
868 set_next_b(b, NULL);
jsewardb1a26ae2004-03-14 03:06:37 +0000869}
870
871
sewardjde4a1d02002-03-22 01:27:54 +0000872/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000873/*--- Core-visible functions. ---*/
sewardjde4a1d02002-03-22 01:27:54 +0000874/*------------------------------------------------------------*/
875
nethercote2d5b8162004-08-11 09:40:52 +0000876// Align the request size.
877static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000878SizeT align_req_pszB ( SizeT req_pszB )
nethercote2d5b8162004-08-11 09:40:52 +0000879{
nethercote7ac7f7b2004-11-02 12:36:02 +0000880 SizeT n = VG_MIN_MALLOC_SZB-1;
nethercote2d5b8162004-08-11 09:40:52 +0000881 return ((req_pszB + n) & (~n));
882}
883
nethercote7ac7f7b2004-11-02 12:36:02 +0000884void* VG_(arena_malloc) ( ArenaId aid, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +0000885{
nethercote7ac7f7b2004-11-02 12:36:02 +0000886 SizeT req_bszB, frag_bszB, b_bszB;
887 UInt lno;
sewardjde4a1d02002-03-22 01:27:54 +0000888 Superblock* new_sb;
nethercote2d5b8162004-08-11 09:40:52 +0000889 Block* b = NULL;
sewardjde4a1d02002-03-22 01:27:54 +0000890 Arena* a;
jsewardb1a26ae2004-03-14 03:06:37 +0000891 void* v;
sewardjde4a1d02002-03-22 01:27:54 +0000892
893 VGP_PUSHCC(VgpMalloc);
894
895 ensure_mm_init();
896 a = arenaId_to_ArenaP(aid);
897
nethercote7ac7f7b2004-11-02 12:36:02 +0000898 vg_assert(req_pszB < MAX_PSZB);
nethercote2d5b8162004-08-11 09:40:52 +0000899 req_pszB = align_req_pszB(req_pszB);
900 req_bszB = pszB_to_bszB(a, req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +0000901
nethercote2d5b8162004-08-11 09:40:52 +0000902 // Scan through all the big-enough freelists for a block.
njn6e6588c2005-03-13 18:52:48 +0000903 for (lno = pszB_to_listNo(req_pszB); lno < N_MALLOC_LISTS; lno++) {
sewardjde4a1d02002-03-22 01:27:54 +0000904 b = a->freelist[lno];
nethercote2d5b8162004-08-11 09:40:52 +0000905 if (NULL == b) continue; // If this list is empty, try the next one.
sewardjde4a1d02002-03-22 01:27:54 +0000906 while (True) {
njnd0e685c2005-07-17 17:55:42 +0000907 b_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +0000908 if (b_bszB >= req_bszB) goto obtained_block; // success!
909 b = get_next_b(b);
910 if (b == a->freelist[lno]) break; // traversed entire freelist
sewardjde4a1d02002-03-22 01:27:54 +0000911 }
sewardjde4a1d02002-03-22 01:27:54 +0000912 }
913
nethercote2d5b8162004-08-11 09:40:52 +0000914 // If we reach here, no suitable block found, allocate a new superblock
njn6e6588c2005-03-13 18:52:48 +0000915 vg_assert(lno == N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +0000916 new_sb = newSuperblock(a, req_bszB);
917 if (NULL == new_sb) {
918 // Should only fail if for client, otherwise, should have aborted
919 // already.
920 vg_assert(VG_AR_CLIENT == aid);
921 return NULL;
sewardjde4a1d02002-03-22 01:27:54 +0000922 }
nethercote2d5b8162004-08-11 09:40:52 +0000923 new_sb->next = a->sblocks;
924 a->sblocks = new_sb;
925 b = (Block*)&new_sb->payload_bytes[0];
926 lno = pszB_to_listNo(bszB_to_pszB(a, new_sb->n_payload_bytes));
927 mkFreeBlock ( a, b, new_sb->n_payload_bytes, lno);
928 // fall through
sewardjde4a1d02002-03-22 01:27:54 +0000929
nethercote2d5b8162004-08-11 09:40:52 +0000930 obtained_block:
931 // Ok, we can allocate from b, which lives in list lno.
sewardjde4a1d02002-03-22 01:27:54 +0000932 vg_assert(b != NULL);
njn6e6588c2005-03-13 18:52:48 +0000933 vg_assert(lno < N_MALLOC_LISTS);
sewardjde4a1d02002-03-22 01:27:54 +0000934 vg_assert(a->freelist[lno] != NULL);
njnd0e685c2005-07-17 17:55:42 +0000935 b_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +0000936 // req_bszB is the size of the block we are after. b_bszB is the
937 // size of what we've actually got. */
938 vg_assert(b_bszB >= req_bszB);
sewardjde4a1d02002-03-22 01:27:54 +0000939
nethercote2d5b8162004-08-11 09:40:52 +0000940 // Could we split this block and still get a useful fragment?
941 frag_bszB = b_bszB - req_bszB;
942 if (frag_bszB >= min_useful_bszB(a)) {
943 // Yes, split block in two, put the fragment on the appropriate free
944 // list, and update b_bszB accordingly.
945 // printf( "split %dB into %dB and %dB\n", b_bszB, req_bszB, frag_bszB );
sewardjde4a1d02002-03-22 01:27:54 +0000946 unlinkBlock(a, b, lno);
nethercote2d5b8162004-08-11 09:40:52 +0000947 mkInuseBlock(a, b, req_bszB);
948 mkFreeBlock(a, &b[req_bszB], frag_bszB,
949 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
njnd0e685c2005-07-17 17:55:42 +0000950 b_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +0000951 } else {
952 // No, mark as in use and use as-is.
953 unlinkBlock(a, b, lno);
954 mkInuseBlock(a, b, b_bszB);
sewardjde4a1d02002-03-22 01:27:54 +0000955 }
sewardjde4a1d02002-03-22 01:27:54 +0000956
nethercote2d5b8162004-08-11 09:40:52 +0000957 // Update stats
958 a->bytes_on_loan += bszB_to_pszB(a, b_bszB);
sewardjde4a1d02002-03-22 01:27:54 +0000959 if (a->bytes_on_loan > a->bytes_on_loan_max)
960 a->bytes_on_loan_max = a->bytes_on_loan;
961
962# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +0000963 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +0000964# endif
965
njn25e49d8e72002-09-23 09:36:25 +0000966 VGP_POPCC(VgpMalloc);
nethercote2d5b8162004-08-11 09:40:52 +0000967 v = get_block_payload(a, b);
968 vg_assert( (((Addr)v) & (VG_MIN_MALLOC_SZB-1)) == 0 );
sewardjb5f6f512005-03-10 23:59:00 +0000969
970 VALGRIND_MALLOCLIKE_BLOCK(v, req_pszB, 0, False);
jsewardb1a26ae2004-03-14 03:06:37 +0000971 return v;
sewardjde4a1d02002-03-22 01:27:54 +0000972}
973
974
njn25e49d8e72002-09-23 09:36:25 +0000975void VG_(arena_free) ( ArenaId aid, void* ptr )
sewardjde4a1d02002-03-22 01:27:54 +0000976{
977 Superblock* sb;
nethercote2d5b8162004-08-11 09:40:52 +0000978 UByte* sb_start;
979 UByte* sb_end;
njna2578652005-07-17 17:12:24 +0000980 Block* other_b;
nethercote2d5b8162004-08-11 09:40:52 +0000981 Block* b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000982 SizeT b_bszB, b_pszB, other_bszB;
983 UInt b_listno;
sewardjde4a1d02002-03-22 01:27:54 +0000984 Arena* a;
985
986 VGP_PUSHCC(VgpMalloc);
987
988 ensure_mm_init();
989 a = arenaId_to_ArenaP(aid);
990
njn25e49d8e72002-09-23 09:36:25 +0000991 if (ptr == NULL) {
992 VGP_POPCC(VgpMalloc);
993 return;
994 }
995
nethercote2d5b8162004-08-11 09:40:52 +0000996 b = get_payload_block(a, ptr);
sewardjde4a1d02002-03-22 01:27:54 +0000997
998# ifdef DEBUG_MALLOC
nethercote2d5b8162004-08-11 09:40:52 +0000999 vg_assert(blockSane(a, b));
sewardjde4a1d02002-03-22 01:27:54 +00001000# endif
1001
njnd0e685c2005-07-17 17:55:42 +00001002 a->bytes_on_loan -= bszB_to_pszB(a, get_bszB(b));
sewardjde4a1d02002-03-22 01:27:54 +00001003
nethercote2d5b8162004-08-11 09:40:52 +00001004 sb = findSb( a, b );
1005 sb_start = &sb->payload_bytes[0];
1006 sb_end = &sb->payload_bytes[sb->n_payload_bytes - 1];
sewardjde4a1d02002-03-22 01:27:54 +00001007
nethercote2d5b8162004-08-11 09:40:52 +00001008 // Put this chunk back on a list somewhere.
njnd0e685c2005-07-17 17:55:42 +00001009 b_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +00001010 b_pszB = bszB_to_pszB(a, b_bszB);
1011 b_listno = pszB_to_listNo(b_pszB);
1012 mkFreeBlock( a, b, b_bszB, b_listno );
sewardjde4a1d02002-03-22 01:27:54 +00001013
nethercote2d5b8162004-08-11 09:40:52 +00001014 // See if this block can be merged with its successor.
1015 // First test if we're far enough before the superblock's end to possibly
1016 // have a successor.
njna2578652005-07-17 17:12:24 +00001017 other_b = b + b_bszB;
1018 if (other_b+min_useful_bszB(a)-1 <= (Block*)sb_end) {
nethercote2d5b8162004-08-11 09:40:52 +00001019 // Ok, we have a successor, merge if it's not in use.
njnd0e685c2005-07-17 17:55:42 +00001020 other_bszB = get_bszB(other_b);
njn472cc7c2005-07-17 17:20:30 +00001021 if (!is_inuse_block(other_b)) {
nethercote2d5b8162004-08-11 09:40:52 +00001022 // VG_(printf)( "merge-successor\n");
sewardjde4a1d02002-03-22 01:27:54 +00001023# ifdef DEBUG_MALLOC
njna2578652005-07-17 17:12:24 +00001024 vg_assert(blockSane(a, other_b));
sewardjde4a1d02002-03-22 01:27:54 +00001025# endif
nethercote2d5b8162004-08-11 09:40:52 +00001026 unlinkBlock( a, b, b_listno );
njna2578652005-07-17 17:12:24 +00001027 unlinkBlock( a, other_b, pszB_to_listNo(bszB_to_pszB(a,other_bszB)) );
nethercote2d5b8162004-08-11 09:40:52 +00001028 b_bszB += other_bszB;
1029 b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
1030 mkFreeBlock( a, b, b_bszB, b_listno );
sewardjde4a1d02002-03-22 01:27:54 +00001031 }
nethercote2d5b8162004-08-11 09:40:52 +00001032 } else {
1033 // Not enough space for successor: check that b is the last block
1034 // ie. there are no unused bytes at the end of the Superblock.
njna2578652005-07-17 17:12:24 +00001035 vg_assert(other_b-1 == (Block*)sb_end);
sewardjde4a1d02002-03-22 01:27:54 +00001036 }
1037
nethercote2d5b8162004-08-11 09:40:52 +00001038 // Then see if this block can be merged with its predecessor.
1039 // First test if we're far enough after the superblock's start to possibly
1040 // have a predecessor.
1041 if (b >= (Block*)sb_start + min_useful_bszB(a)) {
1042 // Ok, we have a predecessor, merge if it's not in use.
njna2578652005-07-17 17:12:24 +00001043 other_b = get_predecessor_block( b );
njnd0e685c2005-07-17 17:55:42 +00001044 other_bszB = get_bszB(other_b);
njn472cc7c2005-07-17 17:20:30 +00001045 if (!is_inuse_block(other_b)) {
nethercote2d5b8162004-08-11 09:40:52 +00001046 // VG_(printf)( "merge-predecessor\n");
nethercote2d5b8162004-08-11 09:40:52 +00001047 unlinkBlock( a, b, b_listno );
njna2578652005-07-17 17:12:24 +00001048 unlinkBlock( a, other_b, pszB_to_listNo(bszB_to_pszB(a, other_bszB)) );
1049 b = other_b;
nethercote2d5b8162004-08-11 09:40:52 +00001050 b_bszB += other_bszB;
1051 b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
1052 mkFreeBlock( a, b, b_bszB, b_listno );
sewardjde4a1d02002-03-22 01:27:54 +00001053 }
nethercote2d5b8162004-08-11 09:40:52 +00001054 } else {
1055 // Not enough space for predecessor: check that b is the first block,
1056 // ie. there are no unused bytes at the start of the Superblock.
1057 vg_assert((Block*)sb_start == b);
sewardjde4a1d02002-03-22 01:27:54 +00001058 }
1059
1060# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00001061 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001062# endif
1063
sewardjb5f6f512005-03-10 23:59:00 +00001064 VALGRIND_FREELIKE_BLOCK(ptr, 0);
1065
njn25e49d8e72002-09-23 09:36:25 +00001066 VGP_POPCC(VgpMalloc);
sewardjde4a1d02002-03-22 01:27:54 +00001067}
1068
1069
1070/*
1071 The idea for malloc_aligned() is to allocate a big block, base, and
1072 then split it into two parts: frag, which is returned to the the
1073 free pool, and align, which is the bit we're really after. Here's
1074 a picture. L and H denote the block lower and upper overheads, in
nethercote2d5b8162004-08-11 09:40:52 +00001075 bytes. The details are gruesome. Note it is slightly complicated
sewardjde4a1d02002-03-22 01:27:54 +00001076 because the initial request to generate base may return a bigger
1077 block than we asked for, so it is important to distinguish the base
1078 request size and the base actual size.
1079
1080 frag_b align_b
1081 | |
1082 | frag_p | align_p
1083 | | | |
1084 v v v v
1085
1086 +---+ +---+---+ +---+
1087 | L |----------------| H | L |---------------| H |
1088 +---+ +---+---+ +---+
1089
1090 ^ ^ ^
1091 | | :
1092 | base_p this addr must be aligned
1093 |
1094 base_b
1095
1096 . . . . . . .
nethercote2d5b8162004-08-11 09:40:52 +00001097 <------ frag_bszB -------> . . .
1098 . <------------- base_pszB_act -----------> .
sewardjde4a1d02002-03-22 01:27:54 +00001099 . . . . . . .
1100
1101*/
njn717cde52005-05-10 02:47:21 +00001102void* VG_(arena_memalign) ( ArenaId aid, SizeT req_alignB, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001103{
nethercote7ac7f7b2004-11-02 12:36:02 +00001104 SizeT base_pszB_req, base_pszB_act, frag_bszB;
nethercote2d5b8162004-08-11 09:40:52 +00001105 Block *base_b, *align_b;
1106 UByte *base_p, *align_p;
nethercote7ac7f7b2004-11-02 12:36:02 +00001107 SizeT saved_bytes_on_loan;
sewardjde4a1d02002-03-22 01:27:54 +00001108 Arena* a;
1109
njn25e49d8e72002-09-23 09:36:25 +00001110 VGP_PUSHCC(VgpMalloc);
1111
sewardjde4a1d02002-03-22 01:27:54 +00001112 ensure_mm_init();
1113 a = arenaId_to_ArenaP(aid);
1114
nethercote7ac7f7b2004-11-02 12:36:02 +00001115 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00001116
nethercote2d5b8162004-08-11 09:40:52 +00001117 // Check that the requested alignment seems reasonable; that is, is
1118 // a power of 2.
1119 if (req_alignB < VG_MIN_MALLOC_SZB
1120 || req_alignB > 1048576
njn717cde52005-05-10 02:47:21 +00001121 || VG_(log2)( req_alignB ) == -1 /* not a power of 2 */) {
1122 VG_(printf)("VG_(arena_memalign)(%p, %d, %d)\nbad alignment",
nethercote2d5b8162004-08-11 09:40:52 +00001123 a, req_alignB, req_pszB );
njn717cde52005-05-10 02:47:21 +00001124 VG_(core_panic)("VG_(arena_memalign)");
nethercote2d5b8162004-08-11 09:40:52 +00001125 /*NOTREACHED*/
sewardjde4a1d02002-03-22 01:27:54 +00001126 }
nethercote2d5b8162004-08-11 09:40:52 +00001127 // Paranoid
1128 vg_assert(req_alignB % VG_MIN_MALLOC_SZB == 0);
sewardjde4a1d02002-03-22 01:27:54 +00001129
1130 /* Required payload size for the aligned chunk. */
nethercote2d5b8162004-08-11 09:40:52 +00001131 req_pszB = align_req_pszB(req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001132
nethercote2d5b8162004-08-11 09:40:52 +00001133 /* Payload size to request for the big block that we will split up. */
1134 base_pszB_req = req_pszB + min_useful_bszB(a) + req_alignB;
sewardjde4a1d02002-03-22 01:27:54 +00001135
1136 /* Payload ptr for the block we are going to split. Note this
1137 changes a->bytes_on_loan; we save and restore it ourselves. */
1138 saved_bytes_on_loan = a->bytes_on_loan;
nethercote2d5b8162004-08-11 09:40:52 +00001139 base_p = VG_(arena_malloc) ( aid, base_pszB_req );
sewardjde4a1d02002-03-22 01:27:54 +00001140 a->bytes_on_loan = saved_bytes_on_loan;
1141
1142 /* Block ptr for the block we are going to split. */
nethercote2d5b8162004-08-11 09:40:52 +00001143 base_b = get_payload_block ( a, base_p );
sewardjde4a1d02002-03-22 01:27:54 +00001144
1145 /* Pointer to the payload of the aligned block we are going to
1146 return. This has to be suitably aligned. */
nethercote2d5b8162004-08-11 09:40:52 +00001147 align_p = align_upwards ( base_b + 2 * overhead_szB_lo(a)
1148 + overhead_szB_hi(a),
sewardjde4a1d02002-03-22 01:27:54 +00001149 req_alignB );
nethercote2d5b8162004-08-11 09:40:52 +00001150 align_b = get_payload_block(a, align_p);
sewardjde4a1d02002-03-22 01:27:54 +00001151
1152 /* The block size of the fragment we will create. This must be big
1153 enough to actually create a fragment. */
nethercote2d5b8162004-08-11 09:40:52 +00001154 frag_bszB = align_b - base_b;
1155
1156 vg_assert(frag_bszB >= min_useful_bszB(a));
sewardjde4a1d02002-03-22 01:27:54 +00001157
1158 /* The actual payload size of the block we are going to split. */
njnd0e685c2005-07-17 17:55:42 +00001159 base_pszB_act = bszB_to_pszB(a, get_bszB(base_b));
sewardjde4a1d02002-03-22 01:27:54 +00001160
nethercote2d5b8162004-08-11 09:40:52 +00001161 /* Create the fragment block, and put it back on the relevant free list. */
1162 mkFreeBlock ( a, base_b, frag_bszB,
1163 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)) );
sewardjde4a1d02002-03-22 01:27:54 +00001164
1165 /* Create the aligned block. */
nethercote2d5b8162004-08-11 09:40:52 +00001166 mkInuseBlock ( a, align_b,
1167 base_p + base_pszB_act
1168 + overhead_szB_hi(a) - (UByte*)align_b );
sewardjde4a1d02002-03-22 01:27:54 +00001169
1170 /* Final sanity checks. */
njn472cc7c2005-07-17 17:20:30 +00001171 vg_assert( is_inuse_block(get_payload_block(a, align_p)) );
sewardjde4a1d02002-03-22 01:27:54 +00001172
nethercote2d5b8162004-08-11 09:40:52 +00001173 vg_assert(req_pszB
sewardjde4a1d02002-03-22 01:27:54 +00001174 <=
njnd0e685c2005-07-17 17:55:42 +00001175 bszB_to_pszB(a, get_bszB(get_payload_block(a, align_p)))
sewardjde4a1d02002-03-22 01:27:54 +00001176 );
1177
1178 a->bytes_on_loan
njnd0e685c2005-07-17 17:55:42 +00001179 += bszB_to_pszB(a, get_bszB(get_payload_block(a, align_p)));
sewardjde4a1d02002-03-22 01:27:54 +00001180 if (a->bytes_on_loan > a->bytes_on_loan_max)
1181 a->bytes_on_loan_max = a->bytes_on_loan;
1182
1183# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00001184 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001185# endif
1186
njn25e49d8e72002-09-23 09:36:25 +00001187 VGP_POPCC(VgpMalloc);
1188
nethercote2d5b8162004-08-11 09:40:52 +00001189 vg_assert( (((Addr)align_p) % req_alignB) == 0 );
sewardjb5f6f512005-03-10 23:59:00 +00001190
1191 VALGRIND_MALLOCLIKE_BLOCK(align_p, req_pszB, 0, False);
1192
nethercote2d5b8162004-08-11 09:40:52 +00001193 return align_p;
1194}
1195
1196
nethercote7ac7f7b2004-11-02 12:36:02 +00001197SizeT VG_(arena_payload_szB) ( ArenaId aid, void* ptr )
nethercote2d5b8162004-08-11 09:40:52 +00001198{
1199 Arena* a = arenaId_to_ArenaP(aid);
1200 Block* b = get_payload_block(a, ptr);
njnd0e685c2005-07-17 17:55:42 +00001201 return bszB_to_pszB(a, get_bszB(b));
sewardjde4a1d02002-03-22 01:27:54 +00001202}
1203
1204
1205/*------------------------------------------------------------*/
1206/*--- Services layered on top of malloc/free. ---*/
1207/*------------------------------------------------------------*/
1208
njn828022a2005-03-13 14:56:31 +00001209void* VG_(arena_calloc) ( ArenaId aid, SizeT nmemb, SizeT bytes_per_memb )
sewardjde4a1d02002-03-22 01:27:54 +00001210{
nethercote7ac7f7b2004-11-02 12:36:02 +00001211 SizeT size;
sewardjde4a1d02002-03-22 01:27:54 +00001212 UChar* p;
njn25e49d8e72002-09-23 09:36:25 +00001213
1214 VGP_PUSHCC(VgpMalloc);
1215
njn926ed472005-03-11 04:44:10 +00001216 size = nmemb * bytes_per_memb;
1217 vg_assert(size >= nmemb && size >= bytes_per_memb);// check against overflow
njn3e884182003-04-15 13:03:23 +00001218
njn828022a2005-03-13 14:56:31 +00001219 p = VG_(arena_malloc) ( aid, size );
njn3e884182003-04-15 13:03:23 +00001220
njn926ed472005-03-11 04:44:10 +00001221 VG_(memset)(p, 0, size);
sewardjb5f6f512005-03-10 23:59:00 +00001222
njn926ed472005-03-11 04:44:10 +00001223 VALGRIND_MALLOCLIKE_BLOCK(p, size, 0, True);
njn25e49d8e72002-09-23 09:36:25 +00001224
1225 VGP_POPCC(VgpMalloc);
1226
sewardjde4a1d02002-03-22 01:27:54 +00001227 return p;
1228}
1229
1230
njn828022a2005-03-13 14:56:31 +00001231void* VG_(arena_realloc) ( ArenaId aid, void* ptr, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001232{
1233 Arena* a;
nethercote7ac7f7b2004-11-02 12:36:02 +00001234 SizeT old_bszB, old_pszB;
sewardjb5f6f512005-03-10 23:59:00 +00001235 UChar *p_new;
nethercote2d5b8162004-08-11 09:40:52 +00001236 Block* b;
sewardjde4a1d02002-03-22 01:27:54 +00001237
njn25e49d8e72002-09-23 09:36:25 +00001238 VGP_PUSHCC(VgpMalloc);
1239
sewardjde4a1d02002-03-22 01:27:54 +00001240 ensure_mm_init();
1241 a = arenaId_to_ArenaP(aid);
1242
nethercote7ac7f7b2004-11-02 12:36:02 +00001243 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00001244
nethercote2d5b8162004-08-11 09:40:52 +00001245 b = get_payload_block(a, ptr);
1246 vg_assert(blockSane(a, b));
sewardjde4a1d02002-03-22 01:27:54 +00001247
njn472cc7c2005-07-17 17:20:30 +00001248 vg_assert(is_inuse_block(b));
njnd0e685c2005-07-17 17:55:42 +00001249 old_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +00001250 old_pszB = bszB_to_pszB(a, old_bszB);
sewardjde4a1d02002-03-22 01:27:54 +00001251
njn25e49d8e72002-09-23 09:36:25 +00001252 if (req_pszB <= old_pszB) {
1253 VGP_POPCC(VgpMalloc);
1254 return ptr;
1255 }
sewardjde4a1d02002-03-22 01:27:54 +00001256
njn828022a2005-03-13 14:56:31 +00001257 p_new = VG_(arena_malloc) ( aid, req_pszB );
1258
sewardjb5f6f512005-03-10 23:59:00 +00001259 VG_(memcpy)(p_new, ptr, old_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001260
sewardjb5f6f512005-03-10 23:59:00 +00001261 VG_(arena_free)(aid, ptr);
njn25e49d8e72002-09-23 09:36:25 +00001262
1263 VGP_POPCC(VgpMalloc);
sewardjde4a1d02002-03-22 01:27:54 +00001264 return p_new;
1265}
1266
1267
njn6ba622c2005-06-11 01:12:08 +00001268/* Inline just for the wrapper VG_(strdup) below */
1269__inline__ Char* VG_(arena_strdup) ( ArenaId aid, const Char* s )
1270{
1271 Int i;
1272 Int len;
1273 Char* res;
1274
1275 if (s == NULL)
1276 return NULL;
1277
1278 len = VG_(strlen)(s) + 1;
1279 res = VG_(arena_malloc) (aid, len);
1280
1281 for (i = 0; i < len; i++)
1282 res[i] = s[i];
1283 return res;
1284}
1285
1286
sewardjde4a1d02002-03-22 01:27:54 +00001287/*------------------------------------------------------------*/
nethercote996901a2004-08-03 13:29:09 +00001288/*--- Tool-visible functions. ---*/
njn25e49d8e72002-09-23 09:36:25 +00001289/*------------------------------------------------------------*/
1290
nethercote2d5b8162004-08-11 09:40:52 +00001291// All just wrappers to avoid exposing arenas to tools.
njn25e49d8e72002-09-23 09:36:25 +00001292
nethercote7ac7f7b2004-11-02 12:36:02 +00001293void* VG_(malloc) ( SizeT nbytes )
njn25e49d8e72002-09-23 09:36:25 +00001294{
nethercote60f5b822004-01-26 17:24:42 +00001295 return VG_(arena_malloc) ( VG_AR_TOOL, nbytes );
njn25e49d8e72002-09-23 09:36:25 +00001296}
1297
1298void VG_(free) ( void* ptr )
1299{
nethercote60f5b822004-01-26 17:24:42 +00001300 VG_(arena_free) ( VG_AR_TOOL, ptr );
njn25e49d8e72002-09-23 09:36:25 +00001301}
1302
njn926ed472005-03-11 04:44:10 +00001303void* VG_(calloc) ( SizeT nmemb, SizeT bytes_per_memb )
njn25e49d8e72002-09-23 09:36:25 +00001304{
njn828022a2005-03-13 14:56:31 +00001305 return VG_(arena_calloc) ( VG_AR_TOOL, nmemb, bytes_per_memb );
njn25e49d8e72002-09-23 09:36:25 +00001306}
1307
nethercote7ac7f7b2004-11-02 12:36:02 +00001308void* VG_(realloc) ( void* ptr, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001309{
njn828022a2005-03-13 14:56:31 +00001310 return VG_(arena_realloc) ( VG_AR_TOOL, ptr, size );
njn25e49d8e72002-09-23 09:36:25 +00001311}
1312
njn6ba622c2005-06-11 01:12:08 +00001313Char* VG_(strdup) ( const Char* s )
1314{
1315 return VG_(arena_strdup) ( VG_AR_TOOL, s );
1316}
1317
sewardjde4a1d02002-03-22 01:27:54 +00001318/*--------------------------------------------------------------------*/
njn717cde52005-05-10 02:47:21 +00001319/*--- end ---*/
sewardjde4a1d02002-03-22 01:27:54 +00001320/*--------------------------------------------------------------------*/