blob: d975f0879ddb92d4ecc95dda20c15cde76431ca5 [file] [log] [blame]
sewardjde4a1d02002-03-22 01:27:54 +00001
2/*--------------------------------------------------------------------*/
3/*--- An implementation of malloc/free which doesn't use sbrk. ---*/
njn717cde52005-05-10 02:47:21 +00004/*--- m_mallocfree.c ---*/
sewardjde4a1d02002-03-22 01:27:54 +00005/*--------------------------------------------------------------------*/
6
7/*
njnb9c427c2004-12-01 14:14:42 +00008 This file is part of Valgrind, a dynamic binary instrumentation
9 framework.
sewardjde4a1d02002-03-22 01:27:54 +000010
njn53612422005-03-12 16:22:54 +000011 Copyright (C) 2000-2005 Julian Seward
sewardjde4a1d02002-03-22 01:27:54 +000012 jseward@acm.org
sewardjde4a1d02002-03-22 01:27:54 +000013
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
njn25e49d8e72002-09-23 09:36:25 +000029 The GNU General Public License is contained in the file COPYING.
sewardjde4a1d02002-03-22 01:27:54 +000030*/
31
njnc7561b92005-06-19 01:24:32 +000032#include "pub_core_basics.h"
sewardj45f4e7c2005-09-27 19:20:21 +000033#include "pub_core_debuglog.h"
njn97405b22005-06-02 03:39:33 +000034#include "pub_core_libcbase.h"
sewardj45f4e7c2005-09-27 19:20:21 +000035#include "pub_core_aspacemgr.h"
njn132bfcc2005-06-04 19:16:06 +000036#include "pub_core_libcassert.h"
njn36a20fa2005-06-03 03:08:39 +000037#include "pub_core_libcprint.h"
njnaf1d7df2005-06-11 01:31:52 +000038#include "pub_core_mallocfree.h"
njn20242342005-05-16 23:31:24 +000039#include "pub_core_options.h"
njn31513b42005-06-01 03:09:59 +000040#include "pub_core_profile.h"
njnfc51f8d2005-06-21 03:20:17 +000041#include "pub_core_tooliface.h"
njn296c24d2005-05-15 03:52:40 +000042#include "valgrind.h"
sewardj55f9d1a2005-04-25 11:11:44 +000043
sewardjb5f6f512005-03-10 23:59:00 +000044//zz#include "memcheck/memcheck.h"
sewardjde4a1d02002-03-22 01:27:54 +000045
nethercote2d5b8162004-08-11 09:40:52 +000046//#define DEBUG_MALLOC // turn on heavyweight debugging machinery
47//#define VERBOSE_MALLOC // make verbose, esp. in debugging machinery
48
49/*------------------------------------------------------------*/
50/*--- Main types ---*/
51/*------------------------------------------------------------*/
52
sewardj70e212d2005-05-19 10:54:01 +000053#define N_MALLOC_LISTS 18 // do not change this
nethercote2d5b8162004-08-11 09:40:52 +000054
nethercote7ac7f7b2004-11-02 12:36:02 +000055// The amount you can ask for is limited only by sizeof(SizeT)...
56#define MAX_PSZB (~((SizeT)0x0))
nethercote2d5b8162004-08-11 09:40:52 +000057
58typedef UChar UByte;
59
njn8d3f8452005-07-20 04:12:41 +000060/* Layout of an in-use block:
nethercote2d5b8162004-08-11 09:40:52 +000061
njn8d3f8452005-07-20 04:12:41 +000062 this block total szB (sizeof(SizeT) bytes)
njn7ce83112005-08-24 22:38:00 +000063 red zone bytes (depends on Arena.rz_szB, but >= sizeof(void*))
njn8d3f8452005-07-20 04:12:41 +000064 (payload bytes)
njn7ce83112005-08-24 22:38:00 +000065 red zone bytes (depends on Arena.rz_szB, but >= sizeof(void*))
njn8d3f8452005-07-20 04:12:41 +000066 this block total szB (sizeof(SizeT) bytes)
nethercote2d5b8162004-08-11 09:40:52 +000067
njn8d3f8452005-07-20 04:12:41 +000068 Layout of a block on the free list:
nethercote2d5b8162004-08-11 09:40:52 +000069
njn8d3f8452005-07-20 04:12:41 +000070 this block total szB (sizeof(SizeT) bytes)
71 freelist previous ptr (sizeof(void*) bytes)
72 excess red zone bytes (if Arena.rz_szB > sizeof(void*))
73 (payload bytes)
74 excess red zone bytes (if Arena.rz_szB > sizeof(void*))
75 freelist next ptr (sizeof(void*) bytes)
76 this block total szB (sizeof(SizeT) bytes)
nethercote2d5b8162004-08-11 09:40:52 +000077
njn8d3f8452005-07-20 04:12:41 +000078 Total size in bytes (bszB) and payload size in bytes (pszB)
79 are related by:
nethercote2d5b8162004-08-11 09:40:52 +000080
njn8d3f8452005-07-20 04:12:41 +000081 bszB == pszB + 2*sizeof(SizeT) + 2*a->rz_szB
82
83 Furthermore, both size fields in the block have their least-significant
84 bit set if the block is not in use, and unset if it is in use.
85 (The bottom 3 or so bits are always free for this because of alignment.)
86 A block size of zero is not possible, because a block always has at
87 least two SizeTs and two pointers of overhead.
88
89 Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned. This is
90 achieved by ensuring that Superblocks are VG_MIN_MALLOC_SZB-aligned
91 (see newSuperblock() for how), and that the lengths of the following
92 things are a multiple of VG_MIN_MALLOC_SZB:
93 - Superblock admin section lengths (due to elastic padding)
94 - Block admin section (low and high) lengths (due to elastic redzones)
95 - Block payload lengths (due to req_pszB rounding up)
nethercote2d5b8162004-08-11 09:40:52 +000096*/
97typedef
98 struct {
99 // No fields are actually used in this struct, because a Block has
njn37517e82005-05-25 15:52:39 +0000100 // many variable sized fields and so can't be accessed
nethercote2d5b8162004-08-11 09:40:52 +0000101 // meaningfully with normal fields. So we use access functions all
102 // the time. This struct gives us a type to use, though. Also, we
103 // make sizeof(Block) 1 byte so that we can do arithmetic with the
104 // Block* type in increments of 1!
105 UByte dummy;
106 }
107 Block;
108
109// A superblock. 'padding' is never used, it just ensures that if the
110// entire Superblock is aligned to VG_MIN_MALLOC_SZB, then payload_bytes[]
111// will be too. It can add small amounts of padding unnecessarily -- eg.
112// 8-bytes on 32-bit machines with an 8-byte VG_MIN_MALLOC_SZB -- because
113// it's too hard to make a constant expression that works perfectly in all
114// cases.
115// payload_bytes[] is made a single big Block when the Superblock is
116// created, and then can be split and the splittings remerged, but Blocks
117// always cover its entire length -- there's never any unused bytes at the
118// end, for example.
119typedef
120 struct _Superblock {
121 struct _Superblock* next;
nethercote7ac7f7b2004-11-02 12:36:02 +0000122 SizeT n_payload_bytes;
nethercote2d5b8162004-08-11 09:40:52 +0000123 UByte padding[ VG_MIN_MALLOC_SZB -
nethercote7ac7f7b2004-11-02 12:36:02 +0000124 ((sizeof(struct _Superblock*) + sizeof(SizeT)) %
125 VG_MIN_MALLOC_SZB) ];
nethercote2d5b8162004-08-11 09:40:52 +0000126 UByte payload_bytes[0];
127 }
128 Superblock;
129
130// An arena. 'freelist' is a circular, doubly-linked list. 'rz_szB' is
131// elastic, in that it can be bigger than asked-for to ensure alignment.
132typedef
133 struct {
134 Char* name;
135 Bool clientmem; // Allocates in the client address space?
njn0e742df2004-11-30 13:26:29 +0000136 SizeT rz_szB; // Red zone size in bytes
nethercote7ac7f7b2004-11-02 12:36:02 +0000137 SizeT min_sblock_szB; // Minimum superblock size in bytes
njn6e6588c2005-03-13 18:52:48 +0000138 Block* freelist[N_MALLOC_LISTS];
nethercote2d5b8162004-08-11 09:40:52 +0000139 Superblock* sblocks;
140 // Stats only.
nethercote7ac7f7b2004-11-02 12:36:02 +0000141 SizeT bytes_on_loan;
142 SizeT bytes_mmaped;
143 SizeT bytes_on_loan_max;
nethercote2d5b8162004-08-11 09:40:52 +0000144 }
145 Arena;
146
147
148/*------------------------------------------------------------*/
149/*--- Low-level functions for working with Blocks. ---*/
150/*------------------------------------------------------------*/
151
nethercote7ac7f7b2004-11-02 12:36:02 +0000152#define SIZE_T_0x1 ((SizeT)0x1)
153
// Mark a bszB as in-use, mark it as free, and strip the in-use/free
// attribute, respectively (the three functions below).
nethercote2d5b8162004-08-11 09:40:52 +0000155static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000156SizeT mk_inuse_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000157{
158 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000159 return bszB & (~SIZE_T_0x1);
nethercote2d5b8162004-08-11 09:40:52 +0000160}
161static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000162SizeT mk_free_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000163{
164 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000165 return bszB | SIZE_T_0x1;
nethercote2d5b8162004-08-11 09:40:52 +0000166}
nethercote2d5b8162004-08-11 09:40:52 +0000167static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000168SizeT mk_plain_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000169{
170 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000171 return bszB & (~SIZE_T_0x1);
nethercote2d5b8162004-08-11 09:40:52 +0000172}
173
njn402c8612005-08-23 22:11:20 +0000174//---------------------------------------------------------------------------
175
// Get a block's size as stored, ie. including the in-use/free attribute
// in the bottom bit.  Reads both the lo and hi copies of the size field
// and asserts they agree -- a mismatch indicates the block's bounds were
// overrun or underrun by the program being run.
static __inline__
SizeT get_bszB_as_is ( Block* b )
{
   UByte* b2 = (UByte*)b;
   // Lo copy sits at the very start of the block; the hi copy sits in the
   // last sizeof(SizeT) bytes, located using the lo copy's plain value.
   SizeT bszB_lo = *(SizeT*)&b2[0];
   SizeT bszB_hi = *(SizeT*)&b2[mk_plain_bszB(bszB_lo) - sizeof(SizeT)];
   vg_assert2(bszB_lo == bszB_hi,
      "Heap block lo/hi size mismatch: lo = %llu, hi = %llu.\n"
      "Probably caused by overrunning/underrunning a heap block's bounds.\n",
      (ULong)bszB_lo, (ULong)bszB_hi);
   return bszB_lo;
}
189
njn402c8612005-08-23 22:11:20 +0000190// Get a block's plain size, ie. remove the in-use/free attribute.
191static __inline__
192SizeT get_bszB ( Block* b )
193{
194 return mk_plain_bszB(get_bszB_as_is(b));
195}
196
197// Set the size fields of a block. bszB may have the in-use/free attribute.
198static __inline__
199void set_bszB ( Block* b, SizeT bszB )
200{
201 UByte* b2 = (UByte*)b;
202 *(SizeT*)&b2[0] = bszB;
203 *(SizeT*)&b2[mk_plain_bszB(bszB) - sizeof(SizeT)] = bszB;
204}
205
206//---------------------------------------------------------------------------
207
njn472cc7c2005-07-17 17:20:30 +0000208// Does this block have the in-use attribute?
209static __inline__
210Bool is_inuse_block ( Block* b )
211{
njn402c8612005-08-23 22:11:20 +0000212 SizeT bszB = get_bszB_as_is(b);
njn472cc7c2005-07-17 17:20:30 +0000213 vg_assert(bszB != 0);
214 return (0 != (bszB & SIZE_T_0x1)) ? False : True;
215}
216
njn402c8612005-08-23 22:11:20 +0000217//---------------------------------------------------------------------------
njn8d3f8452005-07-20 04:12:41 +0000218
njn089f51f2005-07-17 18:12:00 +0000219// Return the lower, upper and total overhead in bytes for a block.
220// These are determined purely by which arena the block lives in.
221static __inline__
222SizeT overhead_szB_lo ( Arena* a )
223{
njn8d3f8452005-07-20 04:12:41 +0000224 return sizeof(SizeT) + a->rz_szB;
njn089f51f2005-07-17 18:12:00 +0000225}
226static __inline__
227SizeT overhead_szB_hi ( Arena* a )
228{
njn8d3f8452005-07-20 04:12:41 +0000229 return a->rz_szB + sizeof(SizeT);
njn089f51f2005-07-17 18:12:00 +0000230}
231static __inline__
232SizeT overhead_szB ( Arena* a )
233{
234 return overhead_szB_lo(a) + overhead_szB_hi(a);
235}
236
njn402c8612005-08-23 22:11:20 +0000237//---------------------------------------------------------------------------
238
njn089f51f2005-07-17 18:12:00 +0000239// Return the minimum bszB for a block in this arena. Can have zero-length
240// payloads, so it's the size of the admin bytes.
241static __inline__
242SizeT min_useful_bszB ( Arena* a )
243{
244 return overhead_szB(a);
245}
246
njn402c8612005-08-23 22:11:20 +0000247//---------------------------------------------------------------------------
248
njn089f51f2005-07-17 18:12:00 +0000249// Convert payload size <--> block size (both in bytes).
250static __inline__
251SizeT pszB_to_bszB ( Arena* a, SizeT pszB )
252{
253 return pszB + overhead_szB(a);
254}
255static __inline__
256SizeT bszB_to_pszB ( Arena* a, SizeT bszB )
257{
258 vg_assert(bszB >= overhead_szB(a));
259 return bszB - overhead_szB(a);
260}
261
njn402c8612005-08-23 22:11:20 +0000262//---------------------------------------------------------------------------
nethercote2d5b8162004-08-11 09:40:52 +0000263
njn089f51f2005-07-17 18:12:00 +0000264// Get a block's payload size.
nethercote7ac7f7b2004-11-02 12:36:02 +0000265static __inline__
njn089f51f2005-07-17 18:12:00 +0000266SizeT get_pszB ( Arena* a, Block* b )
nethercote7ac7f7b2004-11-02 12:36:02 +0000267{
njn089f51f2005-07-17 18:12:00 +0000268 return bszB_to_pszB(a, get_bszB(b));
nethercote7ac7f7b2004-11-02 12:36:02 +0000269}
270
njn402c8612005-08-23 22:11:20 +0000271//---------------------------------------------------------------------------
272
273// Given the addr of a block, return the addr of its payload, and vice versa.
nethercote2d5b8162004-08-11 09:40:52 +0000274static __inline__
275UByte* get_block_payload ( Arena* a, Block* b )
276{
277 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000278 return & b2[ overhead_szB_lo(a) ];
nethercote2d5b8162004-08-11 09:40:52 +0000279}
280// Given the addr of a block's payload, return the addr of the block itself.
281static __inline__
282Block* get_payload_block ( Arena* a, UByte* payload )
283{
nethercote7ac7f7b2004-11-02 12:36:02 +0000284 return (Block*)&payload[ -overhead_szB_lo(a) ];
nethercote2d5b8162004-08-11 09:40:52 +0000285}
286
njn402c8612005-08-23 22:11:20 +0000287//---------------------------------------------------------------------------
nethercote2d5b8162004-08-11 09:40:52 +0000288
289// Set and get the next and previous link fields of a block.
290static __inline__
291void set_prev_b ( Block* b, Block* prev_p )
292{
293 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000294 *(Block**)&b2[sizeof(SizeT)] = prev_p;
nethercote2d5b8162004-08-11 09:40:52 +0000295}
296static __inline__
297void set_next_b ( Block* b, Block* next_p )
298{
njn402c8612005-08-23 22:11:20 +0000299 UByte* b2 = (UByte*)b;
300 *(Block**)&b2[get_bszB(b) - sizeof(SizeT) - sizeof(void*)] = next_p;
nethercote2d5b8162004-08-11 09:40:52 +0000301}
302static __inline__
303Block* get_prev_b ( Block* b )
304{
305 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000306 return *(Block**)&b2[sizeof(SizeT)];
nethercote2d5b8162004-08-11 09:40:52 +0000307}
308static __inline__
309Block* get_next_b ( Block* b )
310{
njn402c8612005-08-23 22:11:20 +0000311 UByte* b2 = (UByte*)b;
312 return *(Block**)&b2[get_bszB(b) - sizeof(SizeT) - sizeof(void*)];
nethercote2d5b8162004-08-11 09:40:52 +0000313}
314
njn402c8612005-08-23 22:11:20 +0000315//---------------------------------------------------------------------------
nethercote2d5b8162004-08-11 09:40:52 +0000316
317// Get the block immediately preceding this one in the Superblock.
318static __inline__
319Block* get_predecessor_block ( Block* b )
320{
321 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000322 SizeT bszB = mk_plain_bszB( (*(SizeT*)&b2[-sizeof(SizeT)]) );
nethercote2d5b8162004-08-11 09:40:52 +0000323 return (Block*)&b2[-bszB];
324}
325
njn402c8612005-08-23 22:11:20 +0000326//---------------------------------------------------------------------------
327
nethercote2d5b8162004-08-11 09:40:52 +0000328// Read and write the lower and upper red-zone bytes of a block.
329static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000330void set_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
nethercote2d5b8162004-08-11 09:40:52 +0000331{
332 UByte* b2 = (UByte*)b;
njn8d3f8452005-07-20 04:12:41 +0000333 b2[sizeof(SizeT) + rz_byteno] = v;
nethercote2d5b8162004-08-11 09:40:52 +0000334}
335static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000336void set_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
nethercote2d5b8162004-08-11 09:40:52 +0000337{
njn402c8612005-08-23 22:11:20 +0000338 UByte* b2 = (UByte*)b;
339 b2[get_bszB(b) - sizeof(SizeT) - rz_byteno - 1] = v;
nethercote2d5b8162004-08-11 09:40:52 +0000340}
341static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000342UByte get_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno )
nethercote2d5b8162004-08-11 09:40:52 +0000343{
344 UByte* b2 = (UByte*)b;
njn8d3f8452005-07-20 04:12:41 +0000345 return b2[sizeof(SizeT) + rz_byteno];
nethercote2d5b8162004-08-11 09:40:52 +0000346}
347static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000348UByte get_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno )
nethercote2d5b8162004-08-11 09:40:52 +0000349{
njn402c8612005-08-23 22:11:20 +0000350 UByte* b2 = (UByte*)b;
351 return b2[get_bszB(b) - sizeof(SizeT) - rz_byteno - 1];
nethercote2d5b8162004-08-11 09:40:52 +0000352}
353
354
nethercote2d5b8162004-08-11 09:40:52 +0000355/*------------------------------------------------------------*/
356/*--- Arena management ---*/
357/*------------------------------------------------------------*/
358
359#define CORE_ARENA_MIN_SZB 1048576
360
361// The arena structures themselves.
362static Arena vg_arena[VG_N_ARENAS];
363
// Functions external to this module identify arenas using ArenaIds,
// not Arena*s.  This fn converts the former to the latter, asserting
// that the id is in range.
static Arena* arenaId_to_ArenaP ( ArenaId arena )
{
   vg_assert(arena >= 0 && arena < VG_N_ARENAS);
   return & vg_arena[arena];
}
371
// Initialise an arena.  rz_szB is the minimum redzone size; it might be
// made bigger to ensure that VG_MIN_MALLOC_SZB is observed.
static
void arena_init ( ArenaId aid, Char* name, SizeT rz_szB, SizeT min_sblock_szB )
{
   SizeT i;
   Arena* a = arenaId_to_ArenaP(aid);

   // Ensure redzones are a reasonable size.  They must always be at least
   // the size of a pointer, for holding the prev/next pointer (see the layout
   // details at the top of this file).
   vg_assert(rz_szB < 128);
   if (rz_szB < sizeof(void*)) rz_szB = sizeof(void*);

   // Superblocks are mmap'd, so their size must be page-granular.
   vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
   a->name      = name;
   a->clientmem = ( VG_AR_CLIENT == aid ? True : False );

   // The size of the low and high admin sections in a block must be a
   // multiple of VG_MIN_MALLOC_SZB.  So we round up the asked-for
   // redzone size if necessary to achieve this.
   a->rz_szB = rz_szB;
   while (0 != overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++;
   vg_assert(overhead_szB_lo(a) == overhead_szB_hi(a));

   a->min_sblock_szB = min_sblock_szB;
   // Empty all freelists; no superblocks yet; zero the stats counters.
   for (i = 0; i < N_MALLOC_LISTS; i++) a->freelist[i] = NULL;
   a->sblocks           = NULL;
   a->bytes_on_loan     = 0;
   a->bytes_mmaped      = 0;
   a->bytes_on_loan_max = 0;
}
404
405/* Print vital stats for an arena. */
406void VG_(print_all_arena_stats) ( void )
407{
nethercote7ac7f7b2004-11-02 12:36:02 +0000408 UInt i;
nethercote2d5b8162004-08-11 09:40:52 +0000409 for (i = 0; i < VG_N_ARENAS; i++) {
410 Arena* a = arenaId_to_ArenaP(i);
411 VG_(message)(Vg_DebugMsg,
njn669ef072005-03-13 05:46:57 +0000412 "%8s: %8d mmap'd, %8d/%8d max/curr",
nethercote2d5b8162004-08-11 09:40:52 +0000413 a->name, a->bytes_mmaped, a->bytes_on_loan_max, a->bytes_on_loan
414 );
415 }
416}
417
418/* This library is self-initialising, as it makes this more self-contained,
419 less coupled with the outside world. Hence VG_(arena_malloc)() and
420 VG_(arena_free)() below always call ensure_mm_init() to ensure things are
sewardj45f4e7c2005-09-27 19:20:21 +0000421 correctly initialised.
422
423 We initialise the client arena separately (and later) because the core
424 must do non-client allocation before the tool has a chance to set the
425 client arena's redzone size.
426*/
// Lazily initialise either the client arena or all the non-client arenas,
// depending on 'aid'.  Idempotent: subsequent calls for an already-inited
// group return immediately.  The client arena is handled separately (and
// later) because the core must do non-client allocation before the tool
// has a chance to set the client arena's redzone size.
static
void ensure_mm_init ( ArenaId aid )
{
   static Bool     client_inited = False;
   static Bool  nonclient_inited = False;
   static SizeT client_redzone_szB = 8;   // default: be paranoid

   /* We use checked red zones (of various sizes) for our internal stuff,
      and an unchecked zone of arbitrary size for the client.  Of
      course the client's red zone can be checked by the tool, eg.
      by using addressibility maps, but not by the mechanism implemented
      here, which merely checks at the time of freeing that the red
      zone bytes are unchanged.

      Nb: redzone sizes are *minimums*; they could be made bigger to ensure
      alignment.  Eg. with 8 byte alignment, on 32-bit machines 4 stays as
      4, but 16 becomes 20; but on 64-bit machines 4 becomes 8, and 16
      stays as 16 --- the extra 4 bytes in both are accounted for by the
      larger prev/next ptr.
   */
   if (VG_AR_CLIENT == aid) {
      if (client_inited) {
         // This assertion ensures that a tool cannot try to change the client
         // redzone size with VG_(needs_malloc_replacement)() after this module
         // has done its first allocation from the client arena.
         if (VG_(needs).malloc_replacement)
            vg_assert(client_redzone_szB == VG_(tdict).tool_client_redzone_szB);
         return;
      }

      // Check and set the client arena redzone size
      if (VG_(needs).malloc_replacement) {
         client_redzone_szB = VG_(tdict).tool_client_redzone_szB;
         // 128 is no special figure, just something not too big
         if (client_redzone_szB > 128) {
            VG_(printf)( "\nTool error:\n"
                         "  specified redzone size is too big (%llu)\n",
                         (ULong)client_redzone_szB);
            VG_(exit)(1);
         }
      }
      // Initialise the client arena
      arena_init ( VG_AR_CLIENT,    "client",   client_redzone_szB, 1048576 );
      client_inited = True;

   } else {
      if (nonclient_inited) {
         return;
      }
      // Initialise the non-client arenas
      arena_init ( VG_AR_CORE,      "core",     4,   CORE_ARENA_MIN_SZB );
      arena_init ( VG_AR_TOOL,      "tool",     4,   1048576 );
      arena_init ( VG_AR_SYMTAB,    "symtab",   4,   1048576 );
      arena_init ( VG_AR_DEMANGLE,  "demangle", 4,   65536 );
      arena_init ( VG_AR_EXECTXT,   "exectxt",  4,   262144 );
      arena_init ( VG_AR_ERRORS,    "errors",   4,   65536 );
      arena_init ( VG_AR_TTAUX,     "ttaux",    4,   65536 );
      nonclient_inited = True;
   }

#  ifdef DEBUG_MALLOC
   VG_(sanity_check_malloc_all)();
#  endif
}
491
492
493/*------------------------------------------------------------*/
494/*--- Superblock management ---*/
495/*------------------------------------------------------------*/
496
// Report an out-of-memory condition on behalf of 'who' (which requested
// szB bytes) and exit.  Never returns.  On re-entry (alreadyCrashing set
// -- presumably in case reporting itself fails and recurses; TODO confirm)
// the messages go via the low-level debug log instead of VG_(printf).
void VG_(out_of_memory_NORETURN) ( HChar* who, SizeT szB )
{
   static Bool alreadyCrashing = False;
   ULong tot_alloc = VG_(am_get_anonsize_total)();
   if (!alreadyCrashing) {
      alreadyCrashing = True;
      VG_(printf)("\n"
                  "Valgrind's memory management: out of memory:\n");
      VG_(printf)("   %s's request for %llu bytes failed.\n",
                  who, (ULong)szB );
      VG_(printf)("   %llu bytes have already been allocated.\n",
                  tot_alloc);
      VG_(printf)("Valgrind cannot continue.  Sorry.\n\n");
   } else {
      VG_(debugLog)(0,"mallocfree","\n");
      VG_(debugLog)(0,"mallocfree",
                    "Valgrind's memory management: out of memory:\n");
      VG_(debugLog)(0,"mallocfree",
                    "   %s's request for %llu bytes failed.\n",
                    who, (ULong)szB );
      VG_(debugLog)(0,"mallocfree",
                    "   %llu bytes have already been allocated.\n",
                    tot_alloc);
      VG_(debugLog)(0,"mallocfree","Valgrind cannot continue.  Sorry.\n\n");
   }
   VG_(exit)(1);
}
524
525
nethercote2d5b8162004-08-11 09:40:52 +0000526// Align ptr p upwards to an align-sized boundary.
527static
nethercote7ac7f7b2004-11-02 12:36:02 +0000528void* align_upwards ( void* p, SizeT align )
nethercote2d5b8162004-08-11 09:40:52 +0000529{
530 Addr a = (Addr)p;
531 if ((a % align) == 0) return (void*)a;
532 return (void*)(a - (a % align) + align);
533}
534
// Allocate a new Superblock big enough to hold cszB payload bytes.
// If not enough memory available, either aborts (for non-client memory)
// or returns 0 (for client memory).
static
Superblock* newSuperblock ( Arena* a, SizeT cszB )
{
   Superblock* sb;
   SysRes      sres;
   NSegment*   seg;

   // Take into account admin bytes in the Superblock.
   cszB += sizeof(Superblock);

   // Round up to at least the arena's minimum superblock size, and then
   // to a whole number of pages (mmap is page-granular).
   if (cszB < a->min_sblock_szB) cszB = a->min_sblock_szB;
   while ((cszB % VKI_PAGE_SIZE) > 0) cszB++;

   if (a->clientmem) {
      // client allocation -- return 0 to client if it fails
      sres = VG_(am_mmap_anon_float_client)
                ( cszB, VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC );
      if (sres.isError)
         return 0;
      sb = (Superblock*)sres.val;
      // Mark this segment as containing client heap.  The leak
      // checker needs to be able to identify such segments so as not
      // to use them as sources of roots during leak checks.
      seg = VG_(am_find_nsegment)( (Addr)sb );
      vg_assert(seg && seg->kind == SkAnonC);
      seg->isCH = True;
   } else {
      // non-client allocation -- abort if it fails
      sres = VG_(am_mmap_anon_float_valgrind)( cszB );
      if (sres.isError) {
         VG_(out_of_memory_NORETURN)("newSuperblock", cszB);
         /* NOTREACHED */
         sb = NULL; /* keep gcc happy */
      } else {
         sb = (Superblock*)sres.val;
      }
   }
   vg_assert(NULL != sb);
   //zzVALGRIND_MAKE_WRITABLE(sb, cszB);
   // mmap'd memory is page-aligned, hence VG_MIN_MALLOC_SZB-aligned.
   vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
   sb->n_payload_bytes = cszB - sizeof(Superblock);
   a->bytes_mmaped += cszB;
   VG_(debugLog)(1, "mallocfree",
                    "newSuperblock at %p (pszB %7ld) owner %s/%s\n",
                    sb, sb->n_payload_bytes,
                    a->clientmem ? "CLIENT" : "VALGRIND", a->name );
   return sb;
}
585
586// Find the superblock containing the given chunk.
587static
588Superblock* findSb ( Arena* a, Block* b )
589{
590 Superblock* sb;
591 for (sb = a->sblocks; sb; sb = sb->next)
592 if ((Block*)&sb->payload_bytes[0] <= b
593 && b < (Block*)&sb->payload_bytes[sb->n_payload_bytes])
594 return sb;
njn02bc4b82005-05-15 17:28:26 +0000595 VG_(printf)("findSb: can't find pointer %p in arena '%s'\n", b, a->name );
nethercote2d5b8162004-08-11 09:40:52 +0000596 VG_(core_panic)("findSb: VG_(arena_free)() in wrong arena?");
597 return NULL; /*NOTREACHED*/
598}
599
sewardjde4a1d02002-03-22 01:27:54 +0000600
fitzhardinge98abfc72003-12-16 02:05:15 +0000601/*------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +0000602/*--- Functions for working with freelists. ---*/
603/*------------------------------------------------------------*/
604
nethercote2d5b8162004-08-11 09:40:52 +0000605// Nb: Determination of which freelist a block lives on is based on the
606// payload size, not block size.
sewardjde4a1d02002-03-22 01:27:54 +0000607
nethercote2d5b8162004-08-11 09:40:52 +0000608// Convert a payload size in bytes to a freelist number.
sewardjde4a1d02002-03-22 01:27:54 +0000609static
nethercote7ac7f7b2004-11-02 12:36:02 +0000610UInt pszB_to_listNo ( SizeT pszB )
sewardjde4a1d02002-03-22 01:27:54 +0000611{
njndb247dc2005-07-17 23:12:33 +0000612 SizeT n = pszB / VG_MIN_MALLOC_SZB;
tom60a4b0b2005-10-12 10:45:27 +0000613 vg_assert(0 == pszB % VG_MIN_MALLOC_SZB);
njn61dcab82005-05-21 19:36:45 +0000614
615 // The first 13 lists hold blocks of size VG_MIN_MALLOC_SZB * list_num.
njndb247dc2005-07-17 23:12:33 +0000616 // The final 5 hold bigger blocks.
617 if (n <= 12) return (UInt)n;
618 if (n <= 16) return 13;
619 if (n <= 32) return 14;
620 if (n <= 64) return 15;
621 if (n <= 128) return 16;
sewardj70e212d2005-05-19 10:54:01 +0000622 return 17;
sewardjde4a1d02002-03-22 01:27:54 +0000623}
624
nethercote2d5b8162004-08-11 09:40:52 +0000625// What is the minimum payload size for a given list?
sewardjde4a1d02002-03-22 01:27:54 +0000626static
nethercote7ac7f7b2004-11-02 12:36:02 +0000627SizeT listNo_to_pszB_min ( UInt listNo )
sewardjde4a1d02002-03-22 01:27:54 +0000628{
nethercote7ac7f7b2004-11-02 12:36:02 +0000629 SizeT pszB = 0;
njn6e6588c2005-03-13 18:52:48 +0000630 vg_assert(listNo <= N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +0000631 while (pszB_to_listNo(pszB) < listNo) pszB += VG_MIN_MALLOC_SZB;
632 return pszB;
sewardjde4a1d02002-03-22 01:27:54 +0000633}
634
nethercote2d5b8162004-08-11 09:40:52 +0000635// What is the maximum payload size for a given list?
sewardjde4a1d02002-03-22 01:27:54 +0000636static
nethercote7ac7f7b2004-11-02 12:36:02 +0000637SizeT listNo_to_pszB_max ( UInt listNo )
sewardjde4a1d02002-03-22 01:27:54 +0000638{
njn6e6588c2005-03-13 18:52:48 +0000639 vg_assert(listNo <= N_MALLOC_LISTS);
640 if (listNo == N_MALLOC_LISTS-1) {
nethercote2d5b8162004-08-11 09:40:52 +0000641 return MAX_PSZB;
sewardjde4a1d02002-03-22 01:27:54 +0000642 } else {
nethercote2d5b8162004-08-11 09:40:52 +0000643 return listNo_to_pszB_min(listNo+1) - 1;
sewardjde4a1d02002-03-22 01:27:54 +0000644 }
645}
646
647
648/* A nasty hack to try and reduce fragmentation. Try and replace
649 a->freelist[lno] with another block on the same list but with a
650 lower address, with the idea of attempting to recycle the same
651 blocks rather than cruise through the address space. */
sewardjde4a1d02002-03-22 01:27:54 +0000652static
nethercote7ac7f7b2004-11-02 12:36:02 +0000653void swizzle ( Arena* a, UInt lno )
sewardjde4a1d02002-03-22 01:27:54 +0000654{
nethercote2d5b8162004-08-11 09:40:52 +0000655 Block* p_best;
656 Block* pp;
657 Block* pn;
nethercote7ac7f7b2004-11-02 12:36:02 +0000658 UInt i;
sewardjde4a1d02002-03-22 01:27:54 +0000659
660 p_best = a->freelist[lno];
661 if (p_best == NULL) return;
662
663 pn = pp = p_best;
664 for (i = 0; i < 20; i++) {
nethercote2d5b8162004-08-11 09:40:52 +0000665 pn = get_next_b(pn);
666 pp = get_prev_b(pp);
sewardjde4a1d02002-03-22 01:27:54 +0000667 if (pn < p_best) p_best = pn;
668 if (pp < p_best) p_best = pp;
669 }
670 if (p_best < a->freelist[lno]) {
nethercote2d5b8162004-08-11 09:40:52 +0000671# ifdef VERBOSE_MALLOC
672 VG_(printf)("retreat by %d\n", a->freelist[lno] - p_best);
sewardjde4a1d02002-03-22 01:27:54 +0000673# endif
674 a->freelist[lno] = p_best;
675 }
676}
677
678
679/*------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +0000680/*--- Sanity-check/debugging machinery. ---*/
681/*------------------------------------------------------------*/
682
njn6e6588c2005-03-13 18:52:48 +0000683#define REDZONE_LO_MASK 0x31
684#define REDZONE_HI_MASK 0x7c
nethercote2d5b8162004-08-11 09:40:52 +0000685
nethercote7ac7f7b2004-11-02 12:36:02 +0000686// Do some crude sanity checks on a Block.
sewardjde4a1d02002-03-22 01:27:54 +0000687static
nethercote2d5b8162004-08-11 09:40:52 +0000688Bool blockSane ( Arena* a, Block* b )
sewardjde4a1d02002-03-22 01:27:54 +0000689{
690# define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
nethercote7ac7f7b2004-11-02 12:36:02 +0000691 UInt i;
njn402c8612005-08-23 22:11:20 +0000692 // The lo and hi size fields will be checked (indirectly) by the call
693 // to get_rz_hi_byte().
njn472cc7c2005-07-17 17:20:30 +0000694 if (!a->clientmem && is_inuse_block(b)) {
nethercote2d5b8162004-08-11 09:40:52 +0000695 for (i = 0; i < a->rz_szB; i++) {
696 if (get_rz_lo_byte(a, b, i) !=
njn6e6588c2005-03-13 18:52:48 +0000697 (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK))
nethercote2d5b8162004-08-11 09:40:52 +0000698 {BLEAT("redzone-lo");return False;}
699 if (get_rz_hi_byte(a, b, i) !=
njn6e6588c2005-03-13 18:52:48 +0000700 (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK))
nethercote2d5b8162004-08-11 09:40:52 +0000701 {BLEAT("redzone-hi");return False;}
sewardjde4a1d02002-03-22 01:27:54 +0000702 }
703 }
704 return True;
705# undef BLEAT
706}
707
nethercote2d5b8162004-08-11 09:40:52 +0000708// Print superblocks (only for debugging).
sewardjde4a1d02002-03-22 01:27:54 +0000709static
710void ppSuperblocks ( Arena* a )
711{
njnd0e685c2005-07-17 17:55:42 +0000712 UInt i, blockno = 1;
sewardjde4a1d02002-03-22 01:27:54 +0000713 Superblock* sb = a->sblocks;
njnd0e685c2005-07-17 17:55:42 +0000714 SizeT b_bszB;
sewardjde4a1d02002-03-22 01:27:54 +0000715
716 while (sb) {
717 VG_(printf)( "\n" );
nethercote2d5b8162004-08-11 09:40:52 +0000718 VG_(printf)( "superblock %d at %p, sb->n_pl_bs = %d, next = %p\n",
719 blockno++, sb, sb->n_payload_bytes, sb->next );
njnd0e685c2005-07-17 17:55:42 +0000720 for (i = 0; i < sb->n_payload_bytes; i += b_bszB) {
721 Block* b = (Block*)&sb->payload_bytes[i];
722 b_bszB = get_bszB(b);
723 VG_(printf)( " block at %d, bszB %d: ", i, b_bszB );
njn472cc7c2005-07-17 17:20:30 +0000724 VG_(printf)( "%s, ", is_inuse_block(b) ? "inuse" : "free");
nethercote2d5b8162004-08-11 09:40:52 +0000725 VG_(printf)( "%s\n", blockSane(a, b) ? "ok" : "BAD" );
sewardjde4a1d02002-03-22 01:27:54 +0000726 }
nethercote2d5b8162004-08-11 09:40:52 +0000727 vg_assert(i == sb->n_payload_bytes); // no overshoot at end of Sb
sewardjde4a1d02002-03-22 01:27:54 +0000728 sb = sb->next;
729 }
730 VG_(printf)( "end of superblocks\n\n" );
731}
732
// Sanity check both the superblocks and the chains.
//
// Two independent traversals are made and cross-checked:
//   1. walk every Block inside every Superblock, verifying per-block
//      sanity, that adjacent free blocks were coalesced, and summing
//      the payload bytes of in-use blocks;
//   2. walk every freelist, verifying the circular doubly-linked
//      structure and that each block's payload size fits its list.
// The free-block counts from the two traversals must agree, and the
// in-use total must match the arena's bytes_on_loan counter.  Any
// failure panics via BOMB.
static void sanity_check_malloc_arena ( ArenaId aid )
{
   UInt        i, superblockctr, blockctr_sb, blockctr_li;
   UInt        blockctr_sb_free, listno;
   SizeT       b_bszB, b_pszB, list_min_pszB, list_max_pszB;
   Superblock* sb;
   Bool        thisFree, lastWasFree;
   Block*      b;
   Block*      b_prev;
   SizeT       arena_bytes_on_loan;
   Arena*      a;

#  define BOMB VG_(core_panic)("sanity_check_malloc_arena")

   a = arenaId_to_ArenaP(aid);

   // First, traverse all the superblocks, inspecting the Blocks in each.
   superblockctr = blockctr_sb = blockctr_sb_free = 0;
   arena_bytes_on_loan = 0;
   sb = a->sblocks;
   while (sb) {
      lastWasFree = False;
      superblockctr++;
      // Step block-by-block; the stride is the current block's plain
      // (untagged) byte size.
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         blockctr_sb++;
         b = (Block*)&sb->payload_bytes[i];
         // Raw (possibly tagged) size; hence mk_plain_bszB in the stride.
         b_bszB = get_bszB_as_is(b);
         if (!blockSane(a, b)) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d (bszB %d): "
                        " BAD\n", sb, i, b_bszB );
            BOMB;
         }
         thisFree = !is_inuse_block(b);
         // Two adjacent free blocks should have been merged at free time.
         if (thisFree && lastWasFree) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d (bszB %d): "
                        "UNMERGED FREES\n",
                        sb, i, b_bszB );
            BOMB;
         }
         if (thisFree) blockctr_sb_free++;
         if (!thisFree)
            arena_bytes_on_loan += bszB_to_pszB(a, b_bszB);
         lastWasFree = thisFree;
      }
      // The loop must land exactly on the end of the payload area.
      if (i > sb->n_payload_bytes) {
         VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
                      "overshoots end\n", sb);
         BOMB;
      }
      sb = sb->next;
   }

   // The recomputed in-use total must match the arena's running counter.
   if (arena_bytes_on_loan != a->bytes_on_loan) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: a->bytes_on_loan %d, "
                   "arena_bytes_on_loan %d: "
                   "MISMATCH\n", a->bytes_on_loan, arena_bytes_on_loan);
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   /* Second, traverse each list, checking that the back pointers make
      sense, counting blocks encountered, and checking that each block
      is an appropriate size for this list. */
   blockctr_li = 0;
   for (listno = 0; listno < N_MALLOC_LISTS; listno++) {
      list_min_pszB = listNo_to_pszB_min(listno);
      list_max_pszB = listNo_to_pszB_max(listno);
      b = a->freelist[listno];
      if (b == NULL) continue;
      // Walk the circular list once, stopping when we return to the head.
      while (True) {
         b_prev = b;
         b = get_next_b(b);
         if (get_prev_b(b) != b_prev) {
            VG_(printf)( "sanity_check_malloc_arena: list %d at %p: "
                         "BAD LINKAGE\n",
                         listno, b );
            BOMB;
         }
         b_pszB = get_pszB(a, b);
         if (b_pszB < list_min_pszB || b_pszB > list_max_pszB) {
            VG_(printf)(
               "sanity_check_malloc_arena: list %d at %p: "
               "WRONG CHAIN SIZE %dB (%dB, %dB)\n",
               listno, b, b_pszB, list_min_pszB, list_max_pszB );
            BOMB;
         }
         blockctr_li++;
         if (b == a->freelist[listno]) break;
      }
   }

   // Every free block seen in a superblock must be on exactly one list.
   if (blockctr_sb_free != blockctr_li) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: BLOCK COUNT MISMATCH "
                   "(via sbs %d, via lists %d)\n",
                   blockctr_sb_free, blockctr_li );
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   if (VG_(clo_verbosity) > 2)
      VG_(message)(Vg_DebugMsg,
                   "%8s: %2d sbs, %5d bs, %2d/%-2d free bs, "
                   "%7d mmap, %7d loan",
                   a->name,
                   superblockctr,
                   blockctr_sb, blockctr_sb_free, blockctr_li,
                   a->bytes_mmaped, a->bytes_on_loan);
#  undef BOMB
}
847
848
nethercote885dd912004-08-03 23:14:00 +0000849void VG_(sanity_check_malloc_all) ( void )
sewardjde4a1d02002-03-22 01:27:54 +0000850{
nethercote7ac7f7b2004-11-02 12:36:02 +0000851 UInt i;
sewardjde4a1d02002-03-22 01:27:54 +0000852 for (i = 0; i < VG_N_ARENAS; i++)
nethercote885dd912004-08-03 23:14:00 +0000853 sanity_check_malloc_arena ( i );
sewardjde4a1d02002-03-22 01:27:54 +0000854}
855
sewardjde4a1d02002-03-22 01:27:54 +0000856
nethercote2d5b8162004-08-11 09:40:52 +0000857/*------------------------------------------------------------*/
858/*--- Creating and deleting blocks. ---*/
859/*------------------------------------------------------------*/
860
861// Mark the bytes at b .. b+bszB-1 as not in use, and add them to the
862// relevant free list.
863
864static
nethercote7ac7f7b2004-11-02 12:36:02 +0000865void mkFreeBlock ( Arena* a, Block* b, SizeT bszB, UInt b_lno )
jsewardb1a26ae2004-03-14 03:06:37 +0000866{
nethercote7ac7f7b2004-11-02 12:36:02 +0000867 SizeT pszB = bszB_to_pszB(a, bszB);
nethercote2d5b8162004-08-11 09:40:52 +0000868 vg_assert(b_lno == pszB_to_listNo(pszB));
sewardjb5f6f512005-03-10 23:59:00 +0000869 //zzVALGRIND_MAKE_WRITABLE(b, bszB);
nethercote2d5b8162004-08-11 09:40:52 +0000870 // Set the size fields and indicate not-in-use.
njn8d3f8452005-07-20 04:12:41 +0000871 set_bszB(b, mk_free_bszB(bszB));
nethercote2d5b8162004-08-11 09:40:52 +0000872
873 // Add to the relevant list.
874 if (a->freelist[b_lno] == NULL) {
875 set_prev_b(b, b);
876 set_next_b(b, b);
877 a->freelist[b_lno] = b;
878 } else {
879 Block* b_prev = get_prev_b(a->freelist[b_lno]);
880 Block* b_next = a->freelist[b_lno];
881 set_next_b(b_prev, b);
882 set_prev_b(b_next, b);
883 set_next_b(b, b_next);
884 set_prev_b(b, b_prev);
885 }
886# ifdef DEBUG_MALLOC
887 (void)blockSane(a,b);
888# endif
889}
890
891// Mark the bytes at b .. b+bszB-1 as in use, and set up the block
892// appropriately.
893static
nethercote7ac7f7b2004-11-02 12:36:02 +0000894void mkInuseBlock ( Arena* a, Block* b, SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000895{
nethercote7ac7f7b2004-11-02 12:36:02 +0000896 UInt i;
nethercote2d5b8162004-08-11 09:40:52 +0000897 vg_assert(bszB >= min_useful_bszB(a));
sewardjb5f6f512005-03-10 23:59:00 +0000898 //zzVALGRIND_MAKE_WRITABLE(b, bszB);
njn8d3f8452005-07-20 04:12:41 +0000899 set_bszB(b, mk_inuse_bszB(bszB));
nethercote2d5b8162004-08-11 09:40:52 +0000900 set_prev_b(b, NULL); // Take off freelist
901 set_next_b(b, NULL); // ditto
902 if (!a->clientmem) {
903 for (i = 0; i < a->rz_szB; i++) {
njn6e6588c2005-03-13 18:52:48 +0000904 set_rz_lo_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK));
905 set_rz_hi_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK));
nethercote2d5b8162004-08-11 09:40:52 +0000906 }
907 }
908# ifdef DEBUG_MALLOC
909 (void)blockSane(a,b);
910# endif
911}
912
913// Remove a block from a given list. Does no sanity checking.
914static
nethercote7ac7f7b2004-11-02 12:36:02 +0000915void unlinkBlock ( Arena* a, Block* b, UInt listno )
nethercote2d5b8162004-08-11 09:40:52 +0000916{
njn6e6588c2005-03-13 18:52:48 +0000917 vg_assert(listno < N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +0000918 if (get_prev_b(b) == b) {
919 // Only one element in the list; treat it specially.
920 vg_assert(get_next_b(b) == b);
921 a->freelist[listno] = NULL;
922 } else {
923 Block* b_prev = get_prev_b(b);
924 Block* b_next = get_next_b(b);
925 a->freelist[listno] = b_prev;
926 set_next_b(b_prev, b_next);
927 set_prev_b(b_next, b_prev);
928 swizzle ( a, listno );
929 }
930 set_prev_b(b, NULL);
931 set_next_b(b, NULL);
jsewardb1a26ae2004-03-14 03:06:37 +0000932}
933
934
sewardjde4a1d02002-03-22 01:27:54 +0000935/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000936/*--- Core-visible functions. ---*/
sewardjde4a1d02002-03-22 01:27:54 +0000937/*------------------------------------------------------------*/
938
nethercote2d5b8162004-08-11 09:40:52 +0000939// Align the request size.
940static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000941SizeT align_req_pszB ( SizeT req_pszB )
nethercote2d5b8162004-08-11 09:40:52 +0000942{
nethercote7ac7f7b2004-11-02 12:36:02 +0000943 SizeT n = VG_MIN_MALLOC_SZB-1;
nethercote2d5b8162004-08-11 09:40:52 +0000944 return ((req_pszB + n) & (~n));
945}
946
nethercote7ac7f7b2004-11-02 12:36:02 +0000947void* VG_(arena_malloc) ( ArenaId aid, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +0000948{
nethercote7ac7f7b2004-11-02 12:36:02 +0000949 SizeT req_bszB, frag_bszB, b_bszB;
950 UInt lno;
sewardjde4a1d02002-03-22 01:27:54 +0000951 Superblock* new_sb;
nethercote2d5b8162004-08-11 09:40:52 +0000952 Block* b = NULL;
sewardjde4a1d02002-03-22 01:27:54 +0000953 Arena* a;
jsewardb1a26ae2004-03-14 03:06:37 +0000954 void* v;
sewardjde4a1d02002-03-22 01:27:54 +0000955
956 VGP_PUSHCC(VgpMalloc);
957
sewardj45f4e7c2005-09-27 19:20:21 +0000958 ensure_mm_init(aid);
sewardjde4a1d02002-03-22 01:27:54 +0000959 a = arenaId_to_ArenaP(aid);
960
nethercote7ac7f7b2004-11-02 12:36:02 +0000961 vg_assert(req_pszB < MAX_PSZB);
nethercote2d5b8162004-08-11 09:40:52 +0000962 req_pszB = align_req_pszB(req_pszB);
963 req_bszB = pszB_to_bszB(a, req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +0000964
nethercote2d5b8162004-08-11 09:40:52 +0000965 // Scan through all the big-enough freelists for a block.
njn6e6588c2005-03-13 18:52:48 +0000966 for (lno = pszB_to_listNo(req_pszB); lno < N_MALLOC_LISTS; lno++) {
sewardjde4a1d02002-03-22 01:27:54 +0000967 b = a->freelist[lno];
nethercote2d5b8162004-08-11 09:40:52 +0000968 if (NULL == b) continue; // If this list is empty, try the next one.
sewardjde4a1d02002-03-22 01:27:54 +0000969 while (True) {
njnd0e685c2005-07-17 17:55:42 +0000970 b_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +0000971 if (b_bszB >= req_bszB) goto obtained_block; // success!
972 b = get_next_b(b);
973 if (b == a->freelist[lno]) break; // traversed entire freelist
sewardjde4a1d02002-03-22 01:27:54 +0000974 }
sewardjde4a1d02002-03-22 01:27:54 +0000975 }
976
nethercote2d5b8162004-08-11 09:40:52 +0000977 // If we reach here, no suitable block found, allocate a new superblock
njn6e6588c2005-03-13 18:52:48 +0000978 vg_assert(lno == N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +0000979 new_sb = newSuperblock(a, req_bszB);
980 if (NULL == new_sb) {
981 // Should only fail if for client, otherwise, should have aborted
982 // already.
983 vg_assert(VG_AR_CLIENT == aid);
984 return NULL;
sewardjde4a1d02002-03-22 01:27:54 +0000985 }
nethercote2d5b8162004-08-11 09:40:52 +0000986 new_sb->next = a->sblocks;
987 a->sblocks = new_sb;
988 b = (Block*)&new_sb->payload_bytes[0];
989 lno = pszB_to_listNo(bszB_to_pszB(a, new_sb->n_payload_bytes));
990 mkFreeBlock ( a, b, new_sb->n_payload_bytes, lno);
991 // fall through
sewardjde4a1d02002-03-22 01:27:54 +0000992
nethercote2d5b8162004-08-11 09:40:52 +0000993 obtained_block:
994 // Ok, we can allocate from b, which lives in list lno.
sewardjde4a1d02002-03-22 01:27:54 +0000995 vg_assert(b != NULL);
njn6e6588c2005-03-13 18:52:48 +0000996 vg_assert(lno < N_MALLOC_LISTS);
sewardjde4a1d02002-03-22 01:27:54 +0000997 vg_assert(a->freelist[lno] != NULL);
njnd0e685c2005-07-17 17:55:42 +0000998 b_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +0000999 // req_bszB is the size of the block we are after. b_bszB is the
1000 // size of what we've actually got. */
1001 vg_assert(b_bszB >= req_bszB);
sewardjde4a1d02002-03-22 01:27:54 +00001002
nethercote2d5b8162004-08-11 09:40:52 +00001003 // Could we split this block and still get a useful fragment?
1004 frag_bszB = b_bszB - req_bszB;
1005 if (frag_bszB >= min_useful_bszB(a)) {
1006 // Yes, split block in two, put the fragment on the appropriate free
1007 // list, and update b_bszB accordingly.
1008 // printf( "split %dB into %dB and %dB\n", b_bszB, req_bszB, frag_bszB );
sewardjde4a1d02002-03-22 01:27:54 +00001009 unlinkBlock(a, b, lno);
nethercote2d5b8162004-08-11 09:40:52 +00001010 mkInuseBlock(a, b, req_bszB);
1011 mkFreeBlock(a, &b[req_bszB], frag_bszB,
1012 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
njnd0e685c2005-07-17 17:55:42 +00001013 b_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +00001014 } else {
1015 // No, mark as in use and use as-is.
1016 unlinkBlock(a, b, lno);
1017 mkInuseBlock(a, b, b_bszB);
sewardjde4a1d02002-03-22 01:27:54 +00001018 }
sewardjde4a1d02002-03-22 01:27:54 +00001019
nethercote2d5b8162004-08-11 09:40:52 +00001020 // Update stats
1021 a->bytes_on_loan += bszB_to_pszB(a, b_bszB);
sewardjde4a1d02002-03-22 01:27:54 +00001022 if (a->bytes_on_loan > a->bytes_on_loan_max)
1023 a->bytes_on_loan_max = a->bytes_on_loan;
1024
1025# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00001026 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001027# endif
1028
njn25e49d8e72002-09-23 09:36:25 +00001029 VGP_POPCC(VgpMalloc);
nethercote2d5b8162004-08-11 09:40:52 +00001030 v = get_block_payload(a, b);
1031 vg_assert( (((Addr)v) & (VG_MIN_MALLOC_SZB-1)) == 0 );
sewardjb5f6f512005-03-10 23:59:00 +00001032
sewardj45f4e7c2005-09-27 19:20:21 +00001033 //zzVALGRIND_MALLOCLIKE_BLOCK(v, req_pszB, 0, False);
jsewardb1a26ae2004-03-14 03:06:37 +00001034 return v;
sewardjde4a1d02002-03-22 01:27:54 +00001035}
1036
1037
// Return the block whose payload starts at ptr to arena aid, coalescing
// it with a free successor and/or predecessor block where possible so
// that no two adjacent free blocks ever exist.  ptr == NULL is a no-op.
void VG_(arena_free) ( ArenaId aid, void* ptr )
{
   Superblock* sb;
   UByte* sb_start;
   UByte* sb_end;
   Block* other_b;
   Block* b;
   SizeT b_bszB, b_pszB, other_bszB;
   UInt b_listno;
   Arena* a;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init(aid);
   a = arenaId_to_ArenaP(aid);

   if (ptr == NULL) {
      VGP_POPCC(VgpMalloc);
      return;
   }

   b = get_payload_block(a, ptr);

   /* If this is one of V's areas, check carefully the block we're
      getting back.  This picks up simple block-end overruns. */
   if (aid != VG_AR_CLIENT)
      vg_assert(blockSane(a, b));

   b_bszB   = get_bszB(b);
   b_pszB   = bszB_to_pszB(a, b_bszB);
   // Locate the containing superblock so we can test whether b has
   // in-superblock neighbours to merge with.
   sb       = findSb( a, b );
   sb_start = &sb->payload_bytes[0];
   sb_end   = &sb->payload_bytes[sb->n_payload_bytes - 1];

   a->bytes_on_loan -= b_pszB;

   /* If this is one of V's areas, fill it up with junk to enhance the
      chances of catching any later reads of it.  Note, 0xDD is
      carefully chosen junk :-), in that: (1) 0xDDDDDDDD is an invalid
      and non-word-aligned address on most systems, and (2) 0xDD is a
      value which is unlikely to be generated by the new compressed
      Vbits representation for memcheck. */
   if (aid != VG_AR_CLIENT)
      VG_(memset)(ptr, 0xDD, (SizeT)b_pszB);

   // Put this chunk back on a list somewhere.
   b_listno = pszB_to_listNo(b_pszB);
   mkFreeBlock( a, b, b_bszB, b_listno );

   // See if this block can be merged with its successor.
   // First test if we're far enough before the superblock's end to possibly
   // have a successor.
   // NOTE(review): pointer arithmetic here assumes Block* advances one
   // byte per unit (ie. Block is a byte-sized type) -- confirm against
   // the Block typedef earlier in this file.
   other_b = b + b_bszB;
   if (other_b+min_useful_bszB(a)-1 <= (Block*)sb_end) {
      // Ok, we have a successor, merge if it's not in use.
      other_bszB = get_bszB(other_b);
      if (!is_inuse_block(other_b)) {
         // VG_(printf)( "merge-successor\n");
#        ifdef DEBUG_MALLOC
         vg_assert(blockSane(a, other_b));
#        endif
         // Pull both free blocks off their lists, then re-insert the
         // combined block on the list matching its enlarged size.
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other_b, pszB_to_listNo(bszB_to_pszB(a,other_bszB)) );
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
      }
   } else {
      // Not enough space for successor: check that b is the last block
      // ie. there are no unused bytes at the end of the Superblock.
      vg_assert(other_b-1 == (Block*)sb_end);
   }

   // Then see if this block can be merged with its predecessor.
   // First test if we're far enough after the superblock's start to possibly
   // have a predecessor.
   if (b >= (Block*)sb_start + min_useful_bszB(a)) {
      // Ok, we have a predecessor, merge if it's not in use.
      other_b = get_predecessor_block( b );
      other_bszB = get_bszB(other_b);
      if (!is_inuse_block(other_b)) {
         // VG_(printf)( "merge-predecessor\n");
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other_b, pszB_to_listNo(bszB_to_pszB(a, other_bszB)) );
         // The merged block starts at the predecessor's address.
         b = other_b;
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
      }
   } else {
      // Not enough space for predecessor: check that b is the first block,
      // ie. there are no unused bytes at the start of the Superblock.
      vg_assert((Block*)sb_start == b);
   }

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   //zzVALGRIND_FREELIKE_BLOCK(ptr, 0);

   VGP_POPCC(VgpMalloc);
}
1141
1142
1143/*
1144 The idea for malloc_aligned() is to allocate a big block, base, and
1145 then split it into two parts: frag, which is returned to the the
1146 free pool, and align, which is the bit we're really after. Here's
1147 a picture. L and H denote the block lower and upper overheads, in
nethercote2d5b8162004-08-11 09:40:52 +00001148 bytes. The details are gruesome. Note it is slightly complicated
sewardjde4a1d02002-03-22 01:27:54 +00001149 because the initial request to generate base may return a bigger
1150 block than we asked for, so it is important to distinguish the base
1151 request size and the base actual size.
1152
1153 frag_b align_b
1154 | |
1155 | frag_p | align_p
1156 | | | |
1157 v v v v
1158
1159 +---+ +---+---+ +---+
1160 | L |----------------| H | L |---------------| H |
1161 +---+ +---+---+ +---+
1162
1163 ^ ^ ^
1164 | | :
1165 | base_p this addr must be aligned
1166 |
1167 base_b
1168
1169 . . . . . . .
nethercote2d5b8162004-08-11 09:40:52 +00001170 <------ frag_bszB -------> . . .
1171 . <------------- base_pszB_act -----------> .
sewardjde4a1d02002-03-22 01:27:54 +00001172 . . . . . . .
1173
1174*/
njn717cde52005-05-10 02:47:21 +00001175void* VG_(arena_memalign) ( ArenaId aid, SizeT req_alignB, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001176{
nethercote7ac7f7b2004-11-02 12:36:02 +00001177 SizeT base_pszB_req, base_pszB_act, frag_bszB;
nethercote2d5b8162004-08-11 09:40:52 +00001178 Block *base_b, *align_b;
1179 UByte *base_p, *align_p;
nethercote7ac7f7b2004-11-02 12:36:02 +00001180 SizeT saved_bytes_on_loan;
sewardjde4a1d02002-03-22 01:27:54 +00001181 Arena* a;
1182
njn25e49d8e72002-09-23 09:36:25 +00001183 VGP_PUSHCC(VgpMalloc);
1184
sewardj45f4e7c2005-09-27 19:20:21 +00001185 ensure_mm_init(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001186 a = arenaId_to_ArenaP(aid);
1187
nethercote7ac7f7b2004-11-02 12:36:02 +00001188 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00001189
nethercote2d5b8162004-08-11 09:40:52 +00001190 // Check that the requested alignment seems reasonable; that is, is
1191 // a power of 2.
1192 if (req_alignB < VG_MIN_MALLOC_SZB
1193 || req_alignB > 1048576
njn717cde52005-05-10 02:47:21 +00001194 || VG_(log2)( req_alignB ) == -1 /* not a power of 2 */) {
1195 VG_(printf)("VG_(arena_memalign)(%p, %d, %d)\nbad alignment",
nethercote2d5b8162004-08-11 09:40:52 +00001196 a, req_alignB, req_pszB );
njn717cde52005-05-10 02:47:21 +00001197 VG_(core_panic)("VG_(arena_memalign)");
nethercote2d5b8162004-08-11 09:40:52 +00001198 /*NOTREACHED*/
sewardjde4a1d02002-03-22 01:27:54 +00001199 }
nethercote2d5b8162004-08-11 09:40:52 +00001200 // Paranoid
1201 vg_assert(req_alignB % VG_MIN_MALLOC_SZB == 0);
sewardjde4a1d02002-03-22 01:27:54 +00001202
1203 /* Required payload size for the aligned chunk. */
nethercote2d5b8162004-08-11 09:40:52 +00001204 req_pszB = align_req_pszB(req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001205
nethercote2d5b8162004-08-11 09:40:52 +00001206 /* Payload size to request for the big block that we will split up. */
1207 base_pszB_req = req_pszB + min_useful_bszB(a) + req_alignB;
sewardjde4a1d02002-03-22 01:27:54 +00001208
1209 /* Payload ptr for the block we are going to split. Note this
1210 changes a->bytes_on_loan; we save and restore it ourselves. */
1211 saved_bytes_on_loan = a->bytes_on_loan;
nethercote2d5b8162004-08-11 09:40:52 +00001212 base_p = VG_(arena_malloc) ( aid, base_pszB_req );
sewardjde4a1d02002-03-22 01:27:54 +00001213 a->bytes_on_loan = saved_bytes_on_loan;
1214
tom8af1a172005-10-06 12:04:26 +00001215 /* Give up if we couldn't allocate enough space */
1216 if (base_p == 0)
1217 return 0;
1218
sewardjde4a1d02002-03-22 01:27:54 +00001219 /* Block ptr for the block we are going to split. */
nethercote2d5b8162004-08-11 09:40:52 +00001220 base_b = get_payload_block ( a, base_p );
sewardjde4a1d02002-03-22 01:27:54 +00001221
1222 /* Pointer to the payload of the aligned block we are going to
1223 return. This has to be suitably aligned. */
nethercote2d5b8162004-08-11 09:40:52 +00001224 align_p = align_upwards ( base_b + 2 * overhead_szB_lo(a)
1225 + overhead_szB_hi(a),
sewardjde4a1d02002-03-22 01:27:54 +00001226 req_alignB );
nethercote2d5b8162004-08-11 09:40:52 +00001227 align_b = get_payload_block(a, align_p);
sewardjde4a1d02002-03-22 01:27:54 +00001228
1229 /* The block size of the fragment we will create. This must be big
1230 enough to actually create a fragment. */
nethercote2d5b8162004-08-11 09:40:52 +00001231 frag_bszB = align_b - base_b;
1232
1233 vg_assert(frag_bszB >= min_useful_bszB(a));
sewardjde4a1d02002-03-22 01:27:54 +00001234
1235 /* The actual payload size of the block we are going to split. */
njn089f51f2005-07-17 18:12:00 +00001236 base_pszB_act = get_pszB(a, base_b);
sewardjde4a1d02002-03-22 01:27:54 +00001237
nethercote2d5b8162004-08-11 09:40:52 +00001238 /* Create the fragment block, and put it back on the relevant free list. */
1239 mkFreeBlock ( a, base_b, frag_bszB,
1240 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)) );
sewardjde4a1d02002-03-22 01:27:54 +00001241
1242 /* Create the aligned block. */
nethercote2d5b8162004-08-11 09:40:52 +00001243 mkInuseBlock ( a, align_b,
1244 base_p + base_pszB_act
1245 + overhead_szB_hi(a) - (UByte*)align_b );
sewardjde4a1d02002-03-22 01:27:54 +00001246
1247 /* Final sanity checks. */
njn472cc7c2005-07-17 17:20:30 +00001248 vg_assert( is_inuse_block(get_payload_block(a, align_p)) );
sewardjde4a1d02002-03-22 01:27:54 +00001249
njn089f51f2005-07-17 18:12:00 +00001250 vg_assert(req_pszB <= get_pszB(a, get_payload_block(a, align_p)));
sewardjde4a1d02002-03-22 01:27:54 +00001251
njn089f51f2005-07-17 18:12:00 +00001252 a->bytes_on_loan += get_pszB(a, get_payload_block(a, align_p));
sewardjde4a1d02002-03-22 01:27:54 +00001253 if (a->bytes_on_loan > a->bytes_on_loan_max)
1254 a->bytes_on_loan_max = a->bytes_on_loan;
1255
1256# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00001257 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001258# endif
1259
njn25e49d8e72002-09-23 09:36:25 +00001260 VGP_POPCC(VgpMalloc);
1261
nethercote2d5b8162004-08-11 09:40:52 +00001262 vg_assert( (((Addr)align_p) % req_alignB) == 0 );
sewardjb5f6f512005-03-10 23:59:00 +00001263
sewardj45f4e7c2005-09-27 19:20:21 +00001264 //zzVALGRIND_MALLOCLIKE_BLOCK(align_p, req_pszB, 0, False);
sewardjb5f6f512005-03-10 23:59:00 +00001265
nethercote2d5b8162004-08-11 09:40:52 +00001266 return align_p;
1267}
1268
1269
njn2dc09e62005-08-17 04:03:31 +00001270SizeT VG_(arena_payload_szB) ( ThreadId tid, ArenaId aid, void* ptr )
nethercote2d5b8162004-08-11 09:40:52 +00001271{
1272 Arena* a = arenaId_to_ArenaP(aid);
1273 Block* b = get_payload_block(a, ptr);
njn089f51f2005-07-17 18:12:00 +00001274 return get_pszB(a, b);
sewardjde4a1d02002-03-22 01:27:54 +00001275}
1276
njn088bfb42005-08-17 05:01:37 +00001277// We cannot return the whole struct as the library function does,
1278// because this is called by a client request. So instead we use
1279// a pointer to do call by reference.
1280void VG_(mallinfo) ( ThreadId tid, struct vg_mallinfo* mi )
1281{
1282 // Should do better than this...
1283 VG_(memset)(mi, 0x0, sizeof(struct vg_mallinfo));
1284}
sewardjde4a1d02002-03-22 01:27:54 +00001285
sewardj45f4e7c2005-09-27 19:20:21 +00001286
sewardjde4a1d02002-03-22 01:27:54 +00001287/*------------------------------------------------------------*/
1288/*--- Services layered on top of malloc/free. ---*/
1289/*------------------------------------------------------------*/
1290
njn828022a2005-03-13 14:56:31 +00001291void* VG_(arena_calloc) ( ArenaId aid, SizeT nmemb, SizeT bytes_per_memb )
sewardjde4a1d02002-03-22 01:27:54 +00001292{
nethercote7ac7f7b2004-11-02 12:36:02 +00001293 SizeT size;
sewardjde4a1d02002-03-22 01:27:54 +00001294 UChar* p;
njn25e49d8e72002-09-23 09:36:25 +00001295
1296 VGP_PUSHCC(VgpMalloc);
1297
njn926ed472005-03-11 04:44:10 +00001298 size = nmemb * bytes_per_memb;
1299 vg_assert(size >= nmemb && size >= bytes_per_memb);// check against overflow
njn3e884182003-04-15 13:03:23 +00001300
njn828022a2005-03-13 14:56:31 +00001301 p = VG_(arena_malloc) ( aid, size );
njn3e884182003-04-15 13:03:23 +00001302
njn926ed472005-03-11 04:44:10 +00001303 VG_(memset)(p, 0, size);
sewardjb5f6f512005-03-10 23:59:00 +00001304
sewardj45f4e7c2005-09-27 19:20:21 +00001305 //zzVALGRIND_MALLOCLIKE_BLOCK(p, size, 0, True);
njn25e49d8e72002-09-23 09:36:25 +00001306
1307 VGP_POPCC(VgpMalloc);
1308
sewardjde4a1d02002-03-22 01:27:54 +00001309 return p;
1310}
1311
1312
njn828022a2005-03-13 14:56:31 +00001313void* VG_(arena_realloc) ( ArenaId aid, void* ptr, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001314{
1315 Arena* a;
njn089f51f2005-07-17 18:12:00 +00001316 SizeT old_pszB;
sewardjb5f6f512005-03-10 23:59:00 +00001317 UChar *p_new;
nethercote2d5b8162004-08-11 09:40:52 +00001318 Block* b;
sewardjde4a1d02002-03-22 01:27:54 +00001319
njn25e49d8e72002-09-23 09:36:25 +00001320 VGP_PUSHCC(VgpMalloc);
1321
sewardj45f4e7c2005-09-27 19:20:21 +00001322 ensure_mm_init(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001323 a = arenaId_to_ArenaP(aid);
1324
nethercote7ac7f7b2004-11-02 12:36:02 +00001325 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00001326
nethercote2d5b8162004-08-11 09:40:52 +00001327 b = get_payload_block(a, ptr);
1328 vg_assert(blockSane(a, b));
sewardjde4a1d02002-03-22 01:27:54 +00001329
njn472cc7c2005-07-17 17:20:30 +00001330 vg_assert(is_inuse_block(b));
njn089f51f2005-07-17 18:12:00 +00001331 old_pszB = get_pszB(a, b);
sewardjde4a1d02002-03-22 01:27:54 +00001332
njn25e49d8e72002-09-23 09:36:25 +00001333 if (req_pszB <= old_pszB) {
1334 VGP_POPCC(VgpMalloc);
1335 return ptr;
1336 }
sewardjde4a1d02002-03-22 01:27:54 +00001337
njn828022a2005-03-13 14:56:31 +00001338 p_new = VG_(arena_malloc) ( aid, req_pszB );
1339
sewardjb5f6f512005-03-10 23:59:00 +00001340 VG_(memcpy)(p_new, ptr, old_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001341
sewardjb5f6f512005-03-10 23:59:00 +00001342 VG_(arena_free)(aid, ptr);
njn25e49d8e72002-09-23 09:36:25 +00001343
1344 VGP_POPCC(VgpMalloc);
sewardjde4a1d02002-03-22 01:27:54 +00001345 return p_new;
1346}
1347
1348
njn6ba622c2005-06-11 01:12:08 +00001349/* Inline just for the wrapper VG_(strdup) below */
1350__inline__ Char* VG_(arena_strdup) ( ArenaId aid, const Char* s )
1351{
1352 Int i;
1353 Int len;
1354 Char* res;
1355
1356 if (s == NULL)
1357 return NULL;
1358
1359 len = VG_(strlen)(s) + 1;
1360 res = VG_(arena_malloc) (aid, len);
1361
1362 for (i = 0; i < len; i++)
1363 res[i] = s[i];
1364 return res;
1365}
1366
1367
sewardjde4a1d02002-03-22 01:27:54 +00001368/*------------------------------------------------------------*/
nethercote996901a2004-08-03 13:29:09 +00001369/*--- Tool-visible functions. ---*/
njn25e49d8e72002-09-23 09:36:25 +00001370/*------------------------------------------------------------*/
1371
nethercote2d5b8162004-08-11 09:40:52 +00001372// All just wrappers to avoid exposing arenas to tools.
njn25e49d8e72002-09-23 09:36:25 +00001373
// Tool-visible malloc: a thin wrapper that allocates from the tool arena,
// keeping arena identifiers hidden from tools.
void* VG_(malloc) ( SizeT nbytes )
{
   return VG_(arena_malloc) ( VG_AR_TOOL, nbytes );
}
1378
// Tool-visible free: releases a block previously obtained from the
// tool arena via VG_(malloc)/VG_(calloc)/VG_(realloc)/VG_(strdup).
void VG_(free) ( void* ptr )
{
   VG_(arena_free) ( VG_AR_TOOL, ptr );
}
1383
// Tool-visible calloc: zero-filled allocation of nmemb * bytes_per_memb
// bytes from the tool arena.
void* VG_(calloc) ( SizeT nmemb, SizeT bytes_per_memb )
{
   return VG_(arena_calloc) ( VG_AR_TOOL, nmemb, bytes_per_memb );
}
1388
// Tool-visible realloc: resize a tool-arena block to at least 'size'
// payload bytes (see VG_(arena_realloc) for the grow/shrink semantics).
void* VG_(realloc) ( void* ptr, SizeT size )
{
   return VG_(arena_realloc) ( VG_AR_TOOL, ptr, size );
}
1393
// Tool-visible strdup: duplicate 's' into the tool arena; returns NULL
// if 's' is NULL.
Char* VG_(strdup) ( const Char* s )
{
   return VG_(arena_strdup) ( VG_AR_TOOL, s );
}
1398
sewardjde4a1d02002-03-22 01:27:54 +00001399/*--------------------------------------------------------------------*/
njn717cde52005-05-10 02:47:21 +00001400/*--- end ---*/
sewardjde4a1d02002-03-22 01:27:54 +00001401/*--------------------------------------------------------------------*/