blob: f78b18eac73c54810441c64a7ed2cf5b6c4c02b4 [file] [log] [blame]
sewardjde4a1d02002-03-22 01:27:54 +00001
2/*--------------------------------------------------------------------*/
3/*--- An implementation of malloc/free which doesn't use sbrk. ---*/
njn717cde52005-05-10 02:47:21 +00004/*--- m_mallocfree.c ---*/
sewardjde4a1d02002-03-22 01:27:54 +00005/*--------------------------------------------------------------------*/
6
7/*
njnb9c427c2004-12-01 14:14:42 +00008 This file is part of Valgrind, a dynamic binary instrumentation
9 framework.
sewardjde4a1d02002-03-22 01:27:54 +000010
sewardj4d474d02008-02-11 11:34:59 +000011 Copyright (C) 2000-2008 Julian Seward
sewardjde4a1d02002-03-22 01:27:54 +000012 jseward@acm.org
sewardjde4a1d02002-03-22 01:27:54 +000013
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
njn25e49d8e72002-09-23 09:36:25 +000029 The GNU General Public License is contained in the file COPYING.
sewardjde4a1d02002-03-22 01:27:54 +000030*/
31
njnc7561b92005-06-19 01:24:32 +000032#include "pub_core_basics.h"
sewardj4cfea4f2006-10-14 19:26:10 +000033#include "pub_core_vki.h"
sewardj45f4e7c2005-09-27 19:20:21 +000034#include "pub_core_debuglog.h"
njn97405b22005-06-02 03:39:33 +000035#include "pub_core_libcbase.h"
sewardj45f4e7c2005-09-27 19:20:21 +000036#include "pub_core_aspacemgr.h"
njn132bfcc2005-06-04 19:16:06 +000037#include "pub_core_libcassert.h"
njn36a20fa2005-06-03 03:08:39 +000038#include "pub_core_libcprint.h"
njnaf1d7df2005-06-11 01:31:52 +000039#include "pub_core_mallocfree.h"
njn20242342005-05-16 23:31:24 +000040#include "pub_core_options.h"
njn32397c02007-11-10 04:08:08 +000041#include "pub_core_threadstate.h" // For VG_INVALID_THREADID
njnfc51f8d2005-06-21 03:20:17 +000042#include "pub_core_tooliface.h"
njn296c24d2005-05-15 03:52:40 +000043#include "valgrind.h"
sewardj55f9d1a2005-04-25 11:11:44 +000044
sewardjb5f6f512005-03-10 23:59:00 +000045//zz#include "memcheck/memcheck.h"
sewardjde4a1d02002-03-22 01:27:54 +000046
sewardj0b3fd2d2007-08-21 10:55:26 +000047// #define DEBUG_MALLOC // turn on heavyweight debugging machinery
48// #define VERBOSE_MALLOC // make verbose, esp. in debugging machinery
nethercote2d5b8162004-08-11 09:40:52 +000049
bart545380e2008-04-21 17:28:50 +000050/* Number and total size of blocks in free queue. Used by mallinfo(). */
51Long VG_(free_queue_volume) = 0;
52Long VG_(free_queue_length) = 0;
53
nethercote2d5b8162004-08-11 09:40:52 +000054/*------------------------------------------------------------*/
55/*--- Main types ---*/
56/*------------------------------------------------------------*/
57
sewardjc1ac9772007-08-20 22:57:56 +000058#define N_MALLOC_LISTS 112 // do not change this
nethercote2d5b8162004-08-11 09:40:52 +000059
nethercote7ac7f7b2004-11-02 12:36:02 +000060// The amount you can ask for is limited only by sizeof(SizeT)...
61#define MAX_PSZB (~((SizeT)0x0))
nethercote2d5b8162004-08-11 09:40:52 +000062
sewardj0b3fd2d2007-08-21 10:55:26 +000063// Each arena has a sorted array of superblocks, which expands
64// dynamically. This is its initial size.
65#define SBLOCKS_SIZE_INITIAL 50
66
nethercote2d5b8162004-08-11 09:40:52 +000067typedef UChar UByte;
68
njn8d3f8452005-07-20 04:12:41 +000069/* Layout of an in-use block:
nethercote2d5b8162004-08-11 09:40:52 +000070
njn8d3f8452005-07-20 04:12:41 +000071 this block total szB (sizeof(SizeT) bytes)
njn7ce83112005-08-24 22:38:00 +000072 red zone bytes (depends on Arena.rz_szB, but >= sizeof(void*))
njn8d3f8452005-07-20 04:12:41 +000073 (payload bytes)
njn7ce83112005-08-24 22:38:00 +000074 red zone bytes (depends on Arena.rz_szB, but >= sizeof(void*))
njn8d3f8452005-07-20 04:12:41 +000075 this block total szB (sizeof(SizeT) bytes)
nethercote2d5b8162004-08-11 09:40:52 +000076
njn8d3f8452005-07-20 04:12:41 +000077 Layout of a block on the free list:
nethercote2d5b8162004-08-11 09:40:52 +000078
njn8d3f8452005-07-20 04:12:41 +000079 this block total szB (sizeof(SizeT) bytes)
80 freelist previous ptr (sizeof(void*) bytes)
81 excess red zone bytes (if Arena.rz_szB > sizeof(void*))
82 (payload bytes)
83 excess red zone bytes (if Arena.rz_szB > sizeof(void*))
84 freelist next ptr (sizeof(void*) bytes)
85 this block total szB (sizeof(SizeT) bytes)
nethercote2d5b8162004-08-11 09:40:52 +000086
njn8d3f8452005-07-20 04:12:41 +000087 Total size in bytes (bszB) and payload size in bytes (pszB)
88 are related by:
nethercote2d5b8162004-08-11 09:40:52 +000089
njn8d3f8452005-07-20 04:12:41 +000090 bszB == pszB + 2*sizeof(SizeT) + 2*a->rz_szB
91
njna527a492005-12-16 17:06:37 +000092 The minimum overhead per heap block for arenas used by
93 the core is:
94
95 32-bit platforms: 2*4 + 2*4 == 16 bytes
96 64-bit platforms: 2*8 + 2*8 == 32 bytes
97
98 In both cases extra overhead may be incurred when rounding the payload
99 size up to VG_MIN_MALLOC_SZB.
100
njn8d3f8452005-07-20 04:12:41 +0000101 Furthermore, both size fields in the block have their least-significant
102 bit set if the block is not in use, and unset if it is in use.
103 (The bottom 3 or so bits are always free for this because of alignment.)
104 A block size of zero is not possible, because a block always has at
105 least two SizeTs and two pointers of overhead.
106
107 Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned. This is
108 achieved by ensuring that Superblocks are VG_MIN_MALLOC_SZB-aligned
109 (see newSuperblock() for how), and that the lengths of the following
110 things are a multiple of VG_MIN_MALLOC_SZB:
111 - Superblock admin section lengths (due to elastic padding)
112 - Block admin section (low and high) lengths (due to elastic redzones)
113 - Block payload lengths (due to req_pszB rounding up)
nethercote2d5b8162004-08-11 09:40:52 +0000114*/
115typedef
116 struct {
117 // No fields are actually used in this struct, because a Block has
njn37517e82005-05-25 15:52:39 +0000118 // many variable sized fields and so can't be accessed
nethercote2d5b8162004-08-11 09:40:52 +0000119 // meaningfully with normal fields. So we use access functions all
120 // the time. This struct gives us a type to use, though. Also, we
121 // make sizeof(Block) 1 byte so that we can do arithmetic with the
122 // Block* type in increments of 1!
123 UByte dummy;
124 }
125 Block;
126
127// A superblock. 'padding' is never used, it just ensures that if the
128// entire Superblock is aligned to VG_MIN_MALLOC_SZB, then payload_bytes[]
129// will be too. It can add small amounts of padding unnecessarily -- eg.
130// 8-bytes on 32-bit machines with an 8-byte VG_MIN_MALLOC_SZB -- because
131// it's too hard to make a constant expression that works perfectly in all
132// cases.
133// payload_bytes[] is made a single big Block when the Superblock is
134// created, and then can be split and the splittings remerged, but Blocks
135// always cover its entire length -- there's never any unused bytes at the
136// end, for example.
sewardj0b3fd2d2007-08-21 10:55:26 +0000137typedef
nethercote2d5b8162004-08-11 09:40:52 +0000138 struct _Superblock {
nethercote7ac7f7b2004-11-02 12:36:02 +0000139 SizeT n_payload_bytes;
sewardj0b3fd2d2007-08-21 10:55:26 +0000140 void* padding2;
141 UByte padding[ VG_MIN_MALLOC_SZB -
142 ((sizeof(struct _Superblock*) + sizeof(SizeT)) %
nethercote7ac7f7b2004-11-02 12:36:02 +0000143 VG_MIN_MALLOC_SZB) ];
nethercote2d5b8162004-08-11 09:40:52 +0000144 UByte payload_bytes[0];
145 }
146 Superblock;
147
148// An arena. 'freelist' is a circular, doubly-linked list. 'rz_szB' is
149// elastic, in that it can be bigger than asked-for to ensure alignment.
sewardj0b3fd2d2007-08-21 10:55:26 +0000150typedef
nethercote2d5b8162004-08-11 09:40:52 +0000151 struct {
sewardj0b3fd2d2007-08-21 10:55:26 +0000152 Char* name;
153 Bool clientmem; // Allocates in the client address space?
154 SizeT rz_szB; // Red zone size in bytes
155 SizeT min_sblock_szB; // Minimum superblock size in bytes
156 Block* freelist[N_MALLOC_LISTS];
157 // A dynamically expanding, ordered array of (pointers to)
158 // superblocks in the arena. If this array is expanded, which
159 // is rare, the previous space it occupies is simply abandoned.
160 // To avoid having to get yet another block from m_aspacemgr for
161 // the first incarnation of this array, the first allocation of
162 // it is within this struct. If it has to be expanded then the
163 // new space is acquired from m_aspacemgr as you would expect.
164 Superblock** sblocks;
165 SizeT sblocks_size;
166 SizeT sblocks_used;
167 Superblock* sblocks_initial[SBLOCKS_SIZE_INITIAL];
nethercote2d5b8162004-08-11 09:40:52 +0000168 // Stats only.
sewardj0b3fd2d2007-08-21 10:55:26 +0000169 SizeT bytes_on_loan;
170 SizeT bytes_mmaped;
171 SizeT bytes_on_loan_max;
172 }
nethercote2d5b8162004-08-11 09:40:52 +0000173 Arena;
174
175
176/*------------------------------------------------------------*/
177/*--- Low-level functions for working with Blocks. ---*/
178/*------------------------------------------------------------*/
179
nethercote7ac7f7b2004-11-02 12:36:02 +0000180#define SIZE_T_0x1 ((SizeT)0x1)
181
njn8d3f8452005-07-20 04:12:41 +0000182// Mark a bszB as in-use, and not in-use, and remove the in-use attribute.
nethercote2d5b8162004-08-11 09:40:52 +0000183static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000184SizeT mk_inuse_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000185{
186 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000187 return bszB & (~SIZE_T_0x1);
nethercote2d5b8162004-08-11 09:40:52 +0000188}
189static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000190SizeT mk_free_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000191{
192 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000193 return bszB | SIZE_T_0x1;
nethercote2d5b8162004-08-11 09:40:52 +0000194}
nethercote2d5b8162004-08-11 09:40:52 +0000195static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000196SizeT mk_plain_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000197{
198 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000199 return bszB & (~SIZE_T_0x1);
nethercote2d5b8162004-08-11 09:40:52 +0000200}
201
njn402c8612005-08-23 22:11:20 +0000202//---------------------------------------------------------------------------
203
204// Get a block's size as stored, ie with the in-use/free attribute.
nethercote2d5b8162004-08-11 09:40:52 +0000205static __inline__
njn402c8612005-08-23 22:11:20 +0000206SizeT get_bszB_as_is ( Block* b )
nethercote2d5b8162004-08-11 09:40:52 +0000207{
njn402c8612005-08-23 22:11:20 +0000208 UByte* b2 = (UByte*)b;
209 SizeT bszB_lo = *(SizeT*)&b2[0];
210 SizeT bszB_hi = *(SizeT*)&b2[mk_plain_bszB(bszB_lo) - sizeof(SizeT)];
211 vg_assert2(bszB_lo == bszB_hi,
212 "Heap block lo/hi size mismatch: lo = %llu, hi = %llu.\n"
sewardj24596d82005-10-21 12:05:05 +0000213 "Probably caused by overrunning/underrunning a heap block's bounds.\n",
214 (ULong)bszB_lo, (ULong)bszB_hi);
njn402c8612005-08-23 22:11:20 +0000215 return bszB_lo;
nethercote2d5b8162004-08-11 09:40:52 +0000216}
217
njn402c8612005-08-23 22:11:20 +0000218// Get a block's plain size, ie. remove the in-use/free attribute.
219static __inline__
220SizeT get_bszB ( Block* b )
221{
222 return mk_plain_bszB(get_bszB_as_is(b));
223}
224
225// Set the size fields of a block. bszB may have the in-use/free attribute.
226static __inline__
227void set_bszB ( Block* b, SizeT bszB )
228{
229 UByte* b2 = (UByte*)b;
230 *(SizeT*)&b2[0] = bszB;
231 *(SizeT*)&b2[mk_plain_bszB(bszB) - sizeof(SizeT)] = bszB;
232}
233
234//---------------------------------------------------------------------------
235
njn472cc7c2005-07-17 17:20:30 +0000236// Does this block have the in-use attribute?
237static __inline__
238Bool is_inuse_block ( Block* b )
239{
njn402c8612005-08-23 22:11:20 +0000240 SizeT bszB = get_bszB_as_is(b);
njn472cc7c2005-07-17 17:20:30 +0000241 vg_assert(bszB != 0);
242 return (0 != (bszB & SIZE_T_0x1)) ? False : True;
243}
244
njn402c8612005-08-23 22:11:20 +0000245//---------------------------------------------------------------------------
njn8d3f8452005-07-20 04:12:41 +0000246
njn089f51f2005-07-17 18:12:00 +0000247// Return the lower, upper and total overhead in bytes for a block.
248// These are determined purely by which arena the block lives in.
249static __inline__
250SizeT overhead_szB_lo ( Arena* a )
251{
njn8d3f8452005-07-20 04:12:41 +0000252 return sizeof(SizeT) + a->rz_szB;
njn089f51f2005-07-17 18:12:00 +0000253}
254static __inline__
255SizeT overhead_szB_hi ( Arena* a )
256{
njn8d3f8452005-07-20 04:12:41 +0000257 return a->rz_szB + sizeof(SizeT);
njn089f51f2005-07-17 18:12:00 +0000258}
259static __inline__
260SizeT overhead_szB ( Arena* a )
261{
262 return overhead_szB_lo(a) + overhead_szB_hi(a);
263}
264
njn402c8612005-08-23 22:11:20 +0000265//---------------------------------------------------------------------------
266
njn089f51f2005-07-17 18:12:00 +0000267// Return the minimum bszB for a block in this arena. Can have zero-length
268// payloads, so it's the size of the admin bytes.
269static __inline__
270SizeT min_useful_bszB ( Arena* a )
271{
272 return overhead_szB(a);
273}
274
njn402c8612005-08-23 22:11:20 +0000275//---------------------------------------------------------------------------
276
njn089f51f2005-07-17 18:12:00 +0000277// Convert payload size <--> block size (both in bytes).
278static __inline__
279SizeT pszB_to_bszB ( Arena* a, SizeT pszB )
280{
281 return pszB + overhead_szB(a);
282}
283static __inline__
284SizeT bszB_to_pszB ( Arena* a, SizeT bszB )
285{
286 vg_assert(bszB >= overhead_szB(a));
287 return bszB - overhead_szB(a);
288}
289
njn402c8612005-08-23 22:11:20 +0000290//---------------------------------------------------------------------------
nethercote2d5b8162004-08-11 09:40:52 +0000291
njn089f51f2005-07-17 18:12:00 +0000292// Get a block's payload size.
nethercote7ac7f7b2004-11-02 12:36:02 +0000293static __inline__
njn089f51f2005-07-17 18:12:00 +0000294SizeT get_pszB ( Arena* a, Block* b )
nethercote7ac7f7b2004-11-02 12:36:02 +0000295{
njn089f51f2005-07-17 18:12:00 +0000296 return bszB_to_pszB(a, get_bszB(b));
nethercote7ac7f7b2004-11-02 12:36:02 +0000297}
298
njn402c8612005-08-23 22:11:20 +0000299//---------------------------------------------------------------------------
300
301// Given the addr of a block, return the addr of its payload, and vice versa.
nethercote2d5b8162004-08-11 09:40:52 +0000302static __inline__
303UByte* get_block_payload ( Arena* a, Block* b )
304{
305 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000306 return & b2[ overhead_szB_lo(a) ];
nethercote2d5b8162004-08-11 09:40:52 +0000307}
308// Given the addr of a block's payload, return the addr of the block itself.
309static __inline__
310Block* get_payload_block ( Arena* a, UByte* payload )
311{
nethercote7ac7f7b2004-11-02 12:36:02 +0000312 return (Block*)&payload[ -overhead_szB_lo(a) ];
nethercote2d5b8162004-08-11 09:40:52 +0000313}
314
njn402c8612005-08-23 22:11:20 +0000315//---------------------------------------------------------------------------
nethercote2d5b8162004-08-11 09:40:52 +0000316
317// Set and get the next and previous link fields of a block.
318static __inline__
319void set_prev_b ( Block* b, Block* prev_p )
320{
321 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000322 *(Block**)&b2[sizeof(SizeT)] = prev_p;
nethercote2d5b8162004-08-11 09:40:52 +0000323}
324static __inline__
325void set_next_b ( Block* b, Block* next_p )
326{
njn402c8612005-08-23 22:11:20 +0000327 UByte* b2 = (UByte*)b;
328 *(Block**)&b2[get_bszB(b) - sizeof(SizeT) - sizeof(void*)] = next_p;
nethercote2d5b8162004-08-11 09:40:52 +0000329}
330static __inline__
331Block* get_prev_b ( Block* b )
332{
333 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000334 return *(Block**)&b2[sizeof(SizeT)];
nethercote2d5b8162004-08-11 09:40:52 +0000335}
336static __inline__
337Block* get_next_b ( Block* b )
338{
njn402c8612005-08-23 22:11:20 +0000339 UByte* b2 = (UByte*)b;
340 return *(Block**)&b2[get_bszB(b) - sizeof(SizeT) - sizeof(void*)];
nethercote2d5b8162004-08-11 09:40:52 +0000341}
342
njn402c8612005-08-23 22:11:20 +0000343//---------------------------------------------------------------------------
nethercote2d5b8162004-08-11 09:40:52 +0000344
345// Get the block immediately preceding this one in the Superblock.
346static __inline__
347Block* get_predecessor_block ( Block* b )
348{
349 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000350 SizeT bszB = mk_plain_bszB( (*(SizeT*)&b2[-sizeof(SizeT)]) );
nethercote2d5b8162004-08-11 09:40:52 +0000351 return (Block*)&b2[-bszB];
352}
353
njn402c8612005-08-23 22:11:20 +0000354//---------------------------------------------------------------------------
355
nethercote2d5b8162004-08-11 09:40:52 +0000356// Read and write the lower and upper red-zone bytes of a block.
357static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000358void set_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
nethercote2d5b8162004-08-11 09:40:52 +0000359{
360 UByte* b2 = (UByte*)b;
njn8d3f8452005-07-20 04:12:41 +0000361 b2[sizeof(SizeT) + rz_byteno] = v;
nethercote2d5b8162004-08-11 09:40:52 +0000362}
363static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000364void set_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
nethercote2d5b8162004-08-11 09:40:52 +0000365{
njn402c8612005-08-23 22:11:20 +0000366 UByte* b2 = (UByte*)b;
367 b2[get_bszB(b) - sizeof(SizeT) - rz_byteno - 1] = v;
nethercote2d5b8162004-08-11 09:40:52 +0000368}
369static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000370UByte get_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno )
nethercote2d5b8162004-08-11 09:40:52 +0000371{
372 UByte* b2 = (UByte*)b;
njn8d3f8452005-07-20 04:12:41 +0000373 return b2[sizeof(SizeT) + rz_byteno];
nethercote2d5b8162004-08-11 09:40:52 +0000374}
375static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000376UByte get_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno )
nethercote2d5b8162004-08-11 09:40:52 +0000377{
njn402c8612005-08-23 22:11:20 +0000378 UByte* b2 = (UByte*)b;
379 return b2[get_bszB(b) - sizeof(SizeT) - rz_byteno - 1];
nethercote2d5b8162004-08-11 09:40:52 +0000380}
381
382
nethercote2d5b8162004-08-11 09:40:52 +0000383/*------------------------------------------------------------*/
384/*--- Arena management ---*/
385/*------------------------------------------------------------*/
386
387#define CORE_ARENA_MIN_SZB 1048576
388
389// The arena structures themselves.
390static Arena vg_arena[VG_N_ARENAS];
391
392// Functions external to this module identify arenas using ArenaIds,
393// not Arena*s. This fn converts the former to the latter.
394static Arena* arenaId_to_ArenaP ( ArenaId arena )
395{
396 vg_assert(arena >= 0 && arena < VG_N_ARENAS);
397 return & vg_arena[arena];
398}
399
400// Initialise an arena. rz_szB is the minimum redzone size; it might be
njn30490552005-03-13 06:30:42 +0000401// made bigger to ensure that VG_MIN_MALLOC_SZB is observed.
nethercote2d5b8162004-08-11 09:40:52 +0000402static
njn0e742df2004-11-30 13:26:29 +0000403void arena_init ( ArenaId aid, Char* name, SizeT rz_szB, SizeT min_sblock_szB )
nethercote2d5b8162004-08-11 09:40:52 +0000404{
sewardj0b3fd2d2007-08-21 10:55:26 +0000405 SizeT i;
nethercote2d5b8162004-08-11 09:40:52 +0000406 Arena* a = arenaId_to_ArenaP(aid);
407
njn7ce83112005-08-24 22:38:00 +0000408 // Ensure redzones are a reasonable size. They must always be at least
409 // the size of a pointer, for holding the prev/next pointer (see the layout
410 // details at the top of this file).
411 vg_assert(rz_szB < 128);
412 if (rz_szB < sizeof(void*)) rz_szB = sizeof(void*);
413
nethercote73b526f2004-10-31 18:48:21 +0000414 vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
nethercote2d5b8162004-08-11 09:40:52 +0000415 a->name = name;
416 a->clientmem = ( VG_AR_CLIENT == aid ? True : False );
417
418 // The size of the low and high admin sections in a block must be a
njn30490552005-03-13 06:30:42 +0000419 // multiple of VG_MIN_MALLOC_SZB. So we round up the asked-for
nethercote2d5b8162004-08-11 09:40:52 +0000420 // redzone size if necessary to achieve this.
421 a->rz_szB = rz_szB;
422 while (0 != overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++;
423 vg_assert(overhead_szB_lo(a) == overhead_szB_hi(a));
424
425 a->min_sblock_szB = min_sblock_szB;
njn6e6588c2005-03-13 18:52:48 +0000426 for (i = 0; i < N_MALLOC_LISTS; i++) a->freelist[i] = NULL;
sewardj0b3fd2d2007-08-21 10:55:26 +0000427
428 a->sblocks = & a->sblocks_initial[0];
429 a->sblocks_size = SBLOCKS_SIZE_INITIAL;
430 a->sblocks_used = 0;
nethercote2d5b8162004-08-11 09:40:52 +0000431 a->bytes_on_loan = 0;
432 a->bytes_mmaped = 0;
433 a->bytes_on_loan_max = 0;
sewardj0b3fd2d2007-08-21 10:55:26 +0000434 vg_assert(sizeof(a->sblocks_initial)
435 == SBLOCKS_SIZE_INITIAL * sizeof(Superblock*));
nethercote2d5b8162004-08-11 09:40:52 +0000436}
437
438/* Print vital stats for an arena. */
439void VG_(print_all_arena_stats) ( void )
440{
nethercote7ac7f7b2004-11-02 12:36:02 +0000441 UInt i;
nethercote2d5b8162004-08-11 09:40:52 +0000442 for (i = 0; i < VG_N_ARENAS; i++) {
443 Arena* a = arenaId_to_ArenaP(i);
444 VG_(message)(Vg_DebugMsg,
njn669ef072005-03-13 05:46:57 +0000445 "%8s: %8d mmap'd, %8d/%8d max/curr",
nethercote2d5b8162004-08-11 09:40:52 +0000446 a->name, a->bytes_mmaped, a->bytes_on_loan_max, a->bytes_on_loan
447 );
448 }
449}
450
451/* This library is self-initialising, as it makes this more self-contained,
452 less coupled with the outside world. Hence VG_(arena_malloc)() and
453 VG_(arena_free)() below always call ensure_mm_init() to ensure things are
sewardj45f4e7c2005-09-27 19:20:21 +0000454 correctly initialised.
455
456 We initialise the client arena separately (and later) because the core
457 must do non-client allocation before the tool has a chance to set the
458 client arena's redzone size.
459*/
sewardj0b3fd2d2007-08-21 10:55:26 +0000460static Bool client_inited = False;
461static Bool nonclient_inited = False;
462
nethercote2d5b8162004-08-11 09:40:52 +0000463static
sewardj45f4e7c2005-09-27 19:20:21 +0000464void ensure_mm_init ( ArenaId aid )
nethercote2d5b8162004-08-11 09:40:52 +0000465{
njn95c23292005-12-26 17:50:22 +0000466 static SizeT client_rz_szB = 8; // default: be paranoid
njnfc51f8d2005-06-21 03:20:17 +0000467
sewardj45f4e7c2005-09-27 19:20:21 +0000468 /* We use checked red zones (of various sizes) for our internal stuff,
nethercote2d5b8162004-08-11 09:40:52 +0000469 and an unchecked zone of arbitrary size for the client. Of
470 course the client's red zone can be checked by the tool, eg.
471 by using addressibility maps, but not by the mechanism implemented
472 here, which merely checks at the time of freeing that the red
473 zone bytes are unchanged.
474
475 Nb: redzone sizes are *minimums*; they could be made bigger to ensure
njn8d3f8452005-07-20 04:12:41 +0000476 alignment. Eg. with 8 byte alignment, on 32-bit machines 4 stays as
477 4, but 16 becomes 20; but on 64-bit machines 4 becomes 8, and 16
478 stays as 16 --- the extra 4 bytes in both are accounted for by the
479 larger prev/next ptr.
nethercote2d5b8162004-08-11 09:40:52 +0000480 */
sewardj45f4e7c2005-09-27 19:20:21 +0000481 if (VG_AR_CLIENT == aid) {
sewardj5600ab32006-10-17 01:42:40 +0000482 Int ar_client_sbszB;
sewardj45f4e7c2005-09-27 19:20:21 +0000483 if (client_inited) {
484 // This assertion ensures that a tool cannot try to change the client
485 // redzone size with VG_(needs_malloc_replacement)() after this module
486 // has done its first allocation from the client arena.
487 if (VG_(needs).malloc_replacement)
njn95c23292005-12-26 17:50:22 +0000488 vg_assert(client_rz_szB == VG_(tdict).tool_client_redzone_szB);
sewardj45f4e7c2005-09-27 19:20:21 +0000489 return;
490 }
nethercote2d5b8162004-08-11 09:40:52 +0000491
sewardj45f4e7c2005-09-27 19:20:21 +0000492 // Check and set the client arena redzone size
493 if (VG_(needs).malloc_replacement) {
njn95c23292005-12-26 17:50:22 +0000494 client_rz_szB = VG_(tdict).tool_client_redzone_szB;
sewardj45f4e7c2005-09-27 19:20:21 +0000495 // 128 is no special figure, just something not too big
njn95c23292005-12-26 17:50:22 +0000496 if (client_rz_szB > 128) {
sewardj45f4e7c2005-09-27 19:20:21 +0000497 VG_(printf)( "\nTool error:\n"
498 " specified redzone size is too big (%llu)\n",
njn95c23292005-12-26 17:50:22 +0000499 (ULong)client_rz_szB);
sewardj45f4e7c2005-09-27 19:20:21 +0000500 VG_(exit)(1);
501 }
502 }
sewardj5600ab32006-10-17 01:42:40 +0000503 // Initialise the client arena. On AIX it's important to have
504 // relatively large client blocks so as not to cause excessively
505 // fine-grained interleaving of V and C address space. On Linux
506 // this is irrelevant since aspacem can keep the two spaces
sewardjc1ac9772007-08-20 22:57:56 +0000507 // well apart, but not so on AIX. On all platforms though,
508 // increasing the superblock size reduces the number of superblocks
509 // in the client arena, which makes findSb cheaper.
sewardj5600ab32006-10-17 01:42:40 +0000510# if defined(VGO_aix5)
511 ar_client_sbszB = 16777216;
512# else
sewardjc1ac9772007-08-20 22:57:56 +0000513 ar_client_sbszB = 4194304;
sewardj5600ab32006-10-17 01:42:40 +0000514# endif
515 arena_init ( VG_AR_CLIENT, "client", client_rz_szB, ar_client_sbszB );
sewardj45f4e7c2005-09-27 19:20:21 +0000516 client_inited = True;
517
518 } else {
519 if (nonclient_inited) {
520 return;
521 }
522 // Initialise the non-client arenas
njn95c23292005-12-26 17:50:22 +0000523 arena_init ( VG_AR_CORE, "core", 4, 1048576 );
sewardjc1ac9772007-08-20 22:57:56 +0000524 arena_init ( VG_AR_TOOL, "tool", 4, 4194304 );
sewardjb8b79ad2008-03-03 01:35:41 +0000525 arena_init ( VG_AR_DINFO, "dinfo", 4, 1048576 );
njn95c23292005-12-26 17:50:22 +0000526 arena_init ( VG_AR_DEMANGLE, "demangle", 4, 65536 );
sewardjc1ac9772007-08-20 22:57:56 +0000527 arena_init ( VG_AR_EXECTXT, "exectxt", 4, 1048576 );
njn95c23292005-12-26 17:50:22 +0000528 arena_init ( VG_AR_ERRORS, "errors", 4, 65536 );
529 arena_init ( VG_AR_TTAUX, "ttaux", 4, 65536 );
sewardj45f4e7c2005-09-27 19:20:21 +0000530 nonclient_inited = True;
531 }
532
nethercote2d5b8162004-08-11 09:40:52 +0000533# ifdef DEBUG_MALLOC
sewardj0b3fd2d2007-08-21 10:55:26 +0000534 VG_(printf)("ZZZ1\n");
nethercote2d5b8162004-08-11 09:40:52 +0000535 VG_(sanity_check_malloc_all)();
sewardj0b3fd2d2007-08-21 10:55:26 +0000536 VG_(printf)("ZZZ2\n");
nethercote2d5b8162004-08-11 09:40:52 +0000537# endif
538}
539
540
541/*------------------------------------------------------------*/
542/*--- Superblock management ---*/
543/*------------------------------------------------------------*/
544
sewardj45f4e7c2005-09-27 19:20:21 +0000545void VG_(out_of_memory_NORETURN) ( HChar* who, SizeT szB )
546{
547 static Bool alreadyCrashing = False;
548 ULong tot_alloc = VG_(am_get_anonsize_total)();
njnb81c7952007-03-22 03:36:55 +0000549 Char* s1 =
550 "\n"
551 " Valgrind's memory management: out of memory:\n"
552 " %s's request for %llu bytes failed.\n"
553 " %llu bytes have already been allocated.\n"
554 " Valgrind cannot continue. Sorry.\n\n"
555 " There are several possible reasons for this.\n"
556 " - You have some kind of memory limit in place. Look at the\n"
557 " output of 'ulimit -a'. Is there a limit on the size of\n"
558 " virtual memory or address space?\n"
559 " - You have run out of swap space.\n"
560 " - Valgrind has a bug. If you think this is the case or you are\n"
561 " not sure, please let us know and we'll try to fix it.\n"
562 " Please note that programs can take substantially more memory than\n"
563 " normal when running under Valgrind tools, eg. up to twice or\n"
564 " more, depending on the tool. On a 64-bit machine, Valgrind\n"
565 " should be able to make use of up 32GB memory. On a 32-bit\n"
566 " machine, Valgrind should be able to use all the memory available\n"
567 " to a single process, up to 4GB if that's how you have your\n"
568 " kernel configured. Most 32-bit Linux setups allow a maximum of\n"
569 " 3GB per process.\n\n"
570 " Whatever the reason, Valgrind cannot continue. Sorry.\n";
571
sewardj45f4e7c2005-09-27 19:20:21 +0000572 if (!alreadyCrashing) {
573 alreadyCrashing = True;
njnb81c7952007-03-22 03:36:55 +0000574 VG_(message)(Vg_UserMsg, s1, who, (ULong)szB, tot_alloc);
sewardj45f4e7c2005-09-27 19:20:21 +0000575 } else {
njnb81c7952007-03-22 03:36:55 +0000576 VG_(debugLog)(0,"mallocfree", s1, who, (ULong)szB, tot_alloc);
sewardj45f4e7c2005-09-27 19:20:21 +0000577 }
578 VG_(exit)(1);
579}
580
581
nethercote2d5b8162004-08-11 09:40:52 +0000582// Align ptr p upwards to an align-sized boundary.
583static
nethercote7ac7f7b2004-11-02 12:36:02 +0000584void* align_upwards ( void* p, SizeT align )
nethercote2d5b8162004-08-11 09:40:52 +0000585{
586 Addr a = (Addr)p;
587 if ((a % align) == 0) return (void*)a;
588 return (void*)(a - (a % align) + align);
589}
590
591// If not enough memory available, either aborts (for non-client memory)
592// or returns 0 (for client memory).
593static
nethercote7ac7f7b2004-11-02 12:36:02 +0000594Superblock* newSuperblock ( Arena* a, SizeT cszB )
nethercote2d5b8162004-08-11 09:40:52 +0000595{
nethercote2d5b8162004-08-11 09:40:52 +0000596 Superblock* sb;
sewardj45f4e7c2005-09-27 19:20:21 +0000597 SysRes sres;
nethercote2d5b8162004-08-11 09:40:52 +0000598
599 // Take into account admin bytes in the Superblock.
600 cszB += sizeof(Superblock);
601
602 if (cszB < a->min_sblock_szB) cszB = a->min_sblock_szB;
bartc3c98392008-04-19 14:43:30 +0000603 cszB = VG_PGROUNDUP(cszB);
nethercote2d5b8162004-08-11 09:40:52 +0000604
sewardj45f4e7c2005-09-27 19:20:21 +0000605 if (a->clientmem) {
nethercote2d5b8162004-08-11 09:40:52 +0000606 // client allocation -- return 0 to client if it fails
sewardj5600ab32006-10-17 01:42:40 +0000607 sres = VG_(am_sbrk_anon_float_client)
sewardj45f4e7c2005-09-27 19:20:21 +0000608 ( cszB, VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC );
609 if (sres.isError)
nethercote2d5b8162004-08-11 09:40:52 +0000610 return 0;
sewardj5600ab32006-10-17 01:42:40 +0000611 sb = (Superblock*)sres.res;
sewardj45f4e7c2005-09-27 19:20:21 +0000612 // Mark this segment as containing client heap. The leak
613 // checker needs to be able to identify such segments so as not
614 // to use them as sources of roots during leak checks.
sewardj5600ab32006-10-17 01:42:40 +0000615 VG_(am_set_segment_isCH_if_SkAnonC)(
616 (NSegment*) VG_(am_find_nsegment)( (Addr)sb )
617 );
nethercote2d5b8162004-08-11 09:40:52 +0000618 } else {
sewardj45f4e7c2005-09-27 19:20:21 +0000619 // non-client allocation -- abort if it fails
sewardj5600ab32006-10-17 01:42:40 +0000620 sres = VG_(am_sbrk_anon_float_valgrind)( cszB );
sewardj45f4e7c2005-09-27 19:20:21 +0000621 if (sres.isError) {
622 VG_(out_of_memory_NORETURN)("newSuperblock", cszB);
623 /* NOTREACHED */
624 sb = NULL; /* keep gcc happy */
625 } else {
sewardj5600ab32006-10-17 01:42:40 +0000626 sb = (Superblock*)sres.res;
sewardj45f4e7c2005-09-27 19:20:21 +0000627 }
nethercote2d5b8162004-08-11 09:40:52 +0000628 }
629 vg_assert(NULL != sb);
njndbf7ca72006-03-31 11:57:59 +0000630 //zzVALGRIND_MAKE_MEM_UNDEFINED(sb, cszB);
nethercote2d5b8162004-08-11 09:40:52 +0000631 vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
632 sb->n_payload_bytes = cszB - sizeof(Superblock);
633 a->bytes_mmaped += cszB;
sewardj45f4e7c2005-09-27 19:20:21 +0000634 VG_(debugLog)(1, "mallocfree",
635 "newSuperblock at %p (pszB %7ld) owner %s/%s\n",
636 sb, sb->n_payload_bytes,
637 a->clientmem ? "CLIENT" : "VALGRIND", a->name );
nethercote2d5b8162004-08-11 09:40:52 +0000638 return sb;
639}
640
641// Find the superblock containing the given chunk.
642static
643Superblock* findSb ( Arena* a, Block* b )
644{
sewardj0b3fd2d2007-08-21 10:55:26 +0000645 SizeT min = 0;
646 SizeT max = a->sblocks_used;
sewardj49bdd7a2005-12-17 20:37:36 +0000647
sewardj0b3fd2d2007-08-21 10:55:26 +0000648 while (min <= max) {
649 Superblock * sb;
650 SizeT pos = min + (max - min)/2;
651
652 vg_assert(pos >= 0 && pos < a->sblocks_used);
653 sb = a->sblocks[pos];
654 if ((Block*)&sb->payload_bytes[0] <= b
655 && b < (Block*)&sb->payload_bytes[sb->n_payload_bytes])
656 {
657 return sb;
658 } else if ((Block*)&sb->payload_bytes[0] <= b) {
659 min = pos + 1;
660 } else {
661 max = pos - 1;
sewardj49bdd7a2005-12-17 20:37:36 +0000662 }
663 }
sewardj0b3fd2d2007-08-21 10:55:26 +0000664 VG_(printf)("findSb: can't find pointer %p in arena '%s'\n",
665 b, a->name );
666 VG_(core_panic)("findSb: VG_(arena_free)() in wrong arena?");
667 return NULL; /*NOTREACHED*/
nethercote2d5b8162004-08-11 09:40:52 +0000668}
669
sewardjde4a1d02002-03-22 01:27:54 +0000670
fitzhardinge98abfc72003-12-16 02:05:15 +0000671/*------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +0000672/*--- Functions for working with freelists. ---*/
673/*------------------------------------------------------------*/
674
nethercote2d5b8162004-08-11 09:40:52 +0000675// Nb: Determination of which freelist a block lives on is based on the
676// payload size, not block size.
sewardjde4a1d02002-03-22 01:27:54 +0000677
nethercote2d5b8162004-08-11 09:40:52 +0000678// Convert a payload size in bytes to a freelist number.
sewardjde4a1d02002-03-22 01:27:54 +0000679static
nethercote7ac7f7b2004-11-02 12:36:02 +0000680UInt pszB_to_listNo ( SizeT pszB )
sewardjde4a1d02002-03-22 01:27:54 +0000681{
njndb247dc2005-07-17 23:12:33 +0000682 SizeT n = pszB / VG_MIN_MALLOC_SZB;
tom60a4b0b2005-10-12 10:45:27 +0000683 vg_assert(0 == pszB % VG_MIN_MALLOC_SZB);
njn61dcab82005-05-21 19:36:45 +0000684
sewardjc1ac9772007-08-20 22:57:56 +0000685 // The first 64 lists hold blocks of size VG_MIN_MALLOC_SZB * list_num.
686 // The final 48 hold bigger blocks.
687 if (n < 64) return (UInt)n;
688 /* Exponential slope up, factor 1.05 */
689 if (n < 67) return 64;
690 if (n < 70) return 65;
691 if (n < 74) return 66;
692 if (n < 77) return 67;
693 if (n < 81) return 68;
694 if (n < 85) return 69;
695 if (n < 90) return 70;
696 if (n < 94) return 71;
697 if (n < 99) return 72;
698 if (n < 104) return 73;
699 if (n < 109) return 74;
700 if (n < 114) return 75;
701 if (n < 120) return 76;
702 if (n < 126) return 77;
703 if (n < 133) return 78;
704 if (n < 139) return 79;
705 /* Exponential slope up, factor 1.10 */
706 if (n < 153) return 80;
707 if (n < 169) return 81;
708 if (n < 185) return 82;
709 if (n < 204) return 83;
710 if (n < 224) return 84;
711 if (n < 247) return 85;
712 if (n < 272) return 86;
713 if (n < 299) return 87;
714 if (n < 329) return 88;
715 if (n < 362) return 89;
716 if (n < 398) return 90;
717 if (n < 438) return 91;
718 if (n < 482) return 92;
719 if (n < 530) return 93;
720 if (n < 583) return 94;
721 if (n < 641) return 95;
722 /* Exponential slope up, factor 1.20 */
723 if (n < 770) return 96;
724 if (n < 924) return 97;
725 if (n < 1109) return 98;
726 if (n < 1331) return 99;
727 if (n < 1597) return 100;
728 if (n < 1916) return 101;
729 if (n < 2300) return 102;
730 if (n < 2760) return 103;
731 if (n < 3312) return 104;
732 if (n < 3974) return 105;
733 if (n < 4769) return 106;
734 if (n < 5723) return 107;
735 if (n < 6868) return 108;
736 if (n < 8241) return 109;
737 if (n < 9890) return 110;
738 return 111;
sewardjde4a1d02002-03-22 01:27:54 +0000739}
740
nethercote2d5b8162004-08-11 09:40:52 +0000741// What is the minimum payload size for a given list?
sewardjde4a1d02002-03-22 01:27:54 +0000742static
nethercote7ac7f7b2004-11-02 12:36:02 +0000743SizeT listNo_to_pszB_min ( UInt listNo )
sewardjde4a1d02002-03-22 01:27:54 +0000744{
sewardj1d2e2e62007-08-23 10:22:44 +0000745 /* Repeatedly computing this function at every request is
746 expensive. Hence at the first call just cache the result for
747 every possible argument. */
748 static SizeT cache[N_MALLOC_LISTS];
749 static Bool cache_valid = False;
750 if (!cache_valid) {
751 UInt i;
752 for (i = 0; i < N_MALLOC_LISTS; i++) {
753 SizeT pszB = 0;
754 while (pszB_to_listNo(pszB) < i)
755 pszB += VG_MIN_MALLOC_SZB;
756 cache[i] = pszB;
757 }
758 cache_valid = True;
759 }
760 /* Returned cached answer. */
njn6e6588c2005-03-13 18:52:48 +0000761 vg_assert(listNo <= N_MALLOC_LISTS);
sewardj1d2e2e62007-08-23 10:22:44 +0000762 return cache[listNo];
sewardjde4a1d02002-03-22 01:27:54 +0000763}
764
nethercote2d5b8162004-08-11 09:40:52 +0000765// What is the maximum payload size for a given list?
sewardjde4a1d02002-03-22 01:27:54 +0000766static
nethercote7ac7f7b2004-11-02 12:36:02 +0000767SizeT listNo_to_pszB_max ( UInt listNo )
sewardjde4a1d02002-03-22 01:27:54 +0000768{
njn6e6588c2005-03-13 18:52:48 +0000769 vg_assert(listNo <= N_MALLOC_LISTS);
770 if (listNo == N_MALLOC_LISTS-1) {
nethercote2d5b8162004-08-11 09:40:52 +0000771 return MAX_PSZB;
sewardjde4a1d02002-03-22 01:27:54 +0000772 } else {
nethercote2d5b8162004-08-11 09:40:52 +0000773 return listNo_to_pszB_min(listNo+1) - 1;
sewardjde4a1d02002-03-22 01:27:54 +0000774 }
775}
776
777
778/* A nasty hack to try and reduce fragmentation. Try and replace
779 a->freelist[lno] with another block on the same list but with a
780 lower address, with the idea of attempting to recycle the same
781 blocks rather than cruise through the address space. */
sewardjde4a1d02002-03-22 01:27:54 +0000782static
nethercote7ac7f7b2004-11-02 12:36:02 +0000783void swizzle ( Arena* a, UInt lno )
sewardjde4a1d02002-03-22 01:27:54 +0000784{
nethercote2d5b8162004-08-11 09:40:52 +0000785 Block* p_best;
786 Block* pp;
787 Block* pn;
nethercote7ac7f7b2004-11-02 12:36:02 +0000788 UInt i;
sewardjde4a1d02002-03-22 01:27:54 +0000789
790 p_best = a->freelist[lno];
791 if (p_best == NULL) return;
792
793 pn = pp = p_best;
njn2bf9ba62005-12-25 02:47:12 +0000794
795 // This loop bound was 20 for a long time, but experiments showed that
796 // reducing it to 10 gave the same result in all the tests, and 5 got the
797 // same result in 85--100% of cases. And it's called often enough to be
798 // noticeable in programs that allocated a lot.
799 for (i = 0; i < 5; i++) {
nethercote2d5b8162004-08-11 09:40:52 +0000800 pn = get_next_b(pn);
801 pp = get_prev_b(pp);
sewardjde4a1d02002-03-22 01:27:54 +0000802 if (pn < p_best) p_best = pn;
803 if (pp < p_best) p_best = pp;
804 }
805 if (p_best < a->freelist[lno]) {
nethercote2d5b8162004-08-11 09:40:52 +0000806# ifdef VERBOSE_MALLOC
807 VG_(printf)("retreat by %d\n", a->freelist[lno] - p_best);
sewardjde4a1d02002-03-22 01:27:54 +0000808# endif
809 a->freelist[lno] = p_best;
810 }
811}
812
813
814/*------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +0000815/*--- Sanity-check/debugging machinery. ---*/
816/*------------------------------------------------------------*/
817
njn6e6588c2005-03-13 18:52:48 +0000818#define REDZONE_LO_MASK 0x31
819#define REDZONE_HI_MASK 0x7c
nethercote2d5b8162004-08-11 09:40:52 +0000820
nethercote7ac7f7b2004-11-02 12:36:02 +0000821// Do some crude sanity checks on a Block.
sewardjde4a1d02002-03-22 01:27:54 +0000822static
nethercote2d5b8162004-08-11 09:40:52 +0000823Bool blockSane ( Arena* a, Block* b )
sewardjde4a1d02002-03-22 01:27:54 +0000824{
825# define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
nethercote7ac7f7b2004-11-02 12:36:02 +0000826 UInt i;
njn402c8612005-08-23 22:11:20 +0000827 // The lo and hi size fields will be checked (indirectly) by the call
828 // to get_rz_hi_byte().
njn472cc7c2005-07-17 17:20:30 +0000829 if (!a->clientmem && is_inuse_block(b)) {
nethercote2d5b8162004-08-11 09:40:52 +0000830 for (i = 0; i < a->rz_szB; i++) {
831 if (get_rz_lo_byte(a, b, i) !=
njn6e6588c2005-03-13 18:52:48 +0000832 (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK))
nethercote2d5b8162004-08-11 09:40:52 +0000833 {BLEAT("redzone-lo");return False;}
834 if (get_rz_hi_byte(a, b, i) !=
njn6e6588c2005-03-13 18:52:48 +0000835 (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK))
nethercote2d5b8162004-08-11 09:40:52 +0000836 {BLEAT("redzone-hi");return False;}
sewardjde4a1d02002-03-22 01:27:54 +0000837 }
838 }
839 return True;
840# undef BLEAT
841}
842
nethercote2d5b8162004-08-11 09:40:52 +0000843// Print superblocks (only for debugging).
sewardjde4a1d02002-03-22 01:27:54 +0000844static
845void ppSuperblocks ( Arena* a )
846{
sewardj0b3fd2d2007-08-21 10:55:26 +0000847 UInt i, j, blockno = 1;
njnd0e685c2005-07-17 17:55:42 +0000848 SizeT b_bszB;
sewardjde4a1d02002-03-22 01:27:54 +0000849
sewardj0b3fd2d2007-08-21 10:55:26 +0000850 for (j = 0; j < a->sblocks_used; ++j) {
851 Superblock * sb = a->sblocks[j];
852
sewardjde4a1d02002-03-22 01:27:54 +0000853 VG_(printf)( "\n" );
njn8a7b41b2007-09-23 00:51:24 +0000854 VG_(printf)( "superblock %d at %p, sb->n_pl_bs = %lu\n",
sewardj0b3fd2d2007-08-21 10:55:26 +0000855 blockno++, sb, sb->n_payload_bytes);
njnd0e685c2005-07-17 17:55:42 +0000856 for (i = 0; i < sb->n_payload_bytes; i += b_bszB) {
857 Block* b = (Block*)&sb->payload_bytes[i];
858 b_bszB = get_bszB(b);
njn8a7b41b2007-09-23 00:51:24 +0000859 VG_(printf)( " block at %d, bszB %lu: ", i, b_bszB );
njn472cc7c2005-07-17 17:20:30 +0000860 VG_(printf)( "%s, ", is_inuse_block(b) ? "inuse" : "free");
nethercote2d5b8162004-08-11 09:40:52 +0000861 VG_(printf)( "%s\n", blockSane(a, b) ? "ok" : "BAD" );
sewardjde4a1d02002-03-22 01:27:54 +0000862 }
nethercote2d5b8162004-08-11 09:40:52 +0000863 vg_assert(i == sb->n_payload_bytes); // no overshoot at end of Sb
sewardjde4a1d02002-03-22 01:27:54 +0000864 }
865 VG_(printf)( "end of superblocks\n\n" );
866}
867
// Sanity check both the superblocks and the chains.
// Cross-checks, for arena 'aid':
//   - the superblock array invariants (size/used relationship, and
//     initial static array vs. resized heap array);
//   - every block in every superblock: redzones (via blockSane), no
//     two adjacent free blocks, and that the total of in-use payload
//     bytes matches a->bytes_on_loan;
//   - every freelist: back-pointer linkage, per-list payload size
//     range, and that the number of free blocks counted in the
//     superblock walk equals the number reachable via the freelists.
// Any failure panics via BOMB.
static void sanity_check_malloc_arena ( ArenaId aid )
{
   UInt        i, j, superblockctr, blockctr_sb, blockctr_li;
   UInt        blockctr_sb_free, listno;
   SizeT       b_bszB, b_pszB, list_min_pszB, list_max_pszB;
   Bool        thisFree, lastWasFree, sblockarrOK;
   Block*      b;
   Block*      b_prev;
   SizeT       arena_bytes_on_loan;
   Arena*      a;

#  define BOMB VG_(core_panic)("sanity_check_malloc_arena")

   a = arenaId_to_ArenaP(aid);

   // Check the superblock array.
   // While the array still has its initial size it must be the
   // arena's built-in static array; once resized it must not be.
   sblockarrOK
      = a->sblocks != NULL
        && a->sblocks_size >= SBLOCKS_SIZE_INITIAL
        && a->sblocks_used <= a->sblocks_size
        && (a->sblocks_size == SBLOCKS_SIZE_INITIAL
            ? (a->sblocks == &a->sblocks_initial[0])
            : (a->sblocks != &a->sblocks_initial[0]));
   if (!sblockarrOK) {
      VG_(printf)("sanity_check_malloc_arena: sblock array BAD\n");
      BOMB;
   }

   // First, traverse all the superblocks, inspecting the Blocks in each.
   // 'j' indexes superblocks; 'i' is a byte offset into the payload
   // area, advanced by each block's (plain) size in turn.
   superblockctr = blockctr_sb = blockctr_sb_free = 0;
   arena_bytes_on_loan = 0;
   for (j = 0; j < a->sblocks_used; ++j) {
      Superblock * sb = a->sblocks[j];
      lastWasFree = False;
      superblockctr++;
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         blockctr_sb++;
         b = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_as_is(b);
         if (!blockSane(a, b)) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d "
                        "(bszB %lu): BAD\n", sb, i, b_bszB );
            BOMB;
         }
         thisFree = !is_inuse_block(b);
         // Adjacent free blocks should have been coalesced at free time.
         if (thisFree && lastWasFree) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d "
                        "(bszB %lu): UNMERGED FREES\n", sb, i, b_bszB );
            BOMB;
         }
         if (thisFree) blockctr_sb_free++;
         if (!thisFree)
            arena_bytes_on_loan += bszB_to_pszB(a, b_bszB);
         lastWasFree = thisFree;
      }
      // The walk must land exactly on the end of the payload area.
      if (i > sb->n_payload_bytes) {
         VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
                      "overshoots end\n", sb);
         BOMB;
      }
   }

   // The in-use bytes counted above must agree with the running total.
   if (arena_bytes_on_loan != a->bytes_on_loan) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: a->bytes_on_loan %d, "
                   "arena_bytes_on_loan %d: "
                   "MISMATCH\n", a->bytes_on_loan, arena_bytes_on_loan);
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   /* Second, traverse each list, checking that the back pointers make
      sense, counting blocks encountered, and checking that each block
      is an appropriate size for this list. */
   blockctr_li = 0;
   for (listno = 0; listno < N_MALLOC_LISTS; listno++) {
      list_min_pszB = listNo_to_pszB_min(listno);
      list_max_pszB = listNo_to_pszB_max(listno);
      b = a->freelist[listno];
      if (b == NULL) continue;
      // Freelists are circular doubly-linked lists: walk until we get
      // back to the head.
      while (True) {
         b_prev = b;
         b = get_next_b(b);
         if (get_prev_b(b) != b_prev) {
            VG_(printf)( "sanity_check_malloc_arena: list %d at %p: "
                         "BAD LINKAGE\n",
                         listno, b );
            BOMB;
         }
         b_pszB = get_pszB(a, b);
         if (b_pszB < list_min_pszB || b_pszB > list_max_pszB) {
            VG_(printf)(
               "sanity_check_malloc_arena: list %d at %p: "
               "WRONG CHAIN SIZE %luB (%luB, %luB)\n",
               listno, b, b_pszB, list_min_pszB, list_max_pszB );
            BOMB;
         }
         blockctr_li++;
         if (b == a->freelist[listno]) break;
      }
   }

   // Every free block in a superblock must be on exactly one freelist.
   if (blockctr_sb_free != blockctr_li) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: BLOCK COUNT MISMATCH "
                   "(via sbs %d, via lists %d)\n",
                   blockctr_sb_free, blockctr_li );
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   if (VG_(clo_verbosity) > 2)
      VG_(message)(Vg_DebugMsg,
                   "%8s: %2d sbs, %5d bs, %2d/%-2d free bs, "
                   "%7d mmap, %7d loan",
                   a->name,
                   superblockctr,
                   blockctr_sb, blockctr_sb_free, blockctr_li,
                   a->bytes_mmaped, a->bytes_on_loan);
#  undef BOMB
}
992
993
nethercote885dd912004-08-03 23:14:00 +0000994void VG_(sanity_check_malloc_all) ( void )
sewardjde4a1d02002-03-22 01:27:54 +0000995{
nethercote7ac7f7b2004-11-02 12:36:02 +0000996 UInt i;
sewardj0b3fd2d2007-08-21 10:55:26 +0000997 for (i = 0; i < VG_N_ARENAS; i++) {
998 if (i == VG_AR_CLIENT && !client_inited)
999 continue;
nethercote885dd912004-08-03 23:14:00 +00001000 sanity_check_malloc_arena ( i );
sewardj0b3fd2d2007-08-21 10:55:26 +00001001 }
sewardjde4a1d02002-03-22 01:27:54 +00001002}
1003
sewardjde4a1d02002-03-22 01:27:54 +00001004
nethercote2d5b8162004-08-11 09:40:52 +00001005/*------------------------------------------------------------*/
1006/*--- Creating and deleting blocks. ---*/
1007/*------------------------------------------------------------*/
1008
1009// Mark the bytes at b .. b+bszB-1 as not in use, and add them to the
1010// relevant free list.
1011
1012static
nethercote7ac7f7b2004-11-02 12:36:02 +00001013void mkFreeBlock ( Arena* a, Block* b, SizeT bszB, UInt b_lno )
jsewardb1a26ae2004-03-14 03:06:37 +00001014{
nethercote7ac7f7b2004-11-02 12:36:02 +00001015 SizeT pszB = bszB_to_pszB(a, bszB);
nethercote2d5b8162004-08-11 09:40:52 +00001016 vg_assert(b_lno == pszB_to_listNo(pszB));
njndbf7ca72006-03-31 11:57:59 +00001017 //zzVALGRIND_MAKE_MEM_UNDEFINED(b, bszB);
nethercote2d5b8162004-08-11 09:40:52 +00001018 // Set the size fields and indicate not-in-use.
njn8d3f8452005-07-20 04:12:41 +00001019 set_bszB(b, mk_free_bszB(bszB));
nethercote2d5b8162004-08-11 09:40:52 +00001020
1021 // Add to the relevant list.
1022 if (a->freelist[b_lno] == NULL) {
1023 set_prev_b(b, b);
1024 set_next_b(b, b);
1025 a->freelist[b_lno] = b;
1026 } else {
1027 Block* b_prev = get_prev_b(a->freelist[b_lno]);
1028 Block* b_next = a->freelist[b_lno];
1029 set_next_b(b_prev, b);
1030 set_prev_b(b_next, b);
1031 set_next_b(b, b_next);
1032 set_prev_b(b, b_prev);
1033 }
1034# ifdef DEBUG_MALLOC
1035 (void)blockSane(a,b);
1036# endif
1037}
1038
1039// Mark the bytes at b .. b+bszB-1 as in use, and set up the block
1040// appropriately.
1041static
nethercote7ac7f7b2004-11-02 12:36:02 +00001042void mkInuseBlock ( Arena* a, Block* b, SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +00001043{
nethercote7ac7f7b2004-11-02 12:36:02 +00001044 UInt i;
nethercote2d5b8162004-08-11 09:40:52 +00001045 vg_assert(bszB >= min_useful_bszB(a));
njndbf7ca72006-03-31 11:57:59 +00001046 //zzVALGRIND_MAKE_MEM_UNDEFINED(b, bszB);
njn8d3f8452005-07-20 04:12:41 +00001047 set_bszB(b, mk_inuse_bszB(bszB));
nethercote2d5b8162004-08-11 09:40:52 +00001048 set_prev_b(b, NULL); // Take off freelist
1049 set_next_b(b, NULL); // ditto
1050 if (!a->clientmem) {
1051 for (i = 0; i < a->rz_szB; i++) {
njn6e6588c2005-03-13 18:52:48 +00001052 set_rz_lo_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK));
1053 set_rz_hi_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK));
nethercote2d5b8162004-08-11 09:40:52 +00001054 }
1055 }
1056# ifdef DEBUG_MALLOC
1057 (void)blockSane(a,b);
1058# endif
1059}
1060
1061// Remove a block from a given list. Does no sanity checking.
1062static
nethercote7ac7f7b2004-11-02 12:36:02 +00001063void unlinkBlock ( Arena* a, Block* b, UInt listno )
nethercote2d5b8162004-08-11 09:40:52 +00001064{
njn6e6588c2005-03-13 18:52:48 +00001065 vg_assert(listno < N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +00001066 if (get_prev_b(b) == b) {
1067 // Only one element in the list; treat it specially.
1068 vg_assert(get_next_b(b) == b);
1069 a->freelist[listno] = NULL;
1070 } else {
1071 Block* b_prev = get_prev_b(b);
1072 Block* b_next = get_next_b(b);
1073 a->freelist[listno] = b_prev;
1074 set_next_b(b_prev, b_next);
1075 set_prev_b(b_next, b_prev);
1076 swizzle ( a, listno );
1077 }
1078 set_prev_b(b, NULL);
1079 set_next_b(b, NULL);
jsewardb1a26ae2004-03-14 03:06:37 +00001080}
1081
1082
sewardjde4a1d02002-03-22 01:27:54 +00001083/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001084/*--- Core-visible functions. ---*/
sewardjde4a1d02002-03-22 01:27:54 +00001085/*------------------------------------------------------------*/
1086
nethercote2d5b8162004-08-11 09:40:52 +00001087// Align the request size.
1088static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +00001089SizeT align_req_pszB ( SizeT req_pszB )
nethercote2d5b8162004-08-11 09:40:52 +00001090{
nethercote7ac7f7b2004-11-02 12:36:02 +00001091 SizeT n = VG_MIN_MALLOC_SZB-1;
nethercote2d5b8162004-08-11 09:40:52 +00001092 return ((req_pszB + n) & (~n));
1093}
1094
/* Allocate req_pszB payload bytes from arena 'aid'.  Returns a
   VG_MIN_MALLOC_SZB-aligned pointer to the payload.  For the client
   arena a failed superblock allocation makes this return NULL; for
   all other arenas an out-of-memory condition aborts (in
   newSuperblock) rather than returning. */
void* VG_(arena_malloc) ( ArenaId aid, SizeT req_pszB )
{
   SizeT       req_bszB, frag_bszB, b_bszB;
   UInt        lno, i;
   Superblock* new_sb;
   Block*      b = NULL;
   Arena*      a;
   void*       v;

   ensure_mm_init(aid);
   a = arenaId_to_ArenaP(aid);

   // Round the request up to the arena's granularity, then convert
   // payload size to block size (payload + overhead/redzones).
   vg_assert(req_pszB < MAX_PSZB);
   req_pszB = align_req_pszB(req_pszB);
   req_bszB = pszB_to_bszB(a, req_pszB);

   // Scan through all the big-enough freelists for a block.
   //
   // Nb: this scanning might be expensive in some cases.  Eg. if you
   // allocate lots of small objects without freeing them, but no
   // medium-sized objects, it will repeatedly scanning through the whole
   // list, and each time not find any free blocks until the last element.
   //
   // If this becomes a noticeable problem... the loop answers the question
   // "where is the first nonempty list above me?"  And most of the time,
   // you ask the same question and get the same answer.  So it would be
   // good to somehow cache the results of previous searches.
   // One possibility is an array (with N_MALLOC_LISTS elements) of
   // shortcuts.  shortcut[i] would give the index number of the nearest
   // larger list above list i which is non-empty.  Then this loop isn't
   // necessary.  However, we'd have to modify some section [ .. i-1] of the
   // shortcut array every time a list [i] changes from empty to nonempty or
   // back.  This would require care to avoid pathological worst-case
   // behaviour.
   //
   for (lno = pszB_to_listNo(req_pszB); lno < N_MALLOC_LISTS; lno++) {
      b = a->freelist[lno];
      if (NULL == b) continue;   // If this list is empty, try the next one.
      while (True) {
         b_bszB = get_bszB(b);
         if (b_bszB >= req_bszB) goto obtained_block;    // success!
         b = get_next_b(b);
         if (b == a->freelist[lno]) break;   // traversed entire freelist
      }
   }

   // If we reach here, no suitable block found, allocate a new superblock
   vg_assert(lno == N_MALLOC_LISTS);
   new_sb = newSuperblock(a, req_bszB);
   if (NULL == new_sb) {
      // Should only fail if for client, otherwise, should have aborted
      // already.
      vg_assert(VG_AR_CLIENT == aid);
      return NULL;
   }

   // Ensure there is room for the new entry in the (address-sorted)
   // superblock array, doubling the array if it is full.
   vg_assert(a->sblocks_used <= a->sblocks_size);
   if (a->sblocks_used == a->sblocks_size) {
      Superblock ** array;
      SysRes sres = VG_(am_sbrk_anon_float_valgrind)(sizeof(Superblock *) *
                                                     a->sblocks_size * 2);
      if (sres.isError) {
         VG_(out_of_memory_NORETURN)("arena_init", sizeof(Superblock *) *
                                                   a->sblocks_size * 2);
         /* NOTREACHED */
      }
      array = (Superblock**) sres.res;
      for (i = 0; i < a->sblocks_used; ++i) array[i] = a->sblocks[i];

      a->sblocks_size *= 2;
      a->sblocks = array;
      VG_(debugLog)(1, "mallocfree",
                       "sblock array for arena `%s' resized to %ld\n",
                       a->name, a->sblocks_size);
   }

   vg_assert(a->sblocks_used < a->sblocks_size);

   // Insert new_sb keeping the array sorted by address: shift every
   // higher-addressed entry up one slot, then drop new_sb in.  The
   // ordering is what lets findSb() binary-search.
   i = a->sblocks_used;
   while (i > 0) {
      if (a->sblocks[i-1] > new_sb) {
         a->sblocks[i] = a->sblocks[i-1];
      } else {
         break;
      }
      --i;
   }
   a->sblocks[i] = new_sb;
   a->sblocks_used++;

   // Turn the whole new superblock into one big free block and put it
   // on the appropriate freelist, so the code below can use it.
   b = (Block*)&new_sb->payload_bytes[0];
   lno = pszB_to_listNo(bszB_to_pszB(a, new_sb->n_payload_bytes));
   mkFreeBlock ( a, b, new_sb->n_payload_bytes, lno);
   // fall through

  obtained_block:
   // Ok, we can allocate from b, which lives in list lno.
   vg_assert(b != NULL);
   vg_assert(lno < N_MALLOC_LISTS);
   vg_assert(a->freelist[lno] != NULL);
   b_bszB = get_bszB(b);
   // req_bszB is the size of the block we are after.  b_bszB is the
   // size of what we've actually got. */
   vg_assert(b_bszB >= req_bszB);

   // Could we split this block and still get a useful fragment?
   frag_bszB = b_bszB - req_bszB;
   if (frag_bszB >= min_useful_bszB(a)) {
      // Yes, split block in two, put the fragment on the appropriate free
      // list, and update b_bszB accordingly.
      // printf( "split %dB into %dB and %dB\n", b_bszB, req_bszB, frag_bszB );
      unlinkBlock(a, b, lno);
      mkInuseBlock(a, b, req_bszB);
      // NOTE(review): '&b[req_bszB]' advances by req_bszB * sizeof(Block)
      // -- this relies on Block being byte-sized; confirm its typedef.
      mkFreeBlock(a, &b[req_bszB], frag_bszB,
                     pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
      b_bszB = get_bszB(b);
   } else {
      // No, mark as in use and use as-is.
      unlinkBlock(a, b, lno);
      mkInuseBlock(a, b, b_bszB);
   }

   // Update stats
   a->bytes_on_loan += bszB_to_pszB(a, b_bszB);
   if (a->bytes_on_loan > a->bytes_on_loan_max)
      a->bytes_on_loan_max = a->bytes_on_loan;

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   v = get_block_payload(a, b);
   vg_assert( (((Addr)v) & (VG_MIN_MALLOC_SZB-1)) == 0 );

   /* VALGRIND_MALLOCLIKE_BLOCK(v, req_pszB, 0, False); */

   /* For debugging/testing purposes, fill the newly allocated area
      with a definite value in an attempt to shake out any
      uninitialised uses of the data (by V core / V tools, not by the
      client).  Testing on 25 Nov 07 with the values 0x00, 0xFF, 0x55,
      0xAA showed no differences in the regression tests on
      amd64-linux.  Note, is disabled by default. */
   if (0 && aid != VG_AR_CLIENT)
      VG_(memset)(v, 0xAA, (SizeT)req_pszB);

   return v;
}
1242
1243
/* Free 'ptr', previously obtained from VG_(arena_malloc) on the same
   arena.  NULL is a no-op.  The freed block is coalesced with any
   adjacent free neighbours and placed on the appropriate freelist. */
void VG_(arena_free) ( ArenaId aid, void* ptr )
{
   Superblock* sb;
   UByte*      sb_start;
   UByte*      sb_end;
   Block*      other_b;
   Block*      b;
   SizeT       b_bszB, b_pszB, other_bszB;
   UInt        b_listno;
   Arena*      a;

   ensure_mm_init(aid);
   a = arenaId_to_ArenaP(aid);

   if (ptr == NULL) {
      return;
   }

   b = get_payload_block(a, ptr);

   /* If this is one of V's areas, check carefully the block we're
      getting back.  This picks up simple block-end overruns. */
   if (aid != VG_AR_CLIENT)
      vg_assert(blockSane(a, b));

   // Locate the containing superblock; sb_start/sb_end delimit its
   // payload area (sb_end is the last byte, inclusive).
   b_bszB   = get_bszB(b);
   b_pszB   = bszB_to_pszB(a, b_bszB);
   sb       = findSb( a, b );
   sb_start = &sb->payload_bytes[0];
   sb_end   = &sb->payload_bytes[sb->n_payload_bytes - 1];

   a->bytes_on_loan -= b_pszB;

   /* If this is one of V's areas, fill it up with junk to enhance the
      chances of catching any later reads of it.  Note, 0xDD is
      carefully chosen junk :-), in that: (1) 0xDDDDDDDD is an invalid
      and non-word-aligned address on most systems, and (2) 0xDD is a
      value which is unlikely to be generated by the new compressed
      Vbits representation for memcheck. */
   if (aid != VG_AR_CLIENT)
      VG_(memset)(ptr, 0xDD, (SizeT)b_pszB);

   // Put this chunk back on a list somewhere.
   b_listno = pszB_to_listNo(b_pszB);
   mkFreeBlock( a, b, b_bszB, b_listno );

   // See if this block can be merged with its successor.
   // First test if we're far enough before the superblock's end to possibly
   // have a successor.
   // NOTE(review): 'b + b_bszB' advances by b_bszB * sizeof(Block) --
   // this relies on Block being byte-sized; confirm its typedef.
   other_b = b + b_bszB;
   if (other_b+min_useful_bszB(a)-1 <= (Block*)sb_end) {
      // Ok, we have a successor, merge if it's not in use.
      other_bszB = get_bszB(other_b);
      if (!is_inuse_block(other_b)) {
         // VG_(printf)( "merge-successor\n");
#        ifdef DEBUG_MALLOC
         vg_assert(blockSane(a, other_b));
#        endif
         // Pull both free blocks off their lists, fuse them, and
         // re-file the combined block under its (possibly new) list.
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other_b, pszB_to_listNo(bszB_to_pszB(a,other_bszB)) );
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
      }
   } else {
      // Not enough space for successor: check that b is the last block
      // ie. there are no unused bytes at the end of the Superblock.
      vg_assert(other_b-1 == (Block*)sb_end);
   }

   // Then see if this block can be merged with its predecessor.
   // First test if we're far enough after the superblock's start to possibly
   // have a predecessor.
   if (b >= (Block*)sb_start + min_useful_bszB(a)) {
      // Ok, we have a predecessor, merge if it's not in use.
      other_b = get_predecessor_block( b );
      other_bszB = get_bszB(other_b);
      if (!is_inuse_block(other_b)) {
         // VG_(printf)( "merge-predecessor\n");
         // As above, but the merged block now starts at the predecessor.
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other_b, pszB_to_listNo(bszB_to_pszB(a, other_bszB)) );
         b = other_b;
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
      }
   } else {
      // Not enough space for predecessor: check that b is the first block,
      // ie. there are no unused bytes at the start of the Superblock.
      vg_assert((Block*)sb_start == b);
   }

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   //zzVALGRIND_FREELIKE_BLOCK(ptr, 0);
}
1342
1343
1344/*
1345 The idea for malloc_aligned() is to allocate a big block, base, and
1346 then split it into two parts: frag, which is returned to the the
1347 free pool, and align, which is the bit we're really after. Here's
1348 a picture. L and H denote the block lower and upper overheads, in
nethercote2d5b8162004-08-11 09:40:52 +00001349 bytes. The details are gruesome. Note it is slightly complicated
sewardjde4a1d02002-03-22 01:27:54 +00001350 because the initial request to generate base may return a bigger
1351 block than we asked for, so it is important to distinguish the base
1352 request size and the base actual size.
1353
1354 frag_b align_b
1355 | |
1356 | frag_p | align_p
1357 | | | |
1358 v v v v
1359
1360 +---+ +---+---+ +---+
1361 | L |----------------| H | L |---------------| H |
1362 +---+ +---+---+ +---+
1363
1364 ^ ^ ^
1365 | | :
1366 | base_p this addr must be aligned
1367 |
1368 base_b
1369
1370 . . . . . . .
nethercote2d5b8162004-08-11 09:40:52 +00001371 <------ frag_bszB -------> . . .
1372 . <------------- base_pszB_act -----------> .
sewardjde4a1d02002-03-22 01:27:54 +00001373 . . . . . . .
1374
1375*/
njn717cde52005-05-10 02:47:21 +00001376void* VG_(arena_memalign) ( ArenaId aid, SizeT req_alignB, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001377{
nethercote7ac7f7b2004-11-02 12:36:02 +00001378 SizeT base_pszB_req, base_pszB_act, frag_bszB;
nethercote2d5b8162004-08-11 09:40:52 +00001379 Block *base_b, *align_b;
1380 UByte *base_p, *align_p;
nethercote7ac7f7b2004-11-02 12:36:02 +00001381 SizeT saved_bytes_on_loan;
sewardjde4a1d02002-03-22 01:27:54 +00001382 Arena* a;
1383
sewardj45f4e7c2005-09-27 19:20:21 +00001384 ensure_mm_init(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001385 a = arenaId_to_ArenaP(aid);
1386
nethercote7ac7f7b2004-11-02 12:36:02 +00001387 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00001388
nethercote2d5b8162004-08-11 09:40:52 +00001389 // Check that the requested alignment seems reasonable; that is, is
1390 // a power of 2.
1391 if (req_alignB < VG_MIN_MALLOC_SZB
1392 || req_alignB > 1048576
njn717cde52005-05-10 02:47:21 +00001393 || VG_(log2)( req_alignB ) == -1 /* not a power of 2 */) {
njn8a7b41b2007-09-23 00:51:24 +00001394 VG_(printf)("VG_(arena_memalign)(%p, %lu, %lu)\nbad alignment",
nethercote2d5b8162004-08-11 09:40:52 +00001395 a, req_alignB, req_pszB );
njn717cde52005-05-10 02:47:21 +00001396 VG_(core_panic)("VG_(arena_memalign)");
nethercote2d5b8162004-08-11 09:40:52 +00001397 /*NOTREACHED*/
sewardjde4a1d02002-03-22 01:27:54 +00001398 }
nethercote2d5b8162004-08-11 09:40:52 +00001399 // Paranoid
1400 vg_assert(req_alignB % VG_MIN_MALLOC_SZB == 0);
sewardjde4a1d02002-03-22 01:27:54 +00001401
1402 /* Required payload size for the aligned chunk. */
nethercote2d5b8162004-08-11 09:40:52 +00001403 req_pszB = align_req_pszB(req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001404
nethercote2d5b8162004-08-11 09:40:52 +00001405 /* Payload size to request for the big block that we will split up. */
1406 base_pszB_req = req_pszB + min_useful_bszB(a) + req_alignB;
sewardjde4a1d02002-03-22 01:27:54 +00001407
1408 /* Payload ptr for the block we are going to split. Note this
1409 changes a->bytes_on_loan; we save and restore it ourselves. */
1410 saved_bytes_on_loan = a->bytes_on_loan;
nethercote2d5b8162004-08-11 09:40:52 +00001411 base_p = VG_(arena_malloc) ( aid, base_pszB_req );
sewardjde4a1d02002-03-22 01:27:54 +00001412 a->bytes_on_loan = saved_bytes_on_loan;
1413
tom8af1a172005-10-06 12:04:26 +00001414 /* Give up if we couldn't allocate enough space */
1415 if (base_p == 0)
1416 return 0;
1417
sewardjde4a1d02002-03-22 01:27:54 +00001418 /* Block ptr for the block we are going to split. */
nethercote2d5b8162004-08-11 09:40:52 +00001419 base_b = get_payload_block ( a, base_p );
sewardjde4a1d02002-03-22 01:27:54 +00001420
1421 /* Pointer to the payload of the aligned block we are going to
1422 return. This has to be suitably aligned. */
nethercote2d5b8162004-08-11 09:40:52 +00001423 align_p = align_upwards ( base_b + 2 * overhead_szB_lo(a)
1424 + overhead_szB_hi(a),
sewardjde4a1d02002-03-22 01:27:54 +00001425 req_alignB );
nethercote2d5b8162004-08-11 09:40:52 +00001426 align_b = get_payload_block(a, align_p);
sewardjde4a1d02002-03-22 01:27:54 +00001427
1428 /* The block size of the fragment we will create. This must be big
1429 enough to actually create a fragment. */
nethercote2d5b8162004-08-11 09:40:52 +00001430 frag_bszB = align_b - base_b;
1431
1432 vg_assert(frag_bszB >= min_useful_bszB(a));
sewardjde4a1d02002-03-22 01:27:54 +00001433
1434 /* The actual payload size of the block we are going to split. */
njn089f51f2005-07-17 18:12:00 +00001435 base_pszB_act = get_pszB(a, base_b);
sewardjde4a1d02002-03-22 01:27:54 +00001436
nethercote2d5b8162004-08-11 09:40:52 +00001437 /* Create the fragment block, and put it back on the relevant free list. */
1438 mkFreeBlock ( a, base_b, frag_bszB,
1439 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)) );
sewardjde4a1d02002-03-22 01:27:54 +00001440
1441 /* Create the aligned block. */
nethercote2d5b8162004-08-11 09:40:52 +00001442 mkInuseBlock ( a, align_b,
1443 base_p + base_pszB_act
1444 + overhead_szB_hi(a) - (UByte*)align_b );
sewardjde4a1d02002-03-22 01:27:54 +00001445
1446 /* Final sanity checks. */
njn472cc7c2005-07-17 17:20:30 +00001447 vg_assert( is_inuse_block(get_payload_block(a, align_p)) );
sewardjde4a1d02002-03-22 01:27:54 +00001448
njn089f51f2005-07-17 18:12:00 +00001449 vg_assert(req_pszB <= get_pszB(a, get_payload_block(a, align_p)));
sewardjde4a1d02002-03-22 01:27:54 +00001450
njn089f51f2005-07-17 18:12:00 +00001451 a->bytes_on_loan += get_pszB(a, get_payload_block(a, align_p));
sewardjde4a1d02002-03-22 01:27:54 +00001452 if (a->bytes_on_loan > a->bytes_on_loan_max)
1453 a->bytes_on_loan_max = a->bytes_on_loan;
1454
1455# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00001456 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001457# endif
1458
nethercote2d5b8162004-08-11 09:40:52 +00001459 vg_assert( (((Addr)align_p) % req_alignB) == 0 );
sewardjb5f6f512005-03-10 23:59:00 +00001460
sewardj45f4e7c2005-09-27 19:20:21 +00001461 //zzVALGRIND_MALLOCLIKE_BLOCK(align_p, req_pszB, 0, False);
sewardjb5f6f512005-03-10 23:59:00 +00001462
nethercote2d5b8162004-08-11 09:40:52 +00001463 return align_p;
1464}
1465
1466
njn32397c02007-11-10 04:08:08 +00001467// The ThreadId doesn't matter, it's not used.
njn2dc09e62005-08-17 04:03:31 +00001468SizeT VG_(arena_payload_szB) ( ThreadId tid, ArenaId aid, void* ptr )
nethercote2d5b8162004-08-11 09:40:52 +00001469{
1470 Arena* a = arenaId_to_ArenaP(aid);
1471 Block* b = get_payload_block(a, ptr);
njn089f51f2005-07-17 18:12:00 +00001472 return get_pszB(a, b);
sewardjde4a1d02002-03-22 01:27:54 +00001473}
1474
bart545380e2008-04-21 17:28:50 +00001475
1476// Implementation of mallinfo(). There is no recent standard that defines
1477// the behavior of mallinfo(). The meaning of the fields in struct mallinfo
1478// is as follows:
1479//
1480// struct mallinfo {
1481// int arena; /* total space in arena */
1482// int ordblks; /* number of ordinary blocks */
1483// int smblks; /* number of small blocks */
1484// int hblks; /* number of holding blocks */
1485// int hblkhd; /* space in holding block headers */
1486// int usmblks; /* space in small blocks in use */
1487// int fsmblks; /* space in free small blocks */
1488// int uordblks; /* space in ordinary blocks in use */
1489// int fordblks; /* space in free ordinary blocks */
1490// int keepcost; /* space penalty if keep option */
1491// /* is used */
1492// };
1493//
1494// The glibc documentation about mallinfo (which is somewhat outdated) can
1495// be found here:
1496// http://www.gnu.org/software/libtool/manual/libc/Statistics-of-Malloc.html
1497//
1498// See also http://bugs.kde.org/show_bug.cgi?id=160956.
1499//
1500// Regarding the implementation of VG_(mallinfo)(): we cannot return the
1501// whole struct as the library function does, because this is called by a
1502// client request. So instead we use a pointer to do call by reference.
njn088bfb42005-08-17 05:01:37 +00001503void VG_(mallinfo) ( ThreadId tid, struct vg_mallinfo* mi )
1504{
bartc3c98392008-04-19 14:43:30 +00001505 UInt i, free_blocks, free_blocks_size;
1506 Arena* a = arenaId_to_ArenaP(VG_AR_CLIENT);
1507
1508 // Traverse free list and calculate free blocks statistics.
1509 // This may seem slow but glibc works the same way.
1510 free_blocks_size = free_blocks = 0;
1511 for (i = 0; i < N_MALLOC_LISTS; i++) {
1512 Block* b = a->freelist[i];
1513 if (b == NULL) continue;
1514 for (;;) {
1515 free_blocks++;
1516 free_blocks_size += get_pszB(a, b);
1517 b = get_next_b(b);
1518 if (b == a->freelist[i]) break;
1519 }
1520 }
1521
1522 // We don't have fastbins so smblks & fsmblks are always 0. Also we don't
bart545380e2008-04-21 17:28:50 +00001523 // have a separate mmap allocator so set hblks & hblkhd to 0.
bartc3c98392008-04-19 14:43:30 +00001524 mi->arena = a->bytes_mmaped;
bart545380e2008-04-21 17:28:50 +00001525 mi->ordblks = free_blocks + VG_(free_queue_length);
bartc3c98392008-04-19 14:43:30 +00001526 mi->smblks = 0;
1527 mi->hblks = 0;
1528 mi->hblkhd = 0;
1529 mi->usmblks = 0;
1530 mi->fsmblks = 0;
bart545380e2008-04-21 17:28:50 +00001531 mi->uordblks = a->bytes_on_loan - VG_(free_queue_volume);
1532 mi->fordblks = free_blocks_size + VG_(free_queue_volume);
bartc3c98392008-04-19 14:43:30 +00001533 mi->keepcost = 0; // may want some value in here
njn088bfb42005-08-17 05:01:37 +00001534}
sewardjde4a1d02002-03-22 01:27:54 +00001535
sewardj45f4e7c2005-09-27 19:20:21 +00001536
sewardjde4a1d02002-03-22 01:27:54 +00001537/*------------------------------------------------------------*/
1538/*--- Services layered on top of malloc/free. ---*/
1539/*------------------------------------------------------------*/
1540
njn828022a2005-03-13 14:56:31 +00001541void* VG_(arena_calloc) ( ArenaId aid, SizeT nmemb, SizeT bytes_per_memb )
sewardjde4a1d02002-03-22 01:27:54 +00001542{
nethercote7ac7f7b2004-11-02 12:36:02 +00001543 SizeT size;
sewardjde4a1d02002-03-22 01:27:54 +00001544 UChar* p;
njn25e49d8e72002-09-23 09:36:25 +00001545
njn926ed472005-03-11 04:44:10 +00001546 size = nmemb * bytes_per_memb;
1547 vg_assert(size >= nmemb && size >= bytes_per_memb);// check against overflow
njn3e884182003-04-15 13:03:23 +00001548
njn828022a2005-03-13 14:56:31 +00001549 p = VG_(arena_malloc) ( aid, size );
njn3e884182003-04-15 13:03:23 +00001550
njn926ed472005-03-11 04:44:10 +00001551 VG_(memset)(p, 0, size);
sewardjb5f6f512005-03-10 23:59:00 +00001552
sewardj45f4e7c2005-09-27 19:20:21 +00001553 //zzVALGRIND_MALLOCLIKE_BLOCK(p, size, 0, True);
njn25e49d8e72002-09-23 09:36:25 +00001554
sewardjde4a1d02002-03-22 01:27:54 +00001555 return p;
1556}
1557
1558
njn828022a2005-03-13 14:56:31 +00001559void* VG_(arena_realloc) ( ArenaId aid, void* ptr, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001560{
1561 Arena* a;
njn089f51f2005-07-17 18:12:00 +00001562 SizeT old_pszB;
sewardjb5f6f512005-03-10 23:59:00 +00001563 UChar *p_new;
nethercote2d5b8162004-08-11 09:40:52 +00001564 Block* b;
sewardjde4a1d02002-03-22 01:27:54 +00001565
sewardj45f4e7c2005-09-27 19:20:21 +00001566 ensure_mm_init(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001567 a = arenaId_to_ArenaP(aid);
1568
nethercote7ac7f7b2004-11-02 12:36:02 +00001569 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00001570
nethercote2d5b8162004-08-11 09:40:52 +00001571 b = get_payload_block(a, ptr);
1572 vg_assert(blockSane(a, b));
sewardjde4a1d02002-03-22 01:27:54 +00001573
njn472cc7c2005-07-17 17:20:30 +00001574 vg_assert(is_inuse_block(b));
njn089f51f2005-07-17 18:12:00 +00001575 old_pszB = get_pszB(a, b);
sewardjde4a1d02002-03-22 01:27:54 +00001576
njn25e49d8e72002-09-23 09:36:25 +00001577 if (req_pszB <= old_pszB) {
njn25e49d8e72002-09-23 09:36:25 +00001578 return ptr;
1579 }
sewardjde4a1d02002-03-22 01:27:54 +00001580
njn828022a2005-03-13 14:56:31 +00001581 p_new = VG_(arena_malloc) ( aid, req_pszB );
1582
sewardjb5f6f512005-03-10 23:59:00 +00001583 VG_(memcpy)(p_new, ptr, old_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001584
sewardjb5f6f512005-03-10 23:59:00 +00001585 VG_(arena_free)(aid, ptr);
njn25e49d8e72002-09-23 09:36:25 +00001586
sewardjde4a1d02002-03-22 01:27:54 +00001587 return p_new;
1588}
1589
1590
njn6ba622c2005-06-11 01:12:08 +00001591/* Inline just for the wrapper VG_(strdup) below */
1592__inline__ Char* VG_(arena_strdup) ( ArenaId aid, const Char* s )
1593{
1594 Int i;
1595 Int len;
1596 Char* res;
1597
1598 if (s == NULL)
1599 return NULL;
1600
1601 len = VG_(strlen)(s) + 1;
1602 res = VG_(arena_malloc) (aid, len);
1603
1604 for (i = 0; i < len; i++)
1605 res[i] = s[i];
1606 return res;
1607}
1608
1609
sewardjde4a1d02002-03-22 01:27:54 +00001610/*------------------------------------------------------------*/
nethercote996901a2004-08-03 13:29:09 +00001611/*--- Tool-visible functions. ---*/
njn25e49d8e72002-09-23 09:36:25 +00001612/*------------------------------------------------------------*/
1613
nethercote2d5b8162004-08-11 09:40:52 +00001614// All just wrappers to avoid exposing arenas to tools.
njn25e49d8e72002-09-23 09:36:25 +00001615
nethercote7ac7f7b2004-11-02 12:36:02 +00001616void* VG_(malloc) ( SizeT nbytes )
njn25e49d8e72002-09-23 09:36:25 +00001617{
nethercote60f5b822004-01-26 17:24:42 +00001618 return VG_(arena_malloc) ( VG_AR_TOOL, nbytes );
njn25e49d8e72002-09-23 09:36:25 +00001619}
1620
// Free a block previously obtained via VG_(malloc)/VG_(calloc)/etc.
// (i.e. from the tool arena).
void VG_(free) ( void* ptr )
{
   VG_(arena_free) ( VG_AR_TOOL, ptr );
}
1625
njn926ed472005-03-11 04:44:10 +00001626void* VG_(calloc) ( SizeT nmemb, SizeT bytes_per_memb )
njn25e49d8e72002-09-23 09:36:25 +00001627{
njn828022a2005-03-13 14:56:31 +00001628 return VG_(arena_calloc) ( VG_AR_TOOL, nmemb, bytes_per_memb );
njn25e49d8e72002-09-23 09:36:25 +00001629}
1630
nethercote7ac7f7b2004-11-02 12:36:02 +00001631void* VG_(realloc) ( void* ptr, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001632{
njn828022a2005-03-13 14:56:31 +00001633 return VG_(arena_realloc) ( VG_AR_TOOL, ptr, size );
njn25e49d8e72002-09-23 09:36:25 +00001634}
1635
njn6ba622c2005-06-11 01:12:08 +00001636Char* VG_(strdup) ( const Char* s )
1637{
1638 return VG_(arena_strdup) ( VG_AR_TOOL, s );
1639}
1640
njn32397c02007-11-10 04:08:08 +00001641// Useful for querying user blocks.
1642SizeT VG_(malloc_usable_size) ( void* p )
1643{
1644 return VG_(arena_payload_szB)(VG_INVALID_THREADID, VG_AR_CLIENT, p);
1645}
1646
1647
sewardjde4a1d02002-03-22 01:27:54 +00001648/*--------------------------------------------------------------------*/
njn717cde52005-05-10 02:47:21 +00001649/*--- end ---*/
sewardjde4a1d02002-03-22 01:27:54 +00001650/*--------------------------------------------------------------------*/