blob: c775a163f2f15a1de229a6c4658989238c118755 [file] [log] [blame]
sewardjde4a1d02002-03-22 01:27:54 +00001
2/*--------------------------------------------------------------------*/
3/*--- An implementation of malloc/free which doesn't use sbrk. ---*/
njn717cde52005-05-10 02:47:21 +00004/*--- m_mallocfree.c ---*/
sewardjde4a1d02002-03-22 01:27:54 +00005/*--------------------------------------------------------------------*/
6
7/*
njnb9c427c2004-12-01 14:14:42 +00008 This file is part of Valgrind, a dynamic binary instrumentation
9 framework.
sewardjde4a1d02002-03-22 01:27:54 +000010
sewardj4d474d02008-02-11 11:34:59 +000011 Copyright (C) 2000-2008 Julian Seward
sewardjde4a1d02002-03-22 01:27:54 +000012 jseward@acm.org
sewardjde4a1d02002-03-22 01:27:54 +000013
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
njn25e49d8e72002-09-23 09:36:25 +000029 The GNU General Public License is contained in the file COPYING.
sewardjde4a1d02002-03-22 01:27:54 +000030*/
31
njnc7561b92005-06-19 01:24:32 +000032#include "pub_core_basics.h"
sewardj4cfea4f2006-10-14 19:26:10 +000033#include "pub_core_vki.h"
sewardj45f4e7c2005-09-27 19:20:21 +000034#include "pub_core_debuglog.h"
njn97405b22005-06-02 03:39:33 +000035#include "pub_core_libcbase.h"
sewardj45f4e7c2005-09-27 19:20:21 +000036#include "pub_core_aspacemgr.h"
njn132bfcc2005-06-04 19:16:06 +000037#include "pub_core_libcassert.h"
njn36a20fa2005-06-03 03:08:39 +000038#include "pub_core_libcprint.h"
njnaf1d7df2005-06-11 01:31:52 +000039#include "pub_core_mallocfree.h"
njn20242342005-05-16 23:31:24 +000040#include "pub_core_options.h"
njn32397c02007-11-10 04:08:08 +000041#include "pub_core_threadstate.h" // For VG_INVALID_THREADID
njnfc51f8d2005-06-21 03:20:17 +000042#include "pub_core_tooliface.h"
njn296c24d2005-05-15 03:52:40 +000043#include "valgrind.h"
sewardj55f9d1a2005-04-25 11:11:44 +000044
sewardjb5f6f512005-03-10 23:59:00 +000045//zz#include "memcheck/memcheck.h"
sewardjde4a1d02002-03-22 01:27:54 +000046
sewardj0b3fd2d2007-08-21 10:55:26 +000047// #define DEBUG_MALLOC // turn on heavyweight debugging machinery
48// #define VERBOSE_MALLOC // make verbose, esp. in debugging machinery
nethercote2d5b8162004-08-11 09:40:52 +000049
bart545380e2008-04-21 17:28:50 +000050/* Number and total size of blocks in free queue. Used by mallinfo(). */
51Long VG_(free_queue_volume) = 0;
52Long VG_(free_queue_length) = 0;
53
sewardj9c606bd2008-09-18 18:12:50 +000054static void cc_analyse_alloc_arena ( ArenaId aid ); /* fwds */
55
nethercote2d5b8162004-08-11 09:40:52 +000056/*------------------------------------------------------------*/
57/*--- Main types ---*/
58/*------------------------------------------------------------*/
59
sewardjc1ac9772007-08-20 22:57:56 +000060#define N_MALLOC_LISTS 112 // do not change this
nethercote2d5b8162004-08-11 09:40:52 +000061
nethercote7ac7f7b2004-11-02 12:36:02 +000062// The amount you can ask for is limited only by sizeof(SizeT)...
63#define MAX_PSZB (~((SizeT)0x0))
nethercote2d5b8162004-08-11 09:40:52 +000064
sewardj0b3fd2d2007-08-21 10:55:26 +000065// Each arena has a sorted array of superblocks, which expands
66// dynamically. This is its initial size.
67#define SBLOCKS_SIZE_INITIAL 50
68
nethercote2d5b8162004-08-11 09:40:52 +000069typedef UChar UByte;
70
njn8d3f8452005-07-20 04:12:41 +000071/* Layout of an in-use block:
nethercote2d5b8162004-08-11 09:40:52 +000072
sewardj94c8eb42008-09-19 20:13:39 +000073 cost center (OPTIONAL) (sizeof(ULong) bytes, only when h-p enabled)
njn8d3f8452005-07-20 04:12:41 +000074 this block total szB (sizeof(SizeT) bytes)
njn7ce83112005-08-24 22:38:00 +000075 red zone bytes (depends on Arena.rz_szB, but >= sizeof(void*))
njn8d3f8452005-07-20 04:12:41 +000076 (payload bytes)
njn7ce83112005-08-24 22:38:00 +000077 red zone bytes (depends on Arena.rz_szB, but >= sizeof(void*))
njn8d3f8452005-07-20 04:12:41 +000078 this block total szB (sizeof(SizeT) bytes)
nethercote2d5b8162004-08-11 09:40:52 +000079
njn8d3f8452005-07-20 04:12:41 +000080 Layout of a block on the free list:
nethercote2d5b8162004-08-11 09:40:52 +000081
sewardj94c8eb42008-09-19 20:13:39 +000082 cost center (OPTIONAL) (sizeof(ULong) bytes, only when h-p enabled)
njn8d3f8452005-07-20 04:12:41 +000083 this block total szB (sizeof(SizeT) bytes)
84 freelist previous ptr (sizeof(void*) bytes)
85 excess red zone bytes (if Arena.rz_szB > sizeof(void*))
86 (payload bytes)
87 excess red zone bytes (if Arena.rz_szB > sizeof(void*))
88 freelist next ptr (sizeof(void*) bytes)
89 this block total szB (sizeof(SizeT) bytes)
nethercote2d5b8162004-08-11 09:40:52 +000090
njn8d3f8452005-07-20 04:12:41 +000091 Total size in bytes (bszB) and payload size in bytes (pszB)
92 are related by:
nethercote2d5b8162004-08-11 09:40:52 +000093
sewardj94c8eb42008-09-19 20:13:39 +000094 bszB == pszB + 2*sizeof(SizeT) + 2*a->rz_szB
95
96 when heap profiling is not enabled, and
97
sewardj9c606bd2008-09-18 18:12:50 +000098 bszB == pszB + 2*sizeof(SizeT) + 2*a->rz_szB + sizeof(ULong)
njn8d3f8452005-07-20 04:12:41 +000099
sewardj94c8eb42008-09-19 20:13:39 +0000100 when it is enabled. It follows that the minimum overhead per heap
101 block for arenas used by the core is:
102
103 32-bit platforms: 2*4 + 2*4 == 16 bytes
104 64-bit platforms: 2*8 + 2*8 == 32 bytes
105
106 when heap profiling is not enabled, and
njna527a492005-12-16 17:06:37 +0000107
sewardj9c606bd2008-09-18 18:12:50 +0000108 32-bit platforms: 2*4 + 2*4 + 8 == 24 bytes
109 64-bit platforms: 2*8 + 2*8 + 8 == 40 bytes
njna527a492005-12-16 17:06:37 +0000110
sewardj94c8eb42008-09-19 20:13:39 +0000111 when it is enabled. In all cases, extra overhead may be incurred
112 when rounding the payload size up to VG_MIN_MALLOC_SZB.
njna527a492005-12-16 17:06:37 +0000113
njn8d3f8452005-07-20 04:12:41 +0000114 Furthermore, both size fields in the block have their least-significant
115 bit set if the block is not in use, and unset if it is in use.
116 (The bottom 3 or so bits are always free for this because of alignment.)
117 A block size of zero is not possible, because a block always has at
118 least two SizeTs and two pointers of overhead.
119
120 Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned. This is
121 achieved by ensuring that Superblocks are VG_MIN_MALLOC_SZB-aligned
122 (see newSuperblock() for how), and that the lengths of the following
123 things are a multiple of VG_MIN_MALLOC_SZB:
124 - Superblock admin section lengths (due to elastic padding)
125 - Block admin section (low and high) lengths (due to elastic redzones)
126 - Block payload lengths (due to req_pszB rounding up)
sewardj9c606bd2008-09-18 18:12:50 +0000127
128 The heap-profile cost-center field is 8 bytes even on 32 bit
129 platforms. This is so as to keep the payload field 8-aligned. On
130 a 64-bit platform, this cc-field contains a pointer to a const
131 HChar*, which is the cost center name. On 32-bit platforms, the
132 pointer lives in the lower-addressed half of the field, regardless
133 of the endianness of the host.
nethercote2d5b8162004-08-11 09:40:52 +0000134*/
135typedef
136 struct {
137 // No fields are actually used in this struct, because a Block has
njn37517e82005-05-25 15:52:39 +0000138 // many variable sized fields and so can't be accessed
nethercote2d5b8162004-08-11 09:40:52 +0000139 // meaningfully with normal fields. So we use access functions all
140 // the time. This struct gives us a type to use, though. Also, we
141 // make sizeof(Block) 1 byte so that we can do arithmetic with the
142 // Block* type in increments of 1!
143 UByte dummy;
144 }
145 Block;
146
147// A superblock. 'padding' is never used, it just ensures that if the
148// entire Superblock is aligned to VG_MIN_MALLOC_SZB, then payload_bytes[]
149// will be too. It can add small amounts of padding unnecessarily -- eg.
150// 8-bytes on 32-bit machines with an 8-byte VG_MIN_MALLOC_SZB -- because
151// it's too hard to make a constant expression that works perfectly in all
152// cases.
153// payload_bytes[] is made a single big Block when the Superblock is
154// created, and then can be split and the splittings remerged, but Blocks
155// always cover its entire length -- there's never any unused bytes at the
156// end, for example.
sewardj0b3fd2d2007-08-21 10:55:26 +0000157typedef
nethercote2d5b8162004-08-11 09:40:52 +0000158 struct _Superblock {
nethercote7ac7f7b2004-11-02 12:36:02 +0000159 SizeT n_payload_bytes;
sewardj0b3fd2d2007-08-21 10:55:26 +0000160 void* padding2;
161 UByte padding[ VG_MIN_MALLOC_SZB -
162 ((sizeof(struct _Superblock*) + sizeof(SizeT)) %
nethercote7ac7f7b2004-11-02 12:36:02 +0000163 VG_MIN_MALLOC_SZB) ];
nethercote2d5b8162004-08-11 09:40:52 +0000164 UByte payload_bytes[0];
165 }
166 Superblock;
167
168// An arena. 'freelist' is a circular, doubly-linked list. 'rz_szB' is
169// elastic, in that it can be bigger than asked-for to ensure alignment.
sewardj0b3fd2d2007-08-21 10:55:26 +0000170typedef
nethercote2d5b8162004-08-11 09:40:52 +0000171 struct {
sewardj0b3fd2d2007-08-21 10:55:26 +0000172 Char* name;
173 Bool clientmem; // Allocates in the client address space?
174 SizeT rz_szB; // Red zone size in bytes
175 SizeT min_sblock_szB; // Minimum superblock size in bytes
176 Block* freelist[N_MALLOC_LISTS];
177 // A dynamically expanding, ordered array of (pointers to)
178 // superblocks in the arena. If this array is expanded, which
179 // is rare, the previous space it occupies is simply abandoned.
180 // To avoid having to get yet another block from m_aspacemgr for
181 // the first incarnation of this array, the first allocation of
182 // it is within this struct. If it has to be expanded then the
183 // new space is acquired from m_aspacemgr as you would expect.
184 Superblock** sblocks;
185 SizeT sblocks_size;
186 SizeT sblocks_used;
187 Superblock* sblocks_initial[SBLOCKS_SIZE_INITIAL];
nethercote2d5b8162004-08-11 09:40:52 +0000188 // Stats only.
sewardj0b3fd2d2007-08-21 10:55:26 +0000189 SizeT bytes_on_loan;
190 SizeT bytes_mmaped;
191 SizeT bytes_on_loan_max;
sewardj9c606bd2008-09-18 18:12:50 +0000192 SizeT next_profile_at;
sewardj0b3fd2d2007-08-21 10:55:26 +0000193 }
nethercote2d5b8162004-08-11 09:40:52 +0000194 Arena;
195
196
197/*------------------------------------------------------------*/
198/*--- Low-level functions for working with Blocks. ---*/
199/*------------------------------------------------------------*/
200
nethercote7ac7f7b2004-11-02 12:36:02 +0000201#define SIZE_T_0x1 ((SizeT)0x1)
202
njn8d3f8452005-07-20 04:12:41 +0000203// Mark a bszB as in-use, and not in-use, and remove the in-use attribute.
nethercote2d5b8162004-08-11 09:40:52 +0000204static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000205SizeT mk_inuse_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000206{
207 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000208 return bszB & (~SIZE_T_0x1);
nethercote2d5b8162004-08-11 09:40:52 +0000209}
210static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000211SizeT mk_free_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000212{
213 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000214 return bszB | SIZE_T_0x1;
nethercote2d5b8162004-08-11 09:40:52 +0000215}
nethercote2d5b8162004-08-11 09:40:52 +0000216static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000217SizeT mk_plain_bszB ( SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +0000218{
219 vg_assert(bszB != 0);
nethercote7ac7f7b2004-11-02 12:36:02 +0000220 return bszB & (~SIZE_T_0x1);
nethercote2d5b8162004-08-11 09:40:52 +0000221}
222
sewardj94c8eb42008-09-19 20:13:39 +0000223// return either 0 or sizeof(ULong) depending on whether or not
224// heap profiling is engaged
225static __inline__
226SizeT hp_overhead_szB ( void )
227{
228 return VG_(clo_profile_heap) ? sizeof(ULong) : 0;
229}
230
njn402c8612005-08-23 22:11:20 +0000231//---------------------------------------------------------------------------
232
233// Get a block's size as stored, ie with the in-use/free attribute.
nethercote2d5b8162004-08-11 09:40:52 +0000234static __inline__
njn402c8612005-08-23 22:11:20 +0000235SizeT get_bszB_as_is ( Block* b )
nethercote2d5b8162004-08-11 09:40:52 +0000236{
njn402c8612005-08-23 22:11:20 +0000237 UByte* b2 = (UByte*)b;
sewardj94c8eb42008-09-19 20:13:39 +0000238 SizeT bszB_lo = *(SizeT*)&b2[0 + hp_overhead_szB()];
njn402c8612005-08-23 22:11:20 +0000239 SizeT bszB_hi = *(SizeT*)&b2[mk_plain_bszB(bszB_lo) - sizeof(SizeT)];
240 vg_assert2(bszB_lo == bszB_hi,
241 "Heap block lo/hi size mismatch: lo = %llu, hi = %llu.\n"
sewardj24596d82005-10-21 12:05:05 +0000242 "Probably caused by overrunning/underrunning a heap block's bounds.\n",
243 (ULong)bszB_lo, (ULong)bszB_hi);
njn402c8612005-08-23 22:11:20 +0000244 return bszB_lo;
nethercote2d5b8162004-08-11 09:40:52 +0000245}
246
njn402c8612005-08-23 22:11:20 +0000247// Get a block's plain size, ie. remove the in-use/free attribute.
248static __inline__
249SizeT get_bszB ( Block* b )
250{
251 return mk_plain_bszB(get_bszB_as_is(b));
252}
253
254// Set the size fields of a block. bszB may have the in-use/free attribute.
255static __inline__
256void set_bszB ( Block* b, SizeT bszB )
257{
258 UByte* b2 = (UByte*)b;
sewardj94c8eb42008-09-19 20:13:39 +0000259 *(SizeT*)&b2[0 + hp_overhead_szB()] = bszB;
njn402c8612005-08-23 22:11:20 +0000260 *(SizeT*)&b2[mk_plain_bszB(bszB) - sizeof(SizeT)] = bszB;
261}
262
263//---------------------------------------------------------------------------
264
njn472cc7c2005-07-17 17:20:30 +0000265// Does this block have the in-use attribute?
266static __inline__
267Bool is_inuse_block ( Block* b )
268{
njn402c8612005-08-23 22:11:20 +0000269 SizeT bszB = get_bszB_as_is(b);
njn472cc7c2005-07-17 17:20:30 +0000270 vg_assert(bszB != 0);
271 return (0 != (bszB & SIZE_T_0x1)) ? False : True;
272}
273
njn402c8612005-08-23 22:11:20 +0000274//---------------------------------------------------------------------------
njn8d3f8452005-07-20 04:12:41 +0000275
njn089f51f2005-07-17 18:12:00 +0000276// Return the lower, upper and total overhead in bytes for a block.
277// These are determined purely by which arena the block lives in.
278static __inline__
279SizeT overhead_szB_lo ( Arena* a )
280{
sewardj94c8eb42008-09-19 20:13:39 +0000281 return hp_overhead_szB() + sizeof(SizeT) + a->rz_szB;
njn089f51f2005-07-17 18:12:00 +0000282}
283static __inline__
284SizeT overhead_szB_hi ( Arena* a )
285{
njn8d3f8452005-07-20 04:12:41 +0000286 return a->rz_szB + sizeof(SizeT);
njn089f51f2005-07-17 18:12:00 +0000287}
288static __inline__
289SizeT overhead_szB ( Arena* a )
290{
291 return overhead_szB_lo(a) + overhead_szB_hi(a);
292}
293
njn402c8612005-08-23 22:11:20 +0000294//---------------------------------------------------------------------------
295
njn089f51f2005-07-17 18:12:00 +0000296// Return the minimum bszB for a block in this arena. Can have zero-length
297// payloads, so it's the size of the admin bytes.
298static __inline__
299SizeT min_useful_bszB ( Arena* a )
300{
301 return overhead_szB(a);
302}
303
njn402c8612005-08-23 22:11:20 +0000304//---------------------------------------------------------------------------
305
njn089f51f2005-07-17 18:12:00 +0000306// Convert payload size <--> block size (both in bytes).
307static __inline__
308SizeT pszB_to_bszB ( Arena* a, SizeT pszB )
309{
310 return pszB + overhead_szB(a);
311}
312static __inline__
313SizeT bszB_to_pszB ( Arena* a, SizeT bszB )
314{
315 vg_assert(bszB >= overhead_szB(a));
316 return bszB - overhead_szB(a);
317}
318
njn402c8612005-08-23 22:11:20 +0000319//---------------------------------------------------------------------------
nethercote2d5b8162004-08-11 09:40:52 +0000320
njn089f51f2005-07-17 18:12:00 +0000321// Get a block's payload size.
nethercote7ac7f7b2004-11-02 12:36:02 +0000322static __inline__
njn089f51f2005-07-17 18:12:00 +0000323SizeT get_pszB ( Arena* a, Block* b )
nethercote7ac7f7b2004-11-02 12:36:02 +0000324{
njn089f51f2005-07-17 18:12:00 +0000325 return bszB_to_pszB(a, get_bszB(b));
nethercote7ac7f7b2004-11-02 12:36:02 +0000326}
327
njn402c8612005-08-23 22:11:20 +0000328//---------------------------------------------------------------------------
329
330// Given the addr of a block, return the addr of its payload, and vice versa.
nethercote2d5b8162004-08-11 09:40:52 +0000331static __inline__
332UByte* get_block_payload ( Arena* a, Block* b )
333{
334 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000335 return & b2[ overhead_szB_lo(a) ];
nethercote2d5b8162004-08-11 09:40:52 +0000336}
337// Given the addr of a block's payload, return the addr of the block itself.
338static __inline__
339Block* get_payload_block ( Arena* a, UByte* payload )
340{
nethercote7ac7f7b2004-11-02 12:36:02 +0000341 return (Block*)&payload[ -overhead_szB_lo(a) ];
nethercote2d5b8162004-08-11 09:40:52 +0000342}
343
njn402c8612005-08-23 22:11:20 +0000344//---------------------------------------------------------------------------
nethercote2d5b8162004-08-11 09:40:52 +0000345
346// Set and get the next and previous link fields of a block.
347static __inline__
348void set_prev_b ( Block* b, Block* prev_p )
349{
350 UByte* b2 = (UByte*)b;
sewardj94c8eb42008-09-19 20:13:39 +0000351 *(Block**)&b2[hp_overhead_szB() + sizeof(SizeT)] = prev_p;
nethercote2d5b8162004-08-11 09:40:52 +0000352}
353static __inline__
354void set_next_b ( Block* b, Block* next_p )
355{
njn402c8612005-08-23 22:11:20 +0000356 UByte* b2 = (UByte*)b;
357 *(Block**)&b2[get_bszB(b) - sizeof(SizeT) - sizeof(void*)] = next_p;
nethercote2d5b8162004-08-11 09:40:52 +0000358}
359static __inline__
360Block* get_prev_b ( Block* b )
361{
362 UByte* b2 = (UByte*)b;
sewardj94c8eb42008-09-19 20:13:39 +0000363 return *(Block**)&b2[hp_overhead_szB() + sizeof(SizeT)];
nethercote2d5b8162004-08-11 09:40:52 +0000364}
365static __inline__
366Block* get_next_b ( Block* b )
367{
njn402c8612005-08-23 22:11:20 +0000368 UByte* b2 = (UByte*)b;
369 return *(Block**)&b2[get_bszB(b) - sizeof(SizeT) - sizeof(void*)];
nethercote2d5b8162004-08-11 09:40:52 +0000370}
371
njn402c8612005-08-23 22:11:20 +0000372//---------------------------------------------------------------------------
nethercote2d5b8162004-08-11 09:40:52 +0000373
sewardj9c606bd2008-09-18 18:12:50 +0000374// Set and get the cost-center field of a block.
375static __inline__
376void set_cc ( Block* b, HChar* cc )
377{
378 UByte* b2 = (UByte*)b;
sewardj94c8eb42008-09-19 20:13:39 +0000379 vg_assert( VG_(clo_profile_heap) );
sewardj9c606bd2008-09-18 18:12:50 +0000380 *(HChar**)&b2[0] = cc;
381}
382static __inline__
383HChar* get_cc ( Block* b )
384{
385 UByte* b2 = (UByte*)b;
sewardj94c8eb42008-09-19 20:13:39 +0000386 vg_assert( VG_(clo_profile_heap) );
sewardj9c606bd2008-09-18 18:12:50 +0000387 return *(HChar**)&b2[0];
388}
389
390//---------------------------------------------------------------------------
391
nethercote2d5b8162004-08-11 09:40:52 +0000392// Get the block immediately preceding this one in the Superblock.
393static __inline__
394Block* get_predecessor_block ( Block* b )
395{
396 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000397 SizeT bszB = mk_plain_bszB( (*(SizeT*)&b2[-sizeof(SizeT)]) );
nethercote2d5b8162004-08-11 09:40:52 +0000398 return (Block*)&b2[-bszB];
399}
400
njn402c8612005-08-23 22:11:20 +0000401//---------------------------------------------------------------------------
402
nethercote2d5b8162004-08-11 09:40:52 +0000403// Read and write the lower and upper red-zone bytes of a block.
404static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000405void set_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
nethercote2d5b8162004-08-11 09:40:52 +0000406{
407 UByte* b2 = (UByte*)b;
sewardj94c8eb42008-09-19 20:13:39 +0000408 b2[hp_overhead_szB() + sizeof(SizeT) + rz_byteno] = v;
nethercote2d5b8162004-08-11 09:40:52 +0000409}
410static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000411void set_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
nethercote2d5b8162004-08-11 09:40:52 +0000412{
njn402c8612005-08-23 22:11:20 +0000413 UByte* b2 = (UByte*)b;
414 b2[get_bszB(b) - sizeof(SizeT) - rz_byteno - 1] = v;
nethercote2d5b8162004-08-11 09:40:52 +0000415}
416static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000417UByte get_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno )
nethercote2d5b8162004-08-11 09:40:52 +0000418{
419 UByte* b2 = (UByte*)b;
sewardj94c8eb42008-09-19 20:13:39 +0000420 return b2[hp_overhead_szB() + sizeof(SizeT) + rz_byteno];
nethercote2d5b8162004-08-11 09:40:52 +0000421}
422static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +0000423UByte get_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno )
nethercote2d5b8162004-08-11 09:40:52 +0000424{
njn402c8612005-08-23 22:11:20 +0000425 UByte* b2 = (UByte*)b;
426 return b2[get_bszB(b) - sizeof(SizeT) - rz_byteno - 1];
nethercote2d5b8162004-08-11 09:40:52 +0000427}
428
429
nethercote2d5b8162004-08-11 09:40:52 +0000430/*------------------------------------------------------------*/
431/*--- Arena management ---*/
432/*------------------------------------------------------------*/
433
434#define CORE_ARENA_MIN_SZB 1048576
435
436// The arena structures themselves.
437static Arena vg_arena[VG_N_ARENAS];
438
439// Functions external to this module identify arenas using ArenaIds,
440// not Arena*s. This fn converts the former to the latter.
441static Arena* arenaId_to_ArenaP ( ArenaId arena )
442{
443 vg_assert(arena >= 0 && arena < VG_N_ARENAS);
444 return & vg_arena[arena];
445}
446
447// Initialise an arena. rz_szB is the minimum redzone size; it might be
njn30490552005-03-13 06:30:42 +0000448// made bigger to ensure that VG_MIN_MALLOC_SZB is observed.
nethercote2d5b8162004-08-11 09:40:52 +0000449static
njn0e742df2004-11-30 13:26:29 +0000450void arena_init ( ArenaId aid, Char* name, SizeT rz_szB, SizeT min_sblock_szB )
nethercote2d5b8162004-08-11 09:40:52 +0000451{
sewardj0b3fd2d2007-08-21 10:55:26 +0000452 SizeT i;
nethercote2d5b8162004-08-11 09:40:52 +0000453 Arena* a = arenaId_to_ArenaP(aid);
454
njn7ce83112005-08-24 22:38:00 +0000455 // Ensure redzones are a reasonable size. They must always be at least
456 // the size of a pointer, for holding the prev/next pointer (see the layout
457 // details at the top of this file).
458 vg_assert(rz_szB < 128);
459 if (rz_szB < sizeof(void*)) rz_szB = sizeof(void*);
460
nethercote73b526f2004-10-31 18:48:21 +0000461 vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
nethercote2d5b8162004-08-11 09:40:52 +0000462 a->name = name;
463 a->clientmem = ( VG_AR_CLIENT == aid ? True : False );
464
465 // The size of the low and high admin sections in a block must be a
njn30490552005-03-13 06:30:42 +0000466 // multiple of VG_MIN_MALLOC_SZB. So we round up the asked-for
nethercote2d5b8162004-08-11 09:40:52 +0000467 // redzone size if necessary to achieve this.
468 a->rz_szB = rz_szB;
469 while (0 != overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++;
sewardj9c606bd2008-09-18 18:12:50 +0000470 // vg_assert(overhead_szB_lo(a) == overhead_szB_hi(a));
471 vg_assert(0 == overhead_szB_lo(a) % VG_MIN_MALLOC_SZB);
472 vg_assert(0 == overhead_szB_hi(a) % VG_MIN_MALLOC_SZB);
nethercote2d5b8162004-08-11 09:40:52 +0000473
474 a->min_sblock_szB = min_sblock_szB;
njn6e6588c2005-03-13 18:52:48 +0000475 for (i = 0; i < N_MALLOC_LISTS; i++) a->freelist[i] = NULL;
sewardj0b3fd2d2007-08-21 10:55:26 +0000476
477 a->sblocks = & a->sblocks_initial[0];
478 a->sblocks_size = SBLOCKS_SIZE_INITIAL;
479 a->sblocks_used = 0;
nethercote2d5b8162004-08-11 09:40:52 +0000480 a->bytes_on_loan = 0;
481 a->bytes_mmaped = 0;
482 a->bytes_on_loan_max = 0;
sewardj9c606bd2008-09-18 18:12:50 +0000483 a->next_profile_at = 25 * 1000 * 1000;
sewardj0b3fd2d2007-08-21 10:55:26 +0000484 vg_assert(sizeof(a->sblocks_initial)
485 == SBLOCKS_SIZE_INITIAL * sizeof(Superblock*));
nethercote2d5b8162004-08-11 09:40:52 +0000486}
487
488/* Print vital stats for an arena. */
489void VG_(print_all_arena_stats) ( void )
490{
nethercote7ac7f7b2004-11-02 12:36:02 +0000491 UInt i;
nethercote2d5b8162004-08-11 09:40:52 +0000492 for (i = 0; i < VG_N_ARENAS; i++) {
493 Arena* a = arenaId_to_ArenaP(i);
494 VG_(message)(Vg_DebugMsg,
barta0b6b2c2008-07-07 06:49:24 +0000495 "%8s: %8ld mmap'd, %8ld/%8ld max/curr",
nethercote2d5b8162004-08-11 09:40:52 +0000496 a->name, a->bytes_mmaped, a->bytes_on_loan_max, a->bytes_on_loan
497 );
498 }
499}
500
sewardj9c606bd2008-09-18 18:12:50 +0000501void VG_(print_arena_cc_analysis) ( void )
502{
503 UInt i;
504 vg_assert( VG_(clo_profile_heap) );
505 for (i = 0; i < VG_N_ARENAS; i++) {
506 cc_analyse_alloc_arena(i);
507 }
508}
509
510
nethercote2d5b8162004-08-11 09:40:52 +0000511/* This library is self-initialising, as it makes this more self-contained,
512 less coupled with the outside world. Hence VG_(arena_malloc)() and
513 VG_(arena_free)() below always call ensure_mm_init() to ensure things are
sewardj45f4e7c2005-09-27 19:20:21 +0000514 correctly initialised.
515
516 We initialise the client arena separately (and later) because the core
517 must do non-client allocation before the tool has a chance to set the
518 client arena's redzone size.
519*/
sewardj0b3fd2d2007-08-21 10:55:26 +0000520static Bool client_inited = False;
521static Bool nonclient_inited = False;
522
nethercote2d5b8162004-08-11 09:40:52 +0000523static
sewardj45f4e7c2005-09-27 19:20:21 +0000524void ensure_mm_init ( ArenaId aid )
nethercote2d5b8162004-08-11 09:40:52 +0000525{
njn95c23292005-12-26 17:50:22 +0000526 static SizeT client_rz_szB = 8; // default: be paranoid
njnfc51f8d2005-06-21 03:20:17 +0000527
sewardj45f4e7c2005-09-27 19:20:21 +0000528 /* We use checked red zones (of various sizes) for our internal stuff,
nethercote2d5b8162004-08-11 09:40:52 +0000529 and an unchecked zone of arbitrary size for the client. Of
530 course the client's red zone can be checked by the tool, eg.
531 by using addressibility maps, but not by the mechanism implemented
532 here, which merely checks at the time of freeing that the red
533 zone bytes are unchanged.
534
535 Nb: redzone sizes are *minimums*; they could be made bigger to ensure
njn8d3f8452005-07-20 04:12:41 +0000536 alignment. Eg. with 8 byte alignment, on 32-bit machines 4 stays as
537 4, but 16 becomes 20; but on 64-bit machines 4 becomes 8, and 16
538 stays as 16 --- the extra 4 bytes in both are accounted for by the
539 larger prev/next ptr.
nethercote2d5b8162004-08-11 09:40:52 +0000540 */
sewardj45f4e7c2005-09-27 19:20:21 +0000541 if (VG_AR_CLIENT == aid) {
sewardj5600ab32006-10-17 01:42:40 +0000542 Int ar_client_sbszB;
sewardj45f4e7c2005-09-27 19:20:21 +0000543 if (client_inited) {
544 // This assertion ensures that a tool cannot try to change the client
545 // redzone size with VG_(needs_malloc_replacement)() after this module
546 // has done its first allocation from the client arena.
547 if (VG_(needs).malloc_replacement)
njn95c23292005-12-26 17:50:22 +0000548 vg_assert(client_rz_szB == VG_(tdict).tool_client_redzone_szB);
sewardj45f4e7c2005-09-27 19:20:21 +0000549 return;
550 }
nethercote2d5b8162004-08-11 09:40:52 +0000551
sewardj45f4e7c2005-09-27 19:20:21 +0000552 // Check and set the client arena redzone size
553 if (VG_(needs).malloc_replacement) {
njn95c23292005-12-26 17:50:22 +0000554 client_rz_szB = VG_(tdict).tool_client_redzone_szB;
sewardj45f4e7c2005-09-27 19:20:21 +0000555 // 128 is no special figure, just something not too big
njn95c23292005-12-26 17:50:22 +0000556 if (client_rz_szB > 128) {
sewardj45f4e7c2005-09-27 19:20:21 +0000557 VG_(printf)( "\nTool error:\n"
558 " specified redzone size is too big (%llu)\n",
njn95c23292005-12-26 17:50:22 +0000559 (ULong)client_rz_szB);
sewardj45f4e7c2005-09-27 19:20:21 +0000560 VG_(exit)(1);
561 }
562 }
sewardj5600ab32006-10-17 01:42:40 +0000563 // Initialise the client arena. On AIX it's important to have
564 // relatively large client blocks so as not to cause excessively
565 // fine-grained interleaving of V and C address space. On Linux
566 // this is irrelevant since aspacem can keep the two spaces
sewardjc1ac9772007-08-20 22:57:56 +0000567 // well apart, but not so on AIX. On all platforms though,
568 // increasing the superblock size reduces the number of superblocks
569 // in the client arena, which makes findSb cheaper.
sewardj5600ab32006-10-17 01:42:40 +0000570# if defined(VGO_aix5)
571 ar_client_sbszB = 16777216;
572# else
sewardjc1ac9772007-08-20 22:57:56 +0000573 ar_client_sbszB = 4194304;
sewardj5600ab32006-10-17 01:42:40 +0000574# endif
575 arena_init ( VG_AR_CLIENT, "client", client_rz_szB, ar_client_sbszB );
sewardj45f4e7c2005-09-27 19:20:21 +0000576 client_inited = True;
577
578 } else {
579 if (nonclient_inited) {
580 return;
581 }
582 // Initialise the non-client arenas
njn95c23292005-12-26 17:50:22 +0000583 arena_init ( VG_AR_CORE, "core", 4, 1048576 );
sewardjc1ac9772007-08-20 22:57:56 +0000584 arena_init ( VG_AR_TOOL, "tool", 4, 4194304 );
sewardjb8b79ad2008-03-03 01:35:41 +0000585 arena_init ( VG_AR_DINFO, "dinfo", 4, 1048576 );
njn95c23292005-12-26 17:50:22 +0000586 arena_init ( VG_AR_DEMANGLE, "demangle", 4, 65536 );
sewardjc1ac9772007-08-20 22:57:56 +0000587 arena_init ( VG_AR_EXECTXT, "exectxt", 4, 1048576 );
njn95c23292005-12-26 17:50:22 +0000588 arena_init ( VG_AR_ERRORS, "errors", 4, 65536 );
589 arena_init ( VG_AR_TTAUX, "ttaux", 4, 65536 );
sewardj45f4e7c2005-09-27 19:20:21 +0000590 nonclient_inited = True;
591 }
592
nethercote2d5b8162004-08-11 09:40:52 +0000593# ifdef DEBUG_MALLOC
sewardj0b3fd2d2007-08-21 10:55:26 +0000594 VG_(printf)("ZZZ1\n");
nethercote2d5b8162004-08-11 09:40:52 +0000595 VG_(sanity_check_malloc_all)();
sewardj0b3fd2d2007-08-21 10:55:26 +0000596 VG_(printf)("ZZZ2\n");
nethercote2d5b8162004-08-11 09:40:52 +0000597# endif
598}
599
600
601/*------------------------------------------------------------*/
602/*--- Superblock management ---*/
603/*------------------------------------------------------------*/
604
/* Report an out-of-memory condition and terminate Valgrind.
   'who' names the requester (e.g. "newSuperblock"); 'szB' is the size
   of the request that failed.  Never returns: exits with status 1.
   On the first call the message goes to the user via VG_(message);
   re-entrant calls (e.g. if VG_(message) itself runs out of memory)
   fall back to the lower-level VG_(debugLog) channel to avoid
   recursing. */
void VG_(out_of_memory_NORETURN) ( HChar* who, SizeT szB )
{
   // Latch so a crash during crash-reporting takes the debuglog path.
   static Bool alreadyCrashing = False;
   // Total anonymous memory Valgrind has mapped so far, for the report.
   ULong tot_alloc = VG_(am_get_anonsize_total)();
   // Format string consumed by VG_(message)/VG_(debugLog) below with
   // args (who, szB, tot_alloc).
   Char* s1 = 
      "\n"
      "    Valgrind's memory management: out of memory:\n"
      "       %s's request for %llu bytes failed.\n"
      "       %llu bytes have already been allocated.\n"
      "    Valgrind cannot continue.  Sorry.\n\n"
      "    There are several possible reasons for this.\n"
      "    - You have some kind of memory limit in place.  Look at the\n"
      "      output of 'ulimit -a'.  Is there a limit on the size of\n"
      "      virtual memory or address space?\n"
      "    - You have run out of swap space.\n"
      "    - Valgrind has a bug.  If you think this is the case or you are\n"
      "    not sure, please let us know and we'll try to fix it.\n"
      "    Please note that programs can take substantially more memory than\n"
      "    normal when running under Valgrind tools, eg. up to twice or\n"
      "    more, depending on the tool.  On a 64-bit machine, Valgrind\n"
      "    should be able to make use of up 32GB memory.  On a 32-bit\n"
      "    machine, Valgrind should be able to use all the memory available\n"
      "    to a single process, up to 4GB if that's how you have your\n"
      "    kernel configured.  Most 32-bit Linux setups allow a maximum of\n"
      "    3GB per process.\n\n"
      "    Whatever the reason, Valgrind cannot continue.  Sorry.\n";

   if (!alreadyCrashing) {
      alreadyCrashing = True;
      VG_(message)(Vg_UserMsg, s1, who, (ULong)szB, tot_alloc);
   } else {
      VG_(debugLog)(0,"mallocfree", s1, who, (ULong)szB, tot_alloc);
   }
   VG_(exit)(1);
}
640
641
nethercote2d5b8162004-08-11 09:40:52 +0000642// Align ptr p upwards to an align-sized boundary.
643static
nethercote7ac7f7b2004-11-02 12:36:02 +0000644void* align_upwards ( void* p, SizeT align )
nethercote2d5b8162004-08-11 09:40:52 +0000645{
646 Addr a = (Addr)p;
647 if ((a % align) == 0) return (void*)a;
648 return (void*)(a - (a % align) + align);
649}
650
// Allocate a fresh Superblock for arena 'a' able to hold at least
// 'cszB' payload bytes.  If not enough memory available, either aborts
// (for non-client memory) or returns 0 (for client memory).
static
Superblock* newSuperblock ( Arena* a, SizeT cszB )
{
   Superblock* sb;
   SysRes      sres;

   // Take into account admin bytes in the Superblock.
   cszB += sizeof(Superblock);

   // Enforce the arena's minimum superblock size, then round up to a
   // whole number of pages, since the space comes from the aspacemgr.
   if (cszB < a->min_sblock_szB) cszB = a->min_sblock_szB;
   cszB = VG_PGROUNDUP(cszB);

   if (a->clientmem) {
      // client allocation -- return 0 to client if it fails
      // NOTE(review): mapped rwx, presumably so client code can be
      // generated into heap blocks -- confirm before changing.
      sres = VG_(am_sbrk_anon_float_client)
                ( cszB, VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC );
      if (sres.isError)
         return 0;
      sb = (Superblock*)sres.res;
      // Mark this segment as containing client heap.  The leak
      // checker needs to be able to identify such segments so as not
      // to use them as sources of roots during leak checks.
      VG_(am_set_segment_isCH_if_SkAnonC)( 
         (NSegment*) VG_(am_find_nsegment)( (Addr)sb )
      );
   } else {
      // non-client allocation -- abort if it fails
      sres = VG_(am_sbrk_anon_float_valgrind)( cszB );
      if (sres.isError) {
         VG_(out_of_memory_NORETURN)("newSuperblock", cszB);
         /* NOTREACHED */
         sb = NULL; /* keep gcc happy */
      } else {
         sb = (Superblock*)sres.res;
      }
   }
   vg_assert(NULL != sb);
   //zzVALGRIND_MAKE_MEM_UNDEFINED(sb, cszB);
   // The mapping is page-aligned, hence certainly malloc-aligned.
   vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
   sb->n_payload_bytes = cszB - sizeof(Superblock);
   // Account the whole mapping (admin + payload) to the arena.
   a->bytes_mmaped += cszB;
   VG_(debugLog)(1, "mallocfree",
                    "newSuperblock at %p (pszB %7ld) owner %s/%s\n", 
                    sb, sb->n_payload_bytes, 
                    a->clientmem ? "CLIENT" : "VALGRIND", a->name );
   return sb;
}
700
701// Find the superblock containing the given chunk.
702static
703Superblock* findSb ( Arena* a, Block* b )
704{
sewardj0b3fd2d2007-08-21 10:55:26 +0000705 SizeT min = 0;
706 SizeT max = a->sblocks_used;
sewardj49bdd7a2005-12-17 20:37:36 +0000707
sewardj0b3fd2d2007-08-21 10:55:26 +0000708 while (min <= max) {
709 Superblock * sb;
710 SizeT pos = min + (max - min)/2;
711
712 vg_assert(pos >= 0 && pos < a->sblocks_used);
713 sb = a->sblocks[pos];
714 if ((Block*)&sb->payload_bytes[0] <= b
715 && b < (Block*)&sb->payload_bytes[sb->n_payload_bytes])
716 {
717 return sb;
718 } else if ((Block*)&sb->payload_bytes[0] <= b) {
719 min = pos + 1;
720 } else {
721 max = pos - 1;
sewardj49bdd7a2005-12-17 20:37:36 +0000722 }
723 }
sewardj0b3fd2d2007-08-21 10:55:26 +0000724 VG_(printf)("findSb: can't find pointer %p in arena '%s'\n",
725 b, a->name );
726 VG_(core_panic)("findSb: VG_(arena_free)() in wrong arena?");
727 return NULL; /*NOTREACHED*/
nethercote2d5b8162004-08-11 09:40:52 +0000728}
729
sewardjde4a1d02002-03-22 01:27:54 +0000730
fitzhardinge98abfc72003-12-16 02:05:15 +0000731/*------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +0000732/*--- Functions for working with freelists. ---*/
733/*------------------------------------------------------------*/
734
nethercote2d5b8162004-08-11 09:40:52 +0000735// Nb: Determination of which freelist a block lives on is based on the
736// payload size, not block size.
sewardjde4a1d02002-03-22 01:27:54 +0000737
nethercote2d5b8162004-08-11 09:40:52 +0000738// Convert a payload size in bytes to a freelist number.
sewardjde4a1d02002-03-22 01:27:54 +0000739static
nethercote7ac7f7b2004-11-02 12:36:02 +0000740UInt pszB_to_listNo ( SizeT pszB )
sewardjde4a1d02002-03-22 01:27:54 +0000741{
njndb247dc2005-07-17 23:12:33 +0000742 SizeT n = pszB / VG_MIN_MALLOC_SZB;
tom60a4b0b2005-10-12 10:45:27 +0000743 vg_assert(0 == pszB % VG_MIN_MALLOC_SZB);
njn61dcab82005-05-21 19:36:45 +0000744
sewardjc1ac9772007-08-20 22:57:56 +0000745 // The first 64 lists hold blocks of size VG_MIN_MALLOC_SZB * list_num.
746 // The final 48 hold bigger blocks.
747 if (n < 64) return (UInt)n;
748 /* Exponential slope up, factor 1.05 */
749 if (n < 67) return 64;
750 if (n < 70) return 65;
751 if (n < 74) return 66;
752 if (n < 77) return 67;
753 if (n < 81) return 68;
754 if (n < 85) return 69;
755 if (n < 90) return 70;
756 if (n < 94) return 71;
757 if (n < 99) return 72;
758 if (n < 104) return 73;
759 if (n < 109) return 74;
760 if (n < 114) return 75;
761 if (n < 120) return 76;
762 if (n < 126) return 77;
763 if (n < 133) return 78;
764 if (n < 139) return 79;
765 /* Exponential slope up, factor 1.10 */
766 if (n < 153) return 80;
767 if (n < 169) return 81;
768 if (n < 185) return 82;
769 if (n < 204) return 83;
770 if (n < 224) return 84;
771 if (n < 247) return 85;
772 if (n < 272) return 86;
773 if (n < 299) return 87;
774 if (n < 329) return 88;
775 if (n < 362) return 89;
776 if (n < 398) return 90;
777 if (n < 438) return 91;
778 if (n < 482) return 92;
779 if (n < 530) return 93;
780 if (n < 583) return 94;
781 if (n < 641) return 95;
782 /* Exponential slope up, factor 1.20 */
783 if (n < 770) return 96;
784 if (n < 924) return 97;
785 if (n < 1109) return 98;
786 if (n < 1331) return 99;
787 if (n < 1597) return 100;
788 if (n < 1916) return 101;
789 if (n < 2300) return 102;
790 if (n < 2760) return 103;
791 if (n < 3312) return 104;
792 if (n < 3974) return 105;
793 if (n < 4769) return 106;
794 if (n < 5723) return 107;
795 if (n < 6868) return 108;
796 if (n < 8241) return 109;
797 if (n < 9890) return 110;
798 return 111;
sewardjde4a1d02002-03-22 01:27:54 +0000799}
800
nethercote2d5b8162004-08-11 09:40:52 +0000801// What is the minimum payload size for a given list?
sewardjde4a1d02002-03-22 01:27:54 +0000802static
nethercote7ac7f7b2004-11-02 12:36:02 +0000803SizeT listNo_to_pszB_min ( UInt listNo )
sewardjde4a1d02002-03-22 01:27:54 +0000804{
sewardj1d2e2e62007-08-23 10:22:44 +0000805 /* Repeatedly computing this function at every request is
806 expensive. Hence at the first call just cache the result for
807 every possible argument. */
808 static SizeT cache[N_MALLOC_LISTS];
809 static Bool cache_valid = False;
810 if (!cache_valid) {
811 UInt i;
812 for (i = 0; i < N_MALLOC_LISTS; i++) {
813 SizeT pszB = 0;
814 while (pszB_to_listNo(pszB) < i)
815 pszB += VG_MIN_MALLOC_SZB;
816 cache[i] = pszB;
817 }
818 cache_valid = True;
819 }
820 /* Returned cached answer. */
njn6e6588c2005-03-13 18:52:48 +0000821 vg_assert(listNo <= N_MALLOC_LISTS);
sewardj1d2e2e62007-08-23 10:22:44 +0000822 return cache[listNo];
sewardjde4a1d02002-03-22 01:27:54 +0000823}
824
nethercote2d5b8162004-08-11 09:40:52 +0000825// What is the maximum payload size for a given list?
sewardjde4a1d02002-03-22 01:27:54 +0000826static
nethercote7ac7f7b2004-11-02 12:36:02 +0000827SizeT listNo_to_pszB_max ( UInt listNo )
sewardjde4a1d02002-03-22 01:27:54 +0000828{
njn6e6588c2005-03-13 18:52:48 +0000829 vg_assert(listNo <= N_MALLOC_LISTS);
830 if (listNo == N_MALLOC_LISTS-1) {
nethercote2d5b8162004-08-11 09:40:52 +0000831 return MAX_PSZB;
sewardjde4a1d02002-03-22 01:27:54 +0000832 } else {
nethercote2d5b8162004-08-11 09:40:52 +0000833 return listNo_to_pszB_min(listNo+1) - 1;
sewardjde4a1d02002-03-22 01:27:54 +0000834 }
835}
836
837
838/* A nasty hack to try and reduce fragmentation. Try and replace
839 a->freelist[lno] with another block on the same list but with a
840 lower address, with the idea of attempting to recycle the same
841 blocks rather than cruise through the address space. */
sewardjde4a1d02002-03-22 01:27:54 +0000842static
nethercote7ac7f7b2004-11-02 12:36:02 +0000843void swizzle ( Arena* a, UInt lno )
sewardjde4a1d02002-03-22 01:27:54 +0000844{
nethercote2d5b8162004-08-11 09:40:52 +0000845 Block* p_best;
846 Block* pp;
847 Block* pn;
nethercote7ac7f7b2004-11-02 12:36:02 +0000848 UInt i;
sewardjde4a1d02002-03-22 01:27:54 +0000849
850 p_best = a->freelist[lno];
851 if (p_best == NULL) return;
852
853 pn = pp = p_best;
njn2bf9ba62005-12-25 02:47:12 +0000854
855 // This loop bound was 20 for a long time, but experiments showed that
856 // reducing it to 10 gave the same result in all the tests, and 5 got the
857 // same result in 85--100% of cases. And it's called often enough to be
858 // noticeable in programs that allocated a lot.
859 for (i = 0; i < 5; i++) {
nethercote2d5b8162004-08-11 09:40:52 +0000860 pn = get_next_b(pn);
861 pp = get_prev_b(pp);
sewardjde4a1d02002-03-22 01:27:54 +0000862 if (pn < p_best) p_best = pn;
863 if (pp < p_best) p_best = pp;
864 }
865 if (p_best < a->freelist[lno]) {
nethercote2d5b8162004-08-11 09:40:52 +0000866# ifdef VERBOSE_MALLOC
sewardj9c606bd2008-09-18 18:12:50 +0000867 VG_(printf)("retreat by %ld\n", (Word)(a->freelist[lno] - p_best));
sewardjde4a1d02002-03-22 01:27:54 +0000868# endif
869 a->freelist[lno] = p_best;
870 }
871}
872
873
874/*------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +0000875/*--- Sanity-check/debugging machinery. ---*/
876/*------------------------------------------------------------*/
877
njn6e6588c2005-03-13 18:52:48 +0000878#define REDZONE_LO_MASK 0x31
879#define REDZONE_HI_MASK 0x7c
nethercote2d5b8162004-08-11 09:40:52 +0000880
nethercote7ac7f7b2004-11-02 12:36:02 +0000881// Do some crude sanity checks on a Block.
sewardjde4a1d02002-03-22 01:27:54 +0000882static
nethercote2d5b8162004-08-11 09:40:52 +0000883Bool blockSane ( Arena* a, Block* b )
sewardjde4a1d02002-03-22 01:27:54 +0000884{
885# define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
nethercote7ac7f7b2004-11-02 12:36:02 +0000886 UInt i;
njn402c8612005-08-23 22:11:20 +0000887 // The lo and hi size fields will be checked (indirectly) by the call
888 // to get_rz_hi_byte().
njn472cc7c2005-07-17 17:20:30 +0000889 if (!a->clientmem && is_inuse_block(b)) {
nethercote2d5b8162004-08-11 09:40:52 +0000890 for (i = 0; i < a->rz_szB; i++) {
891 if (get_rz_lo_byte(a, b, i) !=
njn6e6588c2005-03-13 18:52:48 +0000892 (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK))
nethercote2d5b8162004-08-11 09:40:52 +0000893 {BLEAT("redzone-lo");return False;}
894 if (get_rz_hi_byte(a, b, i) !=
njn6e6588c2005-03-13 18:52:48 +0000895 (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK))
nethercote2d5b8162004-08-11 09:40:52 +0000896 {BLEAT("redzone-hi");return False;}
sewardjde4a1d02002-03-22 01:27:54 +0000897 }
898 }
899 return True;
900# undef BLEAT
901}
902
nethercote2d5b8162004-08-11 09:40:52 +0000903// Print superblocks (only for debugging).
sewardjde4a1d02002-03-22 01:27:54 +0000904static
905void ppSuperblocks ( Arena* a )
906{
sewardj0b3fd2d2007-08-21 10:55:26 +0000907 UInt i, j, blockno = 1;
njnd0e685c2005-07-17 17:55:42 +0000908 SizeT b_bszB;
sewardjde4a1d02002-03-22 01:27:54 +0000909
sewardj0b3fd2d2007-08-21 10:55:26 +0000910 for (j = 0; j < a->sblocks_used; ++j) {
911 Superblock * sb = a->sblocks[j];
912
sewardjde4a1d02002-03-22 01:27:54 +0000913 VG_(printf)( "\n" );
njn8a7b41b2007-09-23 00:51:24 +0000914 VG_(printf)( "superblock %d at %p, sb->n_pl_bs = %lu\n",
sewardj0b3fd2d2007-08-21 10:55:26 +0000915 blockno++, sb, sb->n_payload_bytes);
njnd0e685c2005-07-17 17:55:42 +0000916 for (i = 0; i < sb->n_payload_bytes; i += b_bszB) {
917 Block* b = (Block*)&sb->payload_bytes[i];
918 b_bszB = get_bszB(b);
njn8a7b41b2007-09-23 00:51:24 +0000919 VG_(printf)( " block at %d, bszB %lu: ", i, b_bszB );
njn472cc7c2005-07-17 17:20:30 +0000920 VG_(printf)( "%s, ", is_inuse_block(b) ? "inuse" : "free");
nethercote2d5b8162004-08-11 09:40:52 +0000921 VG_(printf)( "%s\n", blockSane(a, b) ? "ok" : "BAD" );
sewardjde4a1d02002-03-22 01:27:54 +0000922 }
nethercote2d5b8162004-08-11 09:40:52 +0000923 vg_assert(i == sb->n_payload_bytes); // no overshoot at end of Sb
sewardjde4a1d02002-03-22 01:27:54 +0000924 }
925 VG_(printf)( "end of superblocks\n\n" );
926}
927
// Sanity check both the superblocks and the chains.
// Verifies, for arena 'aid': (1) the superblock array's basic
// invariants; (2) every block in every superblock is sane, with no two
// adjacent free blocks (they should have been merged); (3) the total
// in-use payload matches a->bytes_on_loan; (4) every freelist is a
// consistent circular doubly-linked list whose blocks are sized for
// that list; (5) the free-block count from (2) matches the count from
// (4).  Panics on any failure.
static void sanity_check_malloc_arena ( ArenaId aid )
{
   UInt        i, j, superblockctr, blockctr_sb, blockctr_li;
   UInt        blockctr_sb_free, listno;
   SizeT       b_bszB, b_pszB, list_min_pszB, list_max_pszB;
   Bool        thisFree, lastWasFree, sblockarrOK;
   Block*      b;
   Block*      b_prev;
   SizeT       arena_bytes_on_loan;
   Arena*      a;

#  define BOMB VG_(core_panic)("sanity_check_malloc_arena")

   a = arenaId_to_ArenaP(aid);

   // Check the superblock array.
   // The array must exist, used <= size, and it must point at the
   // static initial array exactly when the size is still the initial
   // size (it is reallocated elsewhere when grown).
   sblockarrOK
      = a->sblocks != NULL
        && a->sblocks_size >= SBLOCKS_SIZE_INITIAL
        && a->sblocks_used <= a->sblocks_size
        && (a->sblocks_size == SBLOCKS_SIZE_INITIAL
            ? (a->sblocks == &a->sblocks_initial[0])
            : (a->sblocks != &a->sblocks_initial[0]));
   if (!sblockarrOK) {
      VG_(printf)("sanity_check_malloc_arena: sblock array BAD\n");
      BOMB;
   }

   // First, traverse all the superblocks, inspecting the Blocks in each.
   superblockctr = blockctr_sb = blockctr_sb_free = 0;
   arena_bytes_on_loan = 0;
   for (j = 0; j < a->sblocks_used; ++j) {
      Superblock * sb = a->sblocks[j];
      lastWasFree = False;
      superblockctr++;
      // Walk block-to-block; i advances by each block's full size.
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         blockctr_sb++;
         b     = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_as_is(b);
         if (!blockSane(a, b)) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d "
                        "(bszB %lu): BAD\n", sb, i, b_bszB );
            BOMB;
         }
         thisFree = !is_inuse_block(b);
         // Two free neighbours should have been coalesced on free.
         if (thisFree && lastWasFree) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d "
                        "(bszB %lu): UNMERGED FREES\n", sb, i, b_bszB );
            BOMB;
         }
         if (thisFree) blockctr_sb_free++;
         if (!thisFree)
            arena_bytes_on_loan += bszB_to_pszB(a, b_bszB);
         lastWasFree = thisFree;
      }
      // Loop must land exactly on the end of the payload area.
      if (i > sb->n_payload_bytes) {
         VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
                      "overshoots end\n", sb);
         BOMB;
      }
   }

   // Recomputed in-use total must agree with the running counter.
   if (arena_bytes_on_loan != a->bytes_on_loan) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: a->bytes_on_loan %ld, "
                   "arena_bytes_on_loan %ld: "
                   "MISMATCH\n", a->bytes_on_loan, arena_bytes_on_loan);
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   /* Second, traverse each list, checking that the back pointers make
      sense, counting blocks encountered, and checking that each block
      is an appropriate size for this list. */
   blockctr_li = 0;
   for (listno = 0; listno < N_MALLOC_LISTS; listno++) {
      list_min_pszB = listNo_to_pszB_min(listno);
      list_max_pszB = listNo_to_pszB_max(listno);
      b = a->freelist[listno];
      if (b == NULL) continue;
      // Circular list: walk until we come back to the head.
      while (True) {
         b_prev = b;
         b = get_next_b(b);
         if (get_prev_b(b) != b_prev) {
            VG_(printf)( "sanity_check_malloc_arena: list %d at %p: "
                         "BAD LINKAGE\n",
                         listno, b );
            BOMB;
         }
         b_pszB = get_pszB(a, b);
         if (b_pszB < list_min_pszB || b_pszB > list_max_pszB) {
            VG_(printf)(
               "sanity_check_malloc_arena: list %d at %p: "
               "WRONG CHAIN SIZE %luB (%luB, %luB)\n",
               listno, b, b_pszB, list_min_pszB, list_max_pszB );
            BOMB;
         }
         blockctr_li++;
         if (b == a->freelist[listno]) break;
      }
   }

   // Every free block seen in the superblock walk must be on exactly
   // one freelist, and vice versa.
   if (blockctr_sb_free != blockctr_li) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: BLOCK COUNT MISMATCH "
                   "(via sbs %d, via lists %d)\n",
                   blockctr_sb_free, blockctr_li );
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   if (VG_(clo_verbosity) > 2)
      VG_(message)(Vg_DebugMsg,
                   "%8s: %2d sbs, %5d bs, %2d/%-2d free bs, "
                   "%7ld mmap, %7ld loan",
                   a->name,
                   superblockctr,
                   blockctr_sb, blockctr_sb_free, blockctr_li,
                   a->bytes_mmaped, a->bytes_on_loan);
#  undef BOMB
}
1052
1053
/* Maximum number of distinct cost centres trackable in one
   cc_analyse_alloc_arena() pass. */
#define N_AN_CCS 1000

/* Per-cost-centre accumulator: payload bytes and block count charged
   against the cost-centre string 'cc'. */
typedef struct { ULong nBytes; ULong nBlocks; HChar* cc; } AnCC;

// Scratch table for cc_analyse_alloc_arena(); reused on every call.
static AnCC anCCs[N_AN_CCS];
1059
1060static Int cmp_AnCC_by_vol ( void* v1, void* v2 ) {
1061 AnCC* ancc1 = (AnCC*)v1;
1062 AnCC* ancc2 = (AnCC*)v2;
1063 if (ancc1->nBytes < ancc2->nBytes) return -1;
1064 if (ancc1->nBytes > ancc2->nBytes) return 1;
1065 return 0;
1066}
1067
/* Heap-profiling analysis pass for arena 'aid': walks every in-use
   block, aggregates payload bytes and block counts per cost-centre
   string, then prints the table sorted by ascending byte volume.
   Runs the full arena sanity check first; silently returns if the
   arena was never initialised. */
static void cc_analyse_alloc_arena ( ArenaId aid )
{
   Word i, j, k;
   Arena*      a;
   Block*      b;
   Bool        thisFree, lastWasFree;
   SizeT       b_bszB;

   HChar* cc;
   UInt n_ccs = 0;
   //return;
   a = arenaId_to_ArenaP(aid);
   if (a->name == NULL) {
      /* arena is not in use, is not initialised and will fail the
         sanity check that follows. */
      return;
   }

   sanity_check_malloc_arena(aid);

   VG_(printf)(
      "-------- Arena \"%s\": %ld mmap'd, %ld/%ld max/curr --------\n",
      a->name, a->bytes_mmaped, a->bytes_on_loan_max, a->bytes_on_loan 
   );

   for (j = 0; j < a->sblocks_used; ++j) {
      Superblock * sb = a->sblocks[j];
      lastWasFree = False;
      // Walk block-to-block; i advances by each block's full size.
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         b     = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_as_is(b);
         if (!blockSane(a, b)) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %ld "
                        "(bszB %lu): BAD\n", sb, i, b_bszB );
            tl_assert(0);
         }
         thisFree = !is_inuse_block(b);
         if (thisFree && lastWasFree) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %ld "
                        "(bszB %lu): UNMERGED FREES\n", sb, i, b_bszB );
            tl_assert(0);
         }
         lastWasFree = thisFree;

         // Only in-use blocks are charged to a cost centre.
         if (thisFree) continue;

         if (0)
         VG_(printf)("block: inUse=%d pszB=%d cc=%s\n", 
                     (Int)(!thisFree), 
                     (Int)bszB_to_pszB(a, b_bszB),
                     get_cc(b));
         cc = get_cc(b);
         tl_assert(cc);
         // Linear search of the table for this cost-centre string.
         for (k = 0; k < n_ccs; k++) {
            tl_assert(anCCs[k].cc);
            if (0 == VG_(strcmp)(cc, anCCs[k].cc))
               break;
         }
         tl_assert(k >= 0 && k <= n_ccs);

         if (k == n_ccs) {
            // Not seen before: open a new entry for this cost centre.
            tl_assert(n_ccs < N_AN_CCS-1);
            n_ccs++;
            anCCs[k].nBytes  = 0;
            anCCs[k].nBlocks = 0;
            anCCs[k].cc      = cc;
         }

         tl_assert(k >= 0 && k < n_ccs && k < N_AN_CCS);
         anCCs[k].nBytes += (ULong)bszB_to_pszB(a, b_bszB);
         anCCs[k].nBlocks++;
      }
      // Loop must land exactly on the end of the payload area.
      if (i > sb->n_payload_bytes) {
         VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
                      "overshoots end\n", sb);
         tl_assert(0);
      }
   }

   VG_(ssort)( &anCCs[0], n_ccs, sizeof(anCCs[0]), cmp_AnCC_by_vol );

   for (k = 0; k < n_ccs; k++) {
      VG_(printf)("%'13llu in %'9llu: %s\n",
                  anCCs[k].nBytes, anCCs[k].nBlocks, anCCs[k].cc );
   }

   VG_(printf)("\n");
}
1156
1157
nethercote885dd912004-08-03 23:14:00 +00001158void VG_(sanity_check_malloc_all) ( void )
sewardjde4a1d02002-03-22 01:27:54 +00001159{
nethercote7ac7f7b2004-11-02 12:36:02 +00001160 UInt i;
sewardj0b3fd2d2007-08-21 10:55:26 +00001161 for (i = 0; i < VG_N_ARENAS; i++) {
1162 if (i == VG_AR_CLIENT && !client_inited)
1163 continue;
nethercote885dd912004-08-03 23:14:00 +00001164 sanity_check_malloc_arena ( i );
sewardj0b3fd2d2007-08-21 10:55:26 +00001165 }
sewardjde4a1d02002-03-22 01:27:54 +00001166}
1167
sewardjde4a1d02002-03-22 01:27:54 +00001168
nethercote2d5b8162004-08-11 09:40:52 +00001169/*------------------------------------------------------------*/
1170/*--- Creating and deleting blocks. ---*/
1171/*------------------------------------------------------------*/
1172
1173// Mark the bytes at b .. b+bszB-1 as not in use, and add them to the
1174// relevant free list.
1175
1176static
nethercote7ac7f7b2004-11-02 12:36:02 +00001177void mkFreeBlock ( Arena* a, Block* b, SizeT bszB, UInt b_lno )
jsewardb1a26ae2004-03-14 03:06:37 +00001178{
nethercote7ac7f7b2004-11-02 12:36:02 +00001179 SizeT pszB = bszB_to_pszB(a, bszB);
nethercote2d5b8162004-08-11 09:40:52 +00001180 vg_assert(b_lno == pszB_to_listNo(pszB));
njndbf7ca72006-03-31 11:57:59 +00001181 //zzVALGRIND_MAKE_MEM_UNDEFINED(b, bszB);
nethercote2d5b8162004-08-11 09:40:52 +00001182 // Set the size fields and indicate not-in-use.
njn8d3f8452005-07-20 04:12:41 +00001183 set_bszB(b, mk_free_bszB(bszB));
nethercote2d5b8162004-08-11 09:40:52 +00001184
1185 // Add to the relevant list.
1186 if (a->freelist[b_lno] == NULL) {
1187 set_prev_b(b, b);
1188 set_next_b(b, b);
1189 a->freelist[b_lno] = b;
1190 } else {
1191 Block* b_prev = get_prev_b(a->freelist[b_lno]);
1192 Block* b_next = a->freelist[b_lno];
1193 set_next_b(b_prev, b);
1194 set_prev_b(b_next, b);
1195 set_next_b(b, b_next);
1196 set_prev_b(b, b_prev);
1197 }
1198# ifdef DEBUG_MALLOC
1199 (void)blockSane(a,b);
1200# endif
1201}
1202
1203// Mark the bytes at b .. b+bszB-1 as in use, and set up the block
1204// appropriately.
1205static
nethercote7ac7f7b2004-11-02 12:36:02 +00001206void mkInuseBlock ( Arena* a, Block* b, SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +00001207{
nethercote7ac7f7b2004-11-02 12:36:02 +00001208 UInt i;
nethercote2d5b8162004-08-11 09:40:52 +00001209 vg_assert(bszB >= min_useful_bszB(a));
njndbf7ca72006-03-31 11:57:59 +00001210 //zzVALGRIND_MAKE_MEM_UNDEFINED(b, bszB);
njn8d3f8452005-07-20 04:12:41 +00001211 set_bszB(b, mk_inuse_bszB(bszB));
nethercote2d5b8162004-08-11 09:40:52 +00001212 set_prev_b(b, NULL); // Take off freelist
1213 set_next_b(b, NULL); // ditto
1214 if (!a->clientmem) {
1215 for (i = 0; i < a->rz_szB; i++) {
njn6e6588c2005-03-13 18:52:48 +00001216 set_rz_lo_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK));
1217 set_rz_hi_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK));
nethercote2d5b8162004-08-11 09:40:52 +00001218 }
1219 }
1220# ifdef DEBUG_MALLOC
1221 (void)blockSane(a,b);
1222# endif
1223}
1224
1225// Remove a block from a given list. Does no sanity checking.
1226static
nethercote7ac7f7b2004-11-02 12:36:02 +00001227void unlinkBlock ( Arena* a, Block* b, UInt listno )
nethercote2d5b8162004-08-11 09:40:52 +00001228{
njn6e6588c2005-03-13 18:52:48 +00001229 vg_assert(listno < N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +00001230 if (get_prev_b(b) == b) {
1231 // Only one element in the list; treat it specially.
1232 vg_assert(get_next_b(b) == b);
1233 a->freelist[listno] = NULL;
1234 } else {
1235 Block* b_prev = get_prev_b(b);
1236 Block* b_next = get_next_b(b);
1237 a->freelist[listno] = b_prev;
1238 set_next_b(b_prev, b_next);
1239 set_prev_b(b_next, b_prev);
1240 swizzle ( a, listno );
1241 }
1242 set_prev_b(b, NULL);
1243 set_next_b(b, NULL);
jsewardb1a26ae2004-03-14 03:06:37 +00001244}
1245
1246
sewardjde4a1d02002-03-22 01:27:54 +00001247/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001248/*--- Core-visible functions. ---*/
sewardjde4a1d02002-03-22 01:27:54 +00001249/*------------------------------------------------------------*/
1250
nethercote2d5b8162004-08-11 09:40:52 +00001251// Align the request size.
1252static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +00001253SizeT align_req_pszB ( SizeT req_pszB )
nethercote2d5b8162004-08-11 09:40:52 +00001254{
nethercote7ac7f7b2004-11-02 12:36:02 +00001255 SizeT n = VG_MIN_MALLOC_SZB-1;
nethercote2d5b8162004-08-11 09:40:52 +00001256 return ((req_pszB + n) & (~n));
1257}
1258
sewardj9c606bd2008-09-18 18:12:50 +00001259void* VG_(arena_malloc) ( ArenaId aid, HChar* cc, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001260{
nethercote7ac7f7b2004-11-02 12:36:02 +00001261 SizeT req_bszB, frag_bszB, b_bszB;
sewardj0b3fd2d2007-08-21 10:55:26 +00001262 UInt lno, i;
sewardjde4a1d02002-03-22 01:27:54 +00001263 Superblock* new_sb;
nethercote2d5b8162004-08-11 09:40:52 +00001264 Block* b = NULL;
sewardjde4a1d02002-03-22 01:27:54 +00001265 Arena* a;
jsewardb1a26ae2004-03-14 03:06:37 +00001266 void* v;
sewardjde4a1d02002-03-22 01:27:54 +00001267
sewardj45f4e7c2005-09-27 19:20:21 +00001268 ensure_mm_init(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001269 a = arenaId_to_ArenaP(aid);
1270
nethercote7ac7f7b2004-11-02 12:36:02 +00001271 vg_assert(req_pszB < MAX_PSZB);
nethercote2d5b8162004-08-11 09:40:52 +00001272 req_pszB = align_req_pszB(req_pszB);
1273 req_bszB = pszB_to_bszB(a, req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001274
sewardj9c606bd2008-09-18 18:12:50 +00001275 // You must provide a cost-center name against which to charge
1276 // this allocation; it isn't optional.
1277 vg_assert(cc);
1278
nethercote2d5b8162004-08-11 09:40:52 +00001279 // Scan through all the big-enough freelists for a block.
njn4ab6d532007-10-16 23:18:06 +00001280 //
1281 // Nb: this scanning might be expensive in some cases. Eg. if you
1282 // allocate lots of small objects without freeing them, but no
1283 // medium-sized objects, it will repeatedly scanning through the whole
1284 // list, and each time not find any free blocks until the last element.
1285 //
1286 // If this becomes a noticeable problem... the loop answers the question
1287 // "where is the first nonempty list above me?" And most of the time,
1288 // you ask the same question and get the same answer. So it would be
1289 // good to somehow cache the results of previous searches.
1290 // One possibility is an array (with N_MALLOC_LISTS elements) of
1291 // shortcuts. shortcut[i] would give the index number of the nearest
1292 // larger list above list i which is non-empty. Then this loop isn't
1293 // necessary. However, we'd have to modify some section [ .. i-1] of the
1294 // shortcut array every time a list [i] changes from empty to nonempty or
1295 // back. This would require care to avoid pathological worst-case
1296 // behaviour.
1297 //
njn6e6588c2005-03-13 18:52:48 +00001298 for (lno = pszB_to_listNo(req_pszB); lno < N_MALLOC_LISTS; lno++) {
sewardjde4a1d02002-03-22 01:27:54 +00001299 b = a->freelist[lno];
nethercote2d5b8162004-08-11 09:40:52 +00001300 if (NULL == b) continue; // If this list is empty, try the next one.
sewardjde4a1d02002-03-22 01:27:54 +00001301 while (True) {
njnd0e685c2005-07-17 17:55:42 +00001302 b_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +00001303 if (b_bszB >= req_bszB) goto obtained_block; // success!
1304 b = get_next_b(b);
1305 if (b == a->freelist[lno]) break; // traversed entire freelist
sewardjde4a1d02002-03-22 01:27:54 +00001306 }
sewardjde4a1d02002-03-22 01:27:54 +00001307 }
1308
nethercote2d5b8162004-08-11 09:40:52 +00001309 // If we reach here, no suitable block found, allocate a new superblock
njn6e6588c2005-03-13 18:52:48 +00001310 vg_assert(lno == N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +00001311 new_sb = newSuperblock(a, req_bszB);
1312 if (NULL == new_sb) {
1313 // Should only fail if for client, otherwise, should have aborted
1314 // already.
1315 vg_assert(VG_AR_CLIENT == aid);
1316 return NULL;
sewardjde4a1d02002-03-22 01:27:54 +00001317 }
sewardj0b3fd2d2007-08-21 10:55:26 +00001318
1319 vg_assert(a->sblocks_used <= a->sblocks_size);
1320 if (a->sblocks_used == a->sblocks_size) {
1321 Superblock ** array;
1322 SysRes sres = VG_(am_sbrk_anon_float_valgrind)(sizeof(Superblock *) *
1323 a->sblocks_size * 2);
1324 if (sres.isError) {
1325 VG_(out_of_memory_NORETURN)("arena_init", sizeof(Superblock *) *
1326 a->sblocks_size * 2);
1327 /* NOTREACHED */
1328 }
1329 array = (Superblock**) sres.res;
1330 for (i = 0; i < a->sblocks_used; ++i) array[i] = a->sblocks[i];
1331
1332 a->sblocks_size *= 2;
1333 a->sblocks = array;
1334 VG_(debugLog)(1, "mallocfree",
1335 "sblock array for arena `%s' resized to %ld\n",
1336 a->name, a->sblocks_size);
1337 }
1338
1339 vg_assert(a->sblocks_used < a->sblocks_size);
1340
1341 i = a->sblocks_used;
1342 while (i > 0) {
1343 if (a->sblocks[i-1] > new_sb) {
1344 a->sblocks[i] = a->sblocks[i-1];
1345 } else {
1346 break;
1347 }
1348 --i;
1349 }
1350 a->sblocks[i] = new_sb;
1351 a->sblocks_used++;
1352
nethercote2d5b8162004-08-11 09:40:52 +00001353 b = (Block*)&new_sb->payload_bytes[0];
1354 lno = pszB_to_listNo(bszB_to_pszB(a, new_sb->n_payload_bytes));
1355 mkFreeBlock ( a, b, new_sb->n_payload_bytes, lno);
sewardj94c8eb42008-09-19 20:13:39 +00001356 if (VG_(clo_profile_heap))
1357 set_cc(b, "admin.free-new-sb-1");
nethercote2d5b8162004-08-11 09:40:52 +00001358 // fall through
sewardjde4a1d02002-03-22 01:27:54 +00001359
nethercote2d5b8162004-08-11 09:40:52 +00001360 obtained_block:
1361 // Ok, we can allocate from b, which lives in list lno.
sewardjde4a1d02002-03-22 01:27:54 +00001362 vg_assert(b != NULL);
njn6e6588c2005-03-13 18:52:48 +00001363 vg_assert(lno < N_MALLOC_LISTS);
sewardjde4a1d02002-03-22 01:27:54 +00001364 vg_assert(a->freelist[lno] != NULL);
njnd0e685c2005-07-17 17:55:42 +00001365 b_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +00001366 // req_bszB is the size of the block we are after. b_bszB is the
1367 // size of what we've actually got. */
1368 vg_assert(b_bszB >= req_bszB);
sewardjde4a1d02002-03-22 01:27:54 +00001369
nethercote2d5b8162004-08-11 09:40:52 +00001370 // Could we split this block and still get a useful fragment?
1371 frag_bszB = b_bszB - req_bszB;
1372 if (frag_bszB >= min_useful_bszB(a)) {
1373 // Yes, split block in two, put the fragment on the appropriate free
1374 // list, and update b_bszB accordingly.
1375 // printf( "split %dB into %dB and %dB\n", b_bszB, req_bszB, frag_bszB );
sewardjde4a1d02002-03-22 01:27:54 +00001376 unlinkBlock(a, b, lno);
nethercote2d5b8162004-08-11 09:40:52 +00001377 mkInuseBlock(a, b, req_bszB);
sewardj94c8eb42008-09-19 20:13:39 +00001378 if (VG_(clo_profile_heap))
1379 set_cc(b, cc);
nethercote2d5b8162004-08-11 09:40:52 +00001380 mkFreeBlock(a, &b[req_bszB], frag_bszB,
1381 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
sewardj94c8eb42008-09-19 20:13:39 +00001382 if (VG_(clo_profile_heap))
1383 set_cc(&b[req_bszB], "admin.fragmentation-1");
njnd0e685c2005-07-17 17:55:42 +00001384 b_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +00001385 } else {
1386 // No, mark as in use and use as-is.
1387 unlinkBlock(a, b, lno);
1388 mkInuseBlock(a, b, b_bszB);
sewardj94c8eb42008-09-19 20:13:39 +00001389 if (VG_(clo_profile_heap))
1390 set_cc(b, cc);
sewardjde4a1d02002-03-22 01:27:54 +00001391 }
sewardjde4a1d02002-03-22 01:27:54 +00001392
nethercote2d5b8162004-08-11 09:40:52 +00001393 // Update stats
1394 a->bytes_on_loan += bszB_to_pszB(a, b_bszB);
sewardj9c606bd2008-09-18 18:12:50 +00001395 if (a->bytes_on_loan > a->bytes_on_loan_max) {
sewardjde4a1d02002-03-22 01:27:54 +00001396 a->bytes_on_loan_max = a->bytes_on_loan;
sewardj9c606bd2008-09-18 18:12:50 +00001397 if (a->bytes_on_loan_max >= a->next_profile_at) {
1398 /* next profile after 10% more growth */
1399 a->next_profile_at
1400 = (SizeT)(
1401 (((ULong)a->bytes_on_loan_max) * 110ULL) / 100ULL );
1402 if (VG_(clo_profile_heap))
1403 cc_analyse_alloc_arena(aid);
1404 }
1405 }
sewardjde4a1d02002-03-22 01:27:54 +00001406
1407# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00001408 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001409# endif
1410
nethercote2d5b8162004-08-11 09:40:52 +00001411 v = get_block_payload(a, b);
1412 vg_assert( (((Addr)v) & (VG_MIN_MALLOC_SZB-1)) == 0 );
sewardjb5f6f512005-03-10 23:59:00 +00001413
sewardja53462a2007-11-24 23:37:07 +00001414 /* VALGRIND_MALLOCLIKE_BLOCK(v, req_pszB, 0, False); */
1415
1416 /* For debugging/testing purposes, fill the newly allocated area
1417 with a definite value in an attempt to shake out any
1418 uninitialised uses of the data (by V core / V tools, not by the
1419 client). Testing on 25 Nov 07 with the values 0x00, 0xFF, 0x55,
1420 0xAA showed no differences in the regression tests on
1421 amd64-linux. Note, is disabled by default. */
1422 if (0 && aid != VG_AR_CLIENT)
1423 VG_(memset)(v, 0xAA, (SizeT)req_pszB);
1424
jsewardb1a26ae2004-03-14 03:06:37 +00001425 return v;
sewardjde4a1d02002-03-22 01:27:54 +00001426}
1427
1428
/* Return the block 'ptr' (previously handed out by VG_(arena_malloc) or
   VG_(arena_memalign) on arena 'aid') to that arena's free lists.
   Freeing NULL is a no-op.  A freed block is eagerly coalesced with a
   free successor and/or predecessor block inside the same superblock,
   to limit fragmentation. */
void VG_(arena_free) ( ArenaId aid, void* ptr )
{
   Superblock* sb;
   UByte* sb_start;
   UByte* sb_end;
   Block* other_b;
   Block* b;
   SizeT b_bszB, b_pszB, other_bszB;
   UInt b_listno;
   Arena* a;

   ensure_mm_init(aid);
   a = arenaId_to_ArenaP(aid);

   if (ptr == NULL) {
      return;
   }

   b = get_payload_block(a, ptr);

   /* If this is one of V's areas, check carefully the block we're
      getting back.  This picks up simple block-end overruns. */
   if (aid != VG_AR_CLIENT)
      vg_assert(blockSane(a, b));

   b_bszB   = get_bszB(b);
   b_pszB   = bszB_to_pszB(a, b_bszB);
   sb       = findSb( a, b );
   sb_start = &sb->payload_bytes[0];
   sb_end   = &sb->payload_bytes[sb->n_payload_bytes - 1];

   a->bytes_on_loan -= b_pszB;

   /* If this is one of V's areas, fill it up with junk to enhance the
      chances of catching any later reads of it.  Note, 0xDD is
      carefully chosen junk :-), in that: (1) 0xDDDDDDDD is an invalid
      and non-word-aligned address on most systems, and (2) 0xDD is a
      value which is unlikely to be generated by the new compressed
      Vbits representation for memcheck. */
   if (aid != VG_AR_CLIENT)
      VG_(memset)(ptr, 0xDD, (SizeT)b_pszB);

   // Put this chunk back on a list somewhere.
   b_listno = pszB_to_listNo(b_pszB);
   mkFreeBlock( a, b, b_bszB, b_listno );
   if (VG_(clo_profile_heap))
      set_cc(b, "admin.free-1");

   // See if this block can be merged with its successor.
   // First test if we're far enough before the superblock's end to possibly
   // have a successor.
   // NOTE(review): 'b + b_bszB' treats b_bszB (a byte count) as a Block*
   // offset — this relies on Block being a byte-sized type; confirm
   // against the Block typedef earlier in the file.
   other_b = b + b_bszB;
   if (other_b+min_useful_bszB(a)-1 <= (Block*)sb_end) {
      // Ok, we have a successor, merge if it's not in use.
      other_bszB = get_bszB(other_b);
      if (!is_inuse_block(other_b)) {
         // VG_(printf)( "merge-successor\n");
#        ifdef DEBUG_MALLOC
         vg_assert(blockSane(a, other_b));
#        endif
         // Unlink both blocks, then re-insert the enlarged block on the
         // free list matching its combined size.
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other_b, pszB_to_listNo(bszB_to_pszB(a,other_bszB)) );
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
         if (VG_(clo_profile_heap))
            set_cc(b, "admin.free-2");
      }
   } else {
      // Not enough space for successor: check that b is the last block
      // ie. there are no unused bytes at the end of the Superblock.
      vg_assert(other_b-1 == (Block*)sb_end);
   }

   // Then see if this block can be merged with its predecessor.
   // First test if we're far enough after the superblock's start to possibly
   // have a predecessor.
   if (b >= (Block*)sb_start + min_useful_bszB(a)) {
      // Ok, we have a predecessor, merge if it's not in use.
      other_b = get_predecessor_block( b );
      other_bszB = get_bszB(other_b);
      if (!is_inuse_block(other_b)) {
         // VG_(printf)( "merge-predecessor\n");
         // Note: after this merge, 'b' moves back to the predecessor's
         // address, so the coalesced block starts at other_b.
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other_b, pszB_to_listNo(bszB_to_pszB(a, other_bszB)) );
         b = other_b;
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
         if (VG_(clo_profile_heap))
            set_cc(b, "admin.free-3");
      }
   } else {
      // Not enough space for predecessor: check that b is the first block,
      // ie. there are no unused bytes at the start of the Superblock.
      vg_assert((Block*)sb_start == b);
   }

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   //zzVALGRIND_FREELIKE_BLOCK(ptr, 0);
}
1533
1534
1535/*
1536 The idea for malloc_aligned() is to allocate a big block, base, and
   then split it into two parts: frag, which is returned to the
1538 free pool, and align, which is the bit we're really after. Here's
1539 a picture. L and H denote the block lower and upper overheads, in
nethercote2d5b8162004-08-11 09:40:52 +00001540 bytes. The details are gruesome. Note it is slightly complicated
sewardjde4a1d02002-03-22 01:27:54 +00001541 because the initial request to generate base may return a bigger
1542 block than we asked for, so it is important to distinguish the base
1543 request size and the base actual size.
1544
1545 frag_b align_b
1546 | |
1547 | frag_p | align_p
1548 | | | |
1549 v v v v
1550
1551 +---+ +---+---+ +---+
1552 | L |----------------| H | L |---------------| H |
1553 +---+ +---+---+ +---+
1554
1555 ^ ^ ^
1556 | | :
1557 | base_p this addr must be aligned
1558 |
1559 base_b
1560
1561 . . . . . . .
nethercote2d5b8162004-08-11 09:40:52 +00001562 <------ frag_bszB -------> . . .
1563 . <------------- base_pszB_act -----------> .
sewardjde4a1d02002-03-22 01:27:54 +00001564 . . . . . . .
1565
1566*/
/* Allocate req_pszB payload bytes from arena 'aid', aligned to
   req_alignB (which must be a power of 2 in [VG_MIN_MALLOC_SZB ..
   1048576]).  See the picture in the comment above for the layout:
   a larger block is allocated and split into a leading fragment
   (returned to the free lists) and the aligned block handed back.
   Returns NULL only if the underlying VG_(arena_malloc) failed (which
   can happen only for the client arena).  'cc' is the mandatory
   cost-centre name for heap profiling. */
void* VG_(arena_memalign) ( ArenaId aid, HChar* cc,
                            SizeT req_alignB, SizeT req_pszB )
{
   SizeT  base_pszB_req, base_pszB_act, frag_bszB;
   Block  *base_b, *align_b;
   UByte  *base_p, *align_p;
   SizeT  saved_bytes_on_loan;
   Arena* a;

   ensure_mm_init(aid);
   a = arenaId_to_ArenaP(aid);

   vg_assert(req_pszB < MAX_PSZB);

   // You must provide a cost-center name against which to charge
   // this allocation; it isn't optional.
   vg_assert(cc);

   // Check that the requested alignment seems reasonable; that is, is
   // a power of 2.
   if (req_alignB < VG_MIN_MALLOC_SZB
       || req_alignB > 1048576
       || VG_(log2)( req_alignB ) == -1 /* not a power of 2 */) {
      VG_(printf)("VG_(arena_memalign)(%p, %lu, %lu)\nbad alignment",
                  a, req_alignB, req_pszB );
      VG_(core_panic)("VG_(arena_memalign)");
      /*NOTREACHED*/
   }
   // Paranoid
   vg_assert(req_alignB % VG_MIN_MALLOC_SZB == 0);

   /* Required payload size for the aligned chunk. */
   req_pszB = align_req_pszB(req_pszB);

   /* Payload size to request for the big block that we will split up.
      Extra space for one minimal block plus the alignment slack
      guarantees an aligned address can be carved out of it. */
   base_pszB_req = req_pszB + min_useful_bszB(a) + req_alignB;

   /* Payload ptr for the block we are going to split.  Note this
      changes a->bytes_on_loan; we save and restore it ourselves,
      since the aligned sub-block is accounted for separately below. */
   saved_bytes_on_loan = a->bytes_on_loan;
   base_p = VG_(arena_malloc) ( aid, cc, base_pszB_req );
   a->bytes_on_loan = saved_bytes_on_loan;

   /* Give up if we couldn't allocate enough space */
   if (base_p == 0)
      return 0;

   /* Block ptr for the block we are going to split. */
   base_b = get_payload_block ( a, base_p );

   /* Pointer to the payload of the aligned block we are going to
      return.  This has to be suitably aligned.  The offset leaves room
      for the fragment's overheads plus the aligned block's own lower
      overhead before align_p. */
   align_p = align_upwards ( base_b + 2 * overhead_szB_lo(a)
                                    + overhead_szB_hi(a),
                             req_alignB );
   align_b = get_payload_block(a, align_p);

   /* The block size of the fragment we will create.  This must be big
      enough to actually create a fragment. */
   frag_bszB = align_b - base_b;

   vg_assert(frag_bszB >= min_useful_bszB(a));

   /* The actual payload size of the block we are going to split.
      (May be bigger than base_pszB_req if the allocator rounded up.) */
   base_pszB_act = get_pszB(a, base_b);

   /* Create the fragment block, and put it back on the relevant free list. */
   mkFreeBlock ( a, base_b, frag_bszB,
                 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)) );
   if (VG_(clo_profile_heap))
      set_cc(base_b, "admin.frag-memalign-1");

   /* Create the aligned block: it runs from align_b to the end of the
      original base block (including the upper overhead). */
   mkInuseBlock ( a, align_b,
                  base_p + base_pszB_act
                         + overhead_szB_hi(a) - (UByte*)align_b );
   if (VG_(clo_profile_heap))
      set_cc(align_b, cc);

   /* Final sanity checks. */
   vg_assert( is_inuse_block(get_payload_block(a, align_p)) );

   vg_assert(req_pszB <= get_pszB(a, get_payload_block(a, align_p)));

   /* Account only the aligned block (not the fragment) as on loan. */
   a->bytes_on_loan += get_pszB(a, get_payload_block(a, align_p));
   if (a->bytes_on_loan > a->bytes_on_loan_max)
      a->bytes_on_loan_max = a->bytes_on_loan;

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   vg_assert( (((Addr)align_p) % req_alignB) == 0 );

   //zzVALGRIND_MALLOCLIKE_BLOCK(align_p, req_pszB, 0, False);

   return align_p;
}
1665
1666
njn32397c02007-11-10 04:08:08 +00001667// The ThreadId doesn't matter, it's not used.
njn2dc09e62005-08-17 04:03:31 +00001668SizeT VG_(arena_payload_szB) ( ThreadId tid, ArenaId aid, void* ptr )
nethercote2d5b8162004-08-11 09:40:52 +00001669{
1670 Arena* a = arenaId_to_ArenaP(aid);
1671 Block* b = get_payload_block(a, ptr);
njn089f51f2005-07-17 18:12:00 +00001672 return get_pszB(a, b);
sewardjde4a1d02002-03-22 01:27:54 +00001673}
1674
bart545380e2008-04-21 17:28:50 +00001675
1676// Implementation of mallinfo(). There is no recent standard that defines
1677// the behavior of mallinfo(). The meaning of the fields in struct mallinfo
1678// is as follows:
1679//
1680// struct mallinfo {
1681// int arena; /* total space in arena */
1682// int ordblks; /* number of ordinary blocks */
1683// int smblks; /* number of small blocks */
1684// int hblks; /* number of holding blocks */
1685// int hblkhd; /* space in holding block headers */
1686// int usmblks; /* space in small blocks in use */
1687// int fsmblks; /* space in free small blocks */
1688// int uordblks; /* space in ordinary blocks in use */
1689// int fordblks; /* space in free ordinary blocks */
1690// int keepcost; /* space penalty if keep option */
1691// /* is used */
1692// };
1693//
1694// The glibc documentation about mallinfo (which is somewhat outdated) can
1695// be found here:
1696// http://www.gnu.org/software/libtool/manual/libc/Statistics-of-Malloc.html
1697//
1698// See also http://bugs.kde.org/show_bug.cgi?id=160956.
1699//
1700// Regarding the implementation of VG_(mallinfo)(): we cannot return the
1701// whole struct as the library function does, because this is called by a
1702// client request. So instead we use a pointer to do call by reference.
njn088bfb42005-08-17 05:01:37 +00001703void VG_(mallinfo) ( ThreadId tid, struct vg_mallinfo* mi )
1704{
sewardj76dda8f2008-05-29 13:45:49 +00001705 UWord i, free_blocks, free_blocks_size;
bartc3c98392008-04-19 14:43:30 +00001706 Arena* a = arenaId_to_ArenaP(VG_AR_CLIENT);
1707
1708 // Traverse free list and calculate free blocks statistics.
1709 // This may seem slow but glibc works the same way.
1710 free_blocks_size = free_blocks = 0;
1711 for (i = 0; i < N_MALLOC_LISTS; i++) {
1712 Block* b = a->freelist[i];
1713 if (b == NULL) continue;
1714 for (;;) {
1715 free_blocks++;
sewardj76dda8f2008-05-29 13:45:49 +00001716 free_blocks_size += (UWord)get_pszB(a, b);
bartc3c98392008-04-19 14:43:30 +00001717 b = get_next_b(b);
1718 if (b == a->freelist[i]) break;
1719 }
1720 }
1721
1722 // We don't have fastbins so smblks & fsmblks are always 0. Also we don't
bart545380e2008-04-21 17:28:50 +00001723 // have a separate mmap allocator so set hblks & hblkhd to 0.
bartc3c98392008-04-19 14:43:30 +00001724 mi->arena = a->bytes_mmaped;
bart545380e2008-04-21 17:28:50 +00001725 mi->ordblks = free_blocks + VG_(free_queue_length);
bartc3c98392008-04-19 14:43:30 +00001726 mi->smblks = 0;
1727 mi->hblks = 0;
1728 mi->hblkhd = 0;
1729 mi->usmblks = 0;
1730 mi->fsmblks = 0;
bart545380e2008-04-21 17:28:50 +00001731 mi->uordblks = a->bytes_on_loan - VG_(free_queue_volume);
1732 mi->fordblks = free_blocks_size + VG_(free_queue_volume);
bartc3c98392008-04-19 14:43:30 +00001733 mi->keepcost = 0; // may want some value in here
njn088bfb42005-08-17 05:01:37 +00001734}
sewardjde4a1d02002-03-22 01:27:54 +00001735
sewardj45f4e7c2005-09-27 19:20:21 +00001736
sewardjde4a1d02002-03-22 01:27:54 +00001737/*------------------------------------------------------------*/
1738/*--- Services layered on top of malloc/free. ---*/
1739/*------------------------------------------------------------*/
1740
sewardj9c606bd2008-09-18 18:12:50 +00001741void* VG_(arena_calloc) ( ArenaId aid, HChar* cc,
1742 SizeT nmemb, SizeT bytes_per_memb )
sewardjde4a1d02002-03-22 01:27:54 +00001743{
nethercote7ac7f7b2004-11-02 12:36:02 +00001744 SizeT size;
sewardjde4a1d02002-03-22 01:27:54 +00001745 UChar* p;
njn25e49d8e72002-09-23 09:36:25 +00001746
njn926ed472005-03-11 04:44:10 +00001747 size = nmemb * bytes_per_memb;
1748 vg_assert(size >= nmemb && size >= bytes_per_memb);// check against overflow
njn3e884182003-04-15 13:03:23 +00001749
sewardj9c606bd2008-09-18 18:12:50 +00001750 p = VG_(arena_malloc) ( aid, cc, size );
njn3e884182003-04-15 13:03:23 +00001751
njn926ed472005-03-11 04:44:10 +00001752 VG_(memset)(p, 0, size);
sewardjb5f6f512005-03-10 23:59:00 +00001753
sewardj45f4e7c2005-09-27 19:20:21 +00001754 //zzVALGRIND_MALLOCLIKE_BLOCK(p, size, 0, True);
njn25e49d8e72002-09-23 09:36:25 +00001755
sewardjde4a1d02002-03-22 01:27:54 +00001756 return p;
1757}
1758
1759
sewardj9c606bd2008-09-18 18:12:50 +00001760void* VG_(arena_realloc) ( ArenaId aid, HChar* cc,
1761 void* ptr, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001762{
1763 Arena* a;
njn089f51f2005-07-17 18:12:00 +00001764 SizeT old_pszB;
sewardjb5f6f512005-03-10 23:59:00 +00001765 UChar *p_new;
nethercote2d5b8162004-08-11 09:40:52 +00001766 Block* b;
sewardjde4a1d02002-03-22 01:27:54 +00001767
sewardj45f4e7c2005-09-27 19:20:21 +00001768 ensure_mm_init(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001769 a = arenaId_to_ArenaP(aid);
1770
nethercote7ac7f7b2004-11-02 12:36:02 +00001771 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00001772
njn180f6982008-10-12 19:51:41 +00001773 if (NULL == ptr) {
1774 return VG_(arena_malloc)(aid, cc, req_pszB);
1775 }
1776
1777 if (req_pszB == 0) {
1778 VG_(arena_free)(aid, ptr);
1779 return NULL;
1780 }
1781
nethercote2d5b8162004-08-11 09:40:52 +00001782 b = get_payload_block(a, ptr);
1783 vg_assert(blockSane(a, b));
sewardjde4a1d02002-03-22 01:27:54 +00001784
njn472cc7c2005-07-17 17:20:30 +00001785 vg_assert(is_inuse_block(b));
njn089f51f2005-07-17 18:12:00 +00001786 old_pszB = get_pszB(a, b);
sewardjde4a1d02002-03-22 01:27:54 +00001787
njn25e49d8e72002-09-23 09:36:25 +00001788 if (req_pszB <= old_pszB) {
njn25e49d8e72002-09-23 09:36:25 +00001789 return ptr;
1790 }
sewardjde4a1d02002-03-22 01:27:54 +00001791
sewardj9c606bd2008-09-18 18:12:50 +00001792 p_new = VG_(arena_malloc) ( aid, cc, req_pszB );
njn828022a2005-03-13 14:56:31 +00001793
sewardjb5f6f512005-03-10 23:59:00 +00001794 VG_(memcpy)(p_new, ptr, old_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001795
sewardjb5f6f512005-03-10 23:59:00 +00001796 VG_(arena_free)(aid, ptr);
njn25e49d8e72002-09-23 09:36:25 +00001797
sewardjde4a1d02002-03-22 01:27:54 +00001798 return p_new;
1799}
1800
1801
njn6ba622c2005-06-11 01:12:08 +00001802/* Inline just for the wrapper VG_(strdup) below */
sewardj9c606bd2008-09-18 18:12:50 +00001803__inline__ Char* VG_(arena_strdup) ( ArenaId aid, HChar* cc,
1804 const Char* s )
njn6ba622c2005-06-11 01:12:08 +00001805{
1806 Int i;
1807 Int len;
1808 Char* res;
1809
1810 if (s == NULL)
1811 return NULL;
1812
1813 len = VG_(strlen)(s) + 1;
sewardj9c606bd2008-09-18 18:12:50 +00001814 res = VG_(arena_malloc) (aid, cc, len);
njn6ba622c2005-06-11 01:12:08 +00001815
1816 for (i = 0; i < len; i++)
1817 res[i] = s[i];
1818 return res;
1819}
1820
1821
sewardjde4a1d02002-03-22 01:27:54 +00001822/*------------------------------------------------------------*/
nethercote996901a2004-08-03 13:29:09 +00001823/*--- Tool-visible functions. ---*/
njn25e49d8e72002-09-23 09:36:25 +00001824/*------------------------------------------------------------*/
1825
nethercote2d5b8162004-08-11 09:40:52 +00001826// All just wrappers to avoid exposing arenas to tools.
njn25e49d8e72002-09-23 09:36:25 +00001827
// Tool-visible malloc: allocates nbytes from the tool arena, charged to
// cost centre 'cc'.
void* VG_(malloc) ( HChar* cc, SizeT nbytes )
{
   return VG_(arena_malloc) ( VG_AR_TOOL, cc, nbytes );
}
1832
// Tool-visible free: 'ptr' must have been allocated from the tool arena
// (VG_(malloc)/VG_(calloc)/VG_(realloc)/VG_(strdup)).
void VG_(free) ( void* ptr )
{
   VG_(arena_free) ( VG_AR_TOOL, ptr );
}
1837
// Tool-visible calloc: zero-initialised allocation of
// nmemb * bytes_per_memb bytes from the tool arena.
void* VG_(calloc) ( HChar* cc, SizeT nmemb, SizeT bytes_per_memb )
{
   return VG_(arena_calloc) ( VG_AR_TOOL, cc, nmemb, bytes_per_memb );
}
1842
// Tool-visible realloc on the tool arena; see VG_(arena_realloc) for
// the NULL-ptr and zero-size conventions.
void* VG_(realloc) ( HChar* cc, void* ptr, SizeT size )
{
   return VG_(arena_realloc) ( VG_AR_TOOL, cc, ptr, size );
}
1847
// Tool-visible strdup: duplicates 's' into the tool arena; the copy
// must be released with VG_(free).
Char* VG_(strdup) ( HChar* cc, const Char* s )
{
   return VG_(arena_strdup) ( VG_AR_TOOL, cc, s );
}
1852
// Useful for querying user blocks.
// NB: queries the CLIENT arena, not the tool arena — 'p' must be a
// client heap pointer.
SizeT VG_(malloc_usable_size) ( void* p )
{
   return VG_(arena_payload_szB)(VG_INVALID_THREADID, VG_AR_CLIENT, p);
}
1858
1859
sewardjde4a1d02002-03-22 01:27:54 +00001860/*--------------------------------------------------------------------*/
njn717cde52005-05-10 02:47:21 +00001861/*--- end ---*/
sewardjde4a1d02002-03-22 01:27:54 +00001862/*--------------------------------------------------------------------*/