
/*--------------------------------------------------------------------*/
/*--- An implementation of malloc/free which doesn't use sbrk.     ---*/
/*---                                               m_mallocfree.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2010 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcbase.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_threadstate.h"   // For VG_INVALID_THREADID
#include "pub_core_tooliface.h"
#include "valgrind.h"

//zz#include "memcheck/memcheck.h"

// #define DEBUG_MALLOC      // turn on heavyweight debugging machinery
// #define VERBOSE_MALLOC    // make verbose, esp. in debugging machinery

/* Number and total size of blocks in free queue.  Used by mallinfo(). */
Long VG_(free_queue_volume) = 0;
Long VG_(free_queue_length) = 0;

static void cc_analyse_alloc_arena ( ArenaId aid ); /* fwds */

/*------------------------------------------------------------*/
/*--- Main types                                           ---*/
/*------------------------------------------------------------*/

#define N_MALLOC_LISTS     112    // do not change this

// The amount you can ask for is limited only by sizeof(SizeT)...
#define MAX_PSZB              (~((SizeT)0x0))

// Each arena has a sorted array of superblocks, which expands
// dynamically.  This is its initial size.
#define SBLOCKS_SIZE_INITIAL 50

typedef UChar UByte;

/* Layout of an in-use block:

      cost center (OPTIONAL)   (VG_MIN_MALLOC_SZB bytes, only when h-p enabled)
      this block total szB     (sizeof(SizeT) bytes)
      red zone bytes           (depends on Arena.rz_szB, but >= sizeof(void*))
      (payload bytes)
      red zone bytes           (depends on Arena.rz_szB, but >= sizeof(void*))
      this block total szB     (sizeof(SizeT) bytes)

   Layout of a block on the free list:

      cost center (OPTIONAL)   (VG_MIN_MALLOC_SZB bytes, only when h-p enabled)
      this block total szB     (sizeof(SizeT) bytes)
      freelist previous ptr    (sizeof(void*) bytes)
      excess red zone bytes    (if Arena.rz_szB > sizeof(void*))
      (payload bytes)
      excess red zone bytes    (if Arena.rz_szB > sizeof(void*))
      freelist next ptr        (sizeof(void*) bytes)
      this block total szB     (sizeof(SizeT) bytes)

   Total size in bytes (bszB) and payload size in bytes (pszB)
   are related by:

      bszB == pszB + 2*sizeof(SizeT) + 2*a->rz_szB

   when heap profiling is not enabled, and

      bszB == pszB + 2*sizeof(SizeT) + 2*a->rz_szB + VG_MIN_MALLOC_SZB

   when it is enabled.  It follows that the minimum overhead per heap
   block for arenas used by the core is:

      32-bit platforms:  2*4 + 2*4 == 16 bytes
      64-bit platforms:  2*8 + 2*8 == 32 bytes

   when heap profiling is not enabled, and

      32-bit platforms:  2*4 + 2*4 + 8  == 24 bytes
      64-bit platforms:  2*8 + 2*8 + 16 == 48 bytes

   when it is enabled.  In all cases, extra overhead may be incurred
   when rounding the payload size up to VG_MIN_MALLOC_SZB.

   Furthermore, both size fields in the block have their least-significant
   bit set if the block is not in use, and unset if it is in use.
   (The bottom 3 or so bits are always free for this because of alignment.)
   A block size of zero is not possible, because a block always has at
   least two SizeTs and two pointers of overhead.

   Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned.  This is
   achieved by ensuring that Superblocks are VG_MIN_MALLOC_SZB-aligned
   (see newSuperblock() for how), and that the lengths of the following
   things are a multiple of VG_MIN_MALLOC_SZB:
   - Superblock admin section lengths (due to elastic padding)
   - Block admin section (low and high) lengths (due to elastic redzones)
   - Block payload lengths (due to req_pszB rounding up)

   The heap-profile cost-center field is 8 bytes even on 32 bit
   platforms.  This is so as to keep the payload field 8-aligned.  On
   a 64-bit platform, this cc-field contains a pointer to a const
   HChar*, which is the cost center name.  On 32-bit platforms, the
   pointer lives in the lower-addressed half of the field, regardless
   of the endianness of the host.
*/
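
/* Illustration, with assumed values (VG_MIN_MALLOC_SZB and rz_szB are
   really platform/arena dependent): on a 64-bit build with heap
   profiling off, sizeof(SizeT) == 8, rz_szB == 8 and
   VG_MIN_MALLOC_SZB == 8, a request for 13 payload bytes rounds up to
   16, giving

      bszB == 16 + 2*8 + 2*8 == 48

   Since bszB is always a multiple of the alignment (hence even), the
   stored size fields read 48 while the block is in use and 48|1 == 49
   once it is freed, per the least-significant-bit convention above. */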
typedef
   struct {
      // No fields are actually used in this struct, because a Block has
      // many variable-sized fields and so can't be accessed
      // meaningfully with normal fields.  So we use access functions all
      // the time.  This struct gives us a type to use, though.  Also, we
      // make sizeof(Block) 1 byte so that we can do arithmetic with the
      // Block* type in increments of 1!
      UByte dummy;
   }
   Block;

// A superblock.  'padding' is never used, it just ensures that if the
// entire Superblock is aligned to VG_MIN_MALLOC_SZB, then payload_bytes[]
// will be too.  It can add small amounts of padding unnecessarily -- eg.
// 8-bytes on 32-bit machines with an 8-byte VG_MIN_MALLOC_SZB -- because
// it's too hard to make a constant expression that works perfectly in all
// cases.
// payload_bytes[] is made a single big Block when the Superblock is
// created, and then can be split and the splittings remerged, but Blocks
// always cover its entire length -- there are never any unused bytes at
// the end, for example.
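// Illustration with assumed sizes: a fresh superblock with
// n_payload_bytes == 1048576 holds exactly one free Block of bszB
// 1048576; after one split it might hold an in-use Block of bszB 48
// followed by a free Block of bszB 1048528, and 48 + 1048528 still
// accounts for every payload byte.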
typedef
   struct _Superblock {
      SizeT n_payload_bytes;
      void* padding2;
      UByte padding[ VG_MIN_MALLOC_SZB -
                        ((sizeof(struct _Superblock*) + sizeof(SizeT)) %
                         VG_MIN_MALLOC_SZB) ];
      UByte payload_bytes[0];
   }
   Superblock;

// An arena.  'freelist' is a circular, doubly-linked list.  'rz_szB' is
// elastic, in that it can be bigger than asked-for to ensure alignment.
typedef
   struct {
      Char*        name;
      Bool         clientmem;        // Allocates in the client address space?
      SizeT        rz_szB;           // Red zone size in bytes
      SizeT        min_sblock_szB;   // Minimum superblock size in bytes
      Block*       freelist[N_MALLOC_LISTS];
      // A dynamically expanding, ordered array of (pointers to)
      // superblocks in the arena.  If this array is expanded, which
      // is rare, the previous space it occupies is simply abandoned.
      // To avoid having to get yet another block from m_aspacemgr for
      // the first incarnation of this array, the first allocation of
      // it is within this struct.  If it has to be expanded then the
      // new space is acquired from m_aspacemgr as you would expect.
      Superblock** sblocks;
      SizeT        sblocks_size;
      SizeT        sblocks_used;
      Superblock*  sblocks_initial[SBLOCKS_SIZE_INITIAL];
      // Stats only.
      SizeT        bytes_on_loan;
      SizeT        bytes_mmaped;
      SizeT        bytes_on_loan_max;
      SizeT        next_profile_at;
   }
   Arena;


/*------------------------------------------------------------*/
/*--- Low-level functions for working with Blocks.         ---*/
/*------------------------------------------------------------*/

#define SIZE_T_0x1      ((SizeT)0x1)

static char* probably_your_fault =
   "This is probably caused by your program erroneously writing past the\n"
   "end of a heap block and corrupting heap metadata.  If you fix any\n"
   "invalid writes reported by Memcheck, this assertion failure will\n"
   "probably go away.  Please try that before reporting this as a bug.\n";

// Mark a bszB as in-use, mark it as free, or remove the in-use/free
// attribute altogether.
static __inline__
SizeT mk_inuse_bszB ( SizeT bszB )
{
   vg_assert2(bszB != 0, probably_your_fault);
   return bszB & (~SIZE_T_0x1);
}
static __inline__
SizeT mk_free_bszB ( SizeT bszB )
{
   vg_assert2(bszB != 0, probably_your_fault);
   return bszB | SIZE_T_0x1;
}
static __inline__
SizeT mk_plain_bszB ( SizeT bszB )
{
   vg_assert2(bszB != 0, probably_your_fault);
   return bszB & (~SIZE_T_0x1);
}

// Return either 0 or VG_MIN_MALLOC_SZB depending on whether or not
// heap profiling is engaged.
static __inline__
SizeT hp_overhead_szB ( void )
{
   return VG_(clo_profile_heap) ? VG_MIN_MALLOC_SZB : 0;
}

//---------------------------------------------------------------------------

// Get a block's size as stored, ie with the in-use/free attribute.
static __inline__
SizeT get_bszB_as_is ( Block* b )
{
   UByte* b2      = (UByte*)b;
   SizeT  bszB_lo = *(SizeT*)&b2[0 + hp_overhead_szB()];
   SizeT  bszB_hi = *(SizeT*)&b2[mk_plain_bszB(bszB_lo) - sizeof(SizeT)];
   vg_assert2(bszB_lo == bszB_hi,
      "Heap block lo/hi size mismatch: lo = %llu, hi = %llu.\n%s",
      (ULong)bszB_lo, (ULong)bszB_hi, probably_your_fault);
   return bszB_lo;
}

// Get a block's plain size, ie. remove the in-use/free attribute.
static __inline__
SizeT get_bszB ( Block* b )
{
   return mk_plain_bszB(get_bszB_as_is(b));
}

// Set the size fields of a block.  bszB may have the in-use/free attribute.
static __inline__
void set_bszB ( Block* b, SizeT bszB )
{
   UByte* b2 = (UByte*)b;
   *(SizeT*)&b2[0 + hp_overhead_szB()] = bszB;
   *(SizeT*)&b2[mk_plain_bszB(bszB) - sizeof(SizeT)] = bszB;
}

//---------------------------------------------------------------------------

// Does this block have the in-use attribute?
static __inline__
Bool is_inuse_block ( Block* b )
{
   SizeT bszB = get_bszB_as_is(b);
   vg_assert2(bszB != 0, probably_your_fault);
   return (0 != (bszB & SIZE_T_0x1)) ? False : True;
}

//---------------------------------------------------------------------------

// Return the lower, upper and total overhead in bytes for a block.
// These are determined purely by which arena the block lives in.
static __inline__
SizeT overhead_szB_lo ( Arena* a )
{
   return hp_overhead_szB() + sizeof(SizeT) + a->rz_szB;
}
static __inline__
SizeT overhead_szB_hi ( Arena* a )
{
   return a->rz_szB + sizeof(SizeT);
}
static __inline__
SizeT overhead_szB ( Arena* a )
{
   return overhead_szB_lo(a) + overhead_szB_hi(a);
}

//---------------------------------------------------------------------------

// Return the minimum bszB for a block in this arena.  Can have zero-length
// payloads, so it's the size of the admin bytes.
static __inline__
SizeT min_useful_bszB ( Arena* a )
{
   return overhead_szB(a);
}

//---------------------------------------------------------------------------

// Convert payload size <--> block size (both in bytes).
static __inline__
SizeT pszB_to_bszB ( Arena* a, SizeT pszB )
{
   return pszB + overhead_szB(a);
}
static __inline__
SizeT bszB_to_pszB ( Arena* a, SizeT bszB )
{
   vg_assert2(bszB >= overhead_szB(a), probably_your_fault);
   return bszB - overhead_szB(a);
}
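
// Illustration with assumed values: if hp_overhead_szB() == 0,
// sizeof(SizeT) == 8 and rz_szB == 8, then overhead_szB(a)
// == (8+8) + (8+8) == 32, so pszB_to_bszB(a, 16) == 48 and
// bszB_to_pszB(a, 48) == 16 -- the two functions are exact inverses
// whenever bszB covers the overhead.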
323
njn402c8612005-08-23 22:11:20 +0000324//---------------------------------------------------------------------------
nethercote2d5b8162004-08-11 09:40:52 +0000325
njn089f51f2005-07-17 18:12:00 +0000326// Get a block's payload size.
nethercote7ac7f7b2004-11-02 12:36:02 +0000327static __inline__
njn089f51f2005-07-17 18:12:00 +0000328SizeT get_pszB ( Arena* a, Block* b )
nethercote7ac7f7b2004-11-02 12:36:02 +0000329{
njn089f51f2005-07-17 18:12:00 +0000330 return bszB_to_pszB(a, get_bszB(b));
nethercote7ac7f7b2004-11-02 12:36:02 +0000331}
332
njn402c8612005-08-23 22:11:20 +0000333//---------------------------------------------------------------------------
334
335// Given the addr of a block, return the addr of its payload, and vice versa.
nethercote2d5b8162004-08-11 09:40:52 +0000336static __inline__
337UByte* get_block_payload ( Arena* a, Block* b )
338{
339 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000340 return & b2[ overhead_szB_lo(a) ];
nethercote2d5b8162004-08-11 09:40:52 +0000341}
342// Given the addr of a block's payload, return the addr of the block itself.
343static __inline__
344Block* get_payload_block ( Arena* a, UByte* payload )
345{
nethercote7ac7f7b2004-11-02 12:36:02 +0000346 return (Block*)&payload[ -overhead_szB_lo(a) ];
nethercote2d5b8162004-08-11 09:40:52 +0000347}
348
njn402c8612005-08-23 22:11:20 +0000349//---------------------------------------------------------------------------
nethercote2d5b8162004-08-11 09:40:52 +0000350
351// Set and get the next and previous link fields of a block.
352static __inline__
353void set_prev_b ( Block* b, Block* prev_p )
354{
355 UByte* b2 = (UByte*)b;
sewardj94c8eb42008-09-19 20:13:39 +0000356 *(Block**)&b2[hp_overhead_szB() + sizeof(SizeT)] = prev_p;
nethercote2d5b8162004-08-11 09:40:52 +0000357}
358static __inline__
359void set_next_b ( Block* b, Block* next_p )
360{
njn402c8612005-08-23 22:11:20 +0000361 UByte* b2 = (UByte*)b;
362 *(Block**)&b2[get_bszB(b) - sizeof(SizeT) - sizeof(void*)] = next_p;
nethercote2d5b8162004-08-11 09:40:52 +0000363}
364static __inline__
365Block* get_prev_b ( Block* b )
366{
367 UByte* b2 = (UByte*)b;
sewardj94c8eb42008-09-19 20:13:39 +0000368 return *(Block**)&b2[hp_overhead_szB() + sizeof(SizeT)];
nethercote2d5b8162004-08-11 09:40:52 +0000369}
370static __inline__
371Block* get_next_b ( Block* b )
372{
njn402c8612005-08-23 22:11:20 +0000373 UByte* b2 = (UByte*)b;
374 return *(Block**)&b2[get_bszB(b) - sizeof(SizeT) - sizeof(void*)];
nethercote2d5b8162004-08-11 09:40:52 +0000375}
376
njn402c8612005-08-23 22:11:20 +0000377//---------------------------------------------------------------------------
nethercote2d5b8162004-08-11 09:40:52 +0000378
sewardj9c606bd2008-09-18 18:12:50 +0000379// Set and get the cost-center field of a block.
380static __inline__
381void set_cc ( Block* b, HChar* cc )
382{
383 UByte* b2 = (UByte*)b;
sewardj94c8eb42008-09-19 20:13:39 +0000384 vg_assert( VG_(clo_profile_heap) );
sewardj9c606bd2008-09-18 18:12:50 +0000385 *(HChar**)&b2[0] = cc;
386}
387static __inline__
388HChar* get_cc ( Block* b )
389{
390 UByte* b2 = (UByte*)b;
sewardj94c8eb42008-09-19 20:13:39 +0000391 vg_assert( VG_(clo_profile_heap) );
sewardj9c606bd2008-09-18 18:12:50 +0000392 return *(HChar**)&b2[0];
393}
394
395//---------------------------------------------------------------------------
396
nethercote2d5b8162004-08-11 09:40:52 +0000397// Get the block immediately preceding this one in the Superblock.
398static __inline__
399Block* get_predecessor_block ( Block* b )
400{
401 UByte* b2 = (UByte*)b;
nethercote7ac7f7b2004-11-02 12:36:02 +0000402 SizeT bszB = mk_plain_bszB( (*(SizeT*)&b2[-sizeof(SizeT)]) );
nethercote2d5b8162004-08-11 09:40:52 +0000403 return (Block*)&b2[-bszB];
404}
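
// Nb: this works because the size field is duplicated at both ends of
// a block (the classic boundary-tag trick): the SizeT just below 'b'
// is the upper size field of the previous block.  Illustration with
// assumed numbers: if blocks A (bszB 48) and B are adjacent and
// sizeof(SizeT) == 8, then *(SizeT*)&((UByte*)B)[-8] reads 48 (or
// 48|1 if A is free, hence the mk_plain_bszB), and
// get_predecessor_block(B) == (Block*)((UByte*)B - 48) == A.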

//---------------------------------------------------------------------------

// Read and write the lower and upper red-zone bytes of a block.
static __inline__
void set_rz_lo_byte ( Block* b, UInt rz_byteno, UByte v )
{
   UByte* b2 = (UByte*)b;
   b2[hp_overhead_szB() + sizeof(SizeT) + rz_byteno] = v;
}
static __inline__
void set_rz_hi_byte ( Block* b, UInt rz_byteno, UByte v )
{
   UByte* b2 = (UByte*)b;
   b2[get_bszB(b) - sizeof(SizeT) - rz_byteno - 1] = v;
}
static __inline__
UByte get_rz_lo_byte ( Block* b, UInt rz_byteno )
{
   UByte* b2 = (UByte*)b;
   return b2[hp_overhead_szB() + sizeof(SizeT) + rz_byteno];
}
static __inline__
UByte get_rz_hi_byte ( Block* b, UInt rz_byteno )
{
   UByte* b2 = (UByte*)b;
   return b2[get_bszB(b) - sizeof(SizeT) - rz_byteno - 1];
}


/*------------------------------------------------------------*/
/*--- Arena management                                     ---*/
/*------------------------------------------------------------*/

#define CORE_ARENA_MIN_SZB    1048576

// The arena structures themselves.
static Arena vg_arena[VG_N_ARENAS];

// Functions external to this module identify arenas using ArenaIds,
// not Arena*s.  This fn converts the former to the latter.
static Arena* arenaId_to_ArenaP ( ArenaId arena )
{
   vg_assert(arena >= 0 && arena < VG_N_ARENAS);
   return & vg_arena[arena];
}

// Initialise an arena.  rz_szB is the minimum redzone size;  it might be
// made bigger to ensure that VG_MIN_MALLOC_SZB is observed.
static
void arena_init ( ArenaId aid, Char* name, SizeT rz_szB, SizeT min_sblock_szB )
{
   SizeT  i;
   Arena* a = arenaId_to_ArenaP(aid);

   // Ensure redzones are a reasonable size.  They must always be at least
   // the size of a pointer, for holding the prev/next pointer (see the layout
   // details at the top of this file).
   vg_assert(rz_szB < 128);
   if (rz_szB < sizeof(void*)) rz_szB = sizeof(void*);

   vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
   a->name      = name;
   a->clientmem = ( VG_AR_CLIENT == aid ? True : False );

   // The size of the low and high admin sections in a block must be a
   // multiple of VG_MIN_MALLOC_SZB.  So we round up the asked-for
   // redzone size if necessary to achieve this.
   a->rz_szB = rz_szB;
   while (0 != overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++;
   vg_assert(overhead_szB_lo(a) - hp_overhead_szB() == overhead_szB_hi(a));

   a->min_sblock_szB = min_sblock_szB;
   for (i = 0; i < N_MALLOC_LISTS; i++) a->freelist[i] = NULL;

   a->sblocks           = & a->sblocks_initial[0];
   a->sblocks_size      = SBLOCKS_SIZE_INITIAL;
   a->sblocks_used      = 0;
   a->bytes_on_loan     = 0;
   a->bytes_mmaped      = 0;
   a->bytes_on_loan_max = 0;
   a->next_profile_at   = 25 * 1000 * 1000;
   vg_assert(sizeof(a->sblocks_initial)
             == SBLOCKS_SIZE_INITIAL * sizeof(Superblock*));
}

/* Print vital stats for an arena. */
void VG_(print_all_arena_stats) ( void )
{
   UInt i;
   for (i = 0; i < VG_N_ARENAS; i++) {
      Arena* a = arenaId_to_ArenaP(i);
      VG_(message)(Vg_DebugMsg,
                   "%8s: %8ld mmap'd, %8ld/%8ld max/curr\n",
                   a->name, a->bytes_mmaped,
                   a->bytes_on_loan_max, a->bytes_on_loan
      );
   }
}

void VG_(print_arena_cc_analysis) ( void )
{
   UInt i;
   vg_assert( VG_(clo_profile_heap) );
   for (i = 0; i < VG_N_ARENAS; i++) {
      cc_analyse_alloc_arena(i);
   }
}


/* This library is self-initialising, as it makes this more self-contained,
   less coupled with the outside world.  Hence VG_(arena_malloc)() and
   VG_(arena_free)() below always call ensure_mm_init() to ensure things are
   correctly initialised.

   We initialise the client arena separately (and later) because the core
   must do non-client allocation before the tool has a chance to set the
   client arena's redzone size.
*/
static Bool     client_inited = False;
static Bool  nonclient_inited = False;

static
void ensure_mm_init ( ArenaId aid )
{
   static SizeT client_rz_szB = 8;     // default: be paranoid

   /* We use checked red zones (of various sizes) for our internal stuff,
      and an unchecked zone of arbitrary size for the client.  Of
      course the client's red zone can be checked by the tool, eg.
      by using addressability maps, but not by the mechanism implemented
      here, which merely checks at the time of freeing that the red
      zone bytes are unchanged.

      Nb: redzone sizes are *minimums*; they could be made bigger to ensure
      alignment.  Eg. with 8 byte alignment, on 32-bit machines 4 stays as
      4, but 16 becomes 20; but on 64-bit machines 4 becomes 8, and 16
      stays as 16 --- the extra 4 bytes in both are accounted for by the
      larger prev/next ptr.
   */
   if (VG_AR_CLIENT == aid) {
      Int ar_client_sbszB;
      if (client_inited) {
         // This assertion ensures that a tool cannot try to change the client
         // redzone size with VG_(needs_malloc_replacement)() after this module
         // has done its first allocation from the client arena.
         if (VG_(needs).malloc_replacement)
            vg_assert(client_rz_szB == VG_(tdict).tool_client_redzone_szB);
         return;
      }

      // Check and set the client arena redzone size
      if (VG_(needs).malloc_replacement) {
         client_rz_szB = VG_(tdict).tool_client_redzone_szB;
         // 128 is no special figure, just something not too big
         if (client_rz_szB > 128) {
            VG_(printf)( "\nTool error:\n"
                         "  specified redzone size is too big (%llu)\n",
                         (ULong)client_rz_szB);
            VG_(exit)(1);
         }
      }
      // Initialise the client arena.  On AIX it's important to have
      // relatively large client blocks so as not to cause excessively
      // fine-grained interleaving of V and C address space.  On Linux
      // this is irrelevant since aspacem can keep the two spaces
      // well apart, but not so on AIX.  On all platforms though,
      // increasing the superblock size reduces the number of superblocks
      // in the client arena, which makes findSb cheaper.
#     if defined(VGO_aix5)
      ar_client_sbszB = 16777216;
#     else
      ar_client_sbszB = 4194304;
#     endif
      arena_init ( VG_AR_CLIENT,    "client",   client_rz_szB, ar_client_sbszB );
      client_inited = True;

   } else {
      if (nonclient_inited) {
         return;
      }
      // Initialise the non-client arenas
      arena_init ( VG_AR_CORE,      "core",     4,             1048576 );
      arena_init ( VG_AR_TOOL,      "tool",     4,             4194304 );
      arena_init ( VG_AR_DINFO,     "dinfo",    4,             1048576 );
      arena_init ( VG_AR_DEMANGLE,  "demangle", 4,               65536 );
      arena_init ( VG_AR_EXECTXT,   "exectxt",  4,             1048576 );
      arena_init ( VG_AR_ERRORS,    "errors",   4,               65536 );
      arena_init ( VG_AR_TTAUX,     "ttaux",    4,               65536 );
      nonclient_inited = True;
   }

#  ifdef DEBUG_MALLOC
   VG_(printf)("ZZZ1\n");
   VG_(sanity_check_malloc_all)();
   VG_(printf)("ZZZ2\n");
#  endif
}


/*------------------------------------------------------------*/
/*--- Superblock management                                ---*/
/*------------------------------------------------------------*/

__attribute__((noreturn))
void VG_(out_of_memory_NORETURN) ( HChar* who, SizeT szB )
{
   static Bool alreadyCrashing = False;
   ULong tot_alloc = VG_(am_get_anonsize_total)();
   Char* s1 =
      "\n"
      "    Valgrind's memory management: out of memory:\n"
      "       %s's request for %llu bytes failed.\n"
      "       %llu bytes have already been allocated.\n"
      "    Valgrind cannot continue.  Sorry.\n\n"
      "    There are several possible reasons for this.\n"
      "    - You have some kind of memory limit in place.  Look at the\n"
      "      output of 'ulimit -a'.  Is there a limit on the size of\n"
      "      virtual memory or address space?\n"
      "    - You have run out of swap space.\n"
      "    - Valgrind has a bug.  If you think this is the case or you are\n"
      "      not sure, please let us know and we'll try to fix it.\n"
      "    Please note that programs can take substantially more memory than\n"
      "    normal when running under Valgrind tools, eg. up to twice or\n"
      "    more, depending on the tool.  On a 64-bit machine, Valgrind\n"
      "    should be able to make use of up to 32GB of memory.  On a 32-bit\n"
      "    machine, Valgrind should be able to use all the memory available\n"
      "    to a single process, up to 4GB if that's how you have your\n"
      "    kernel configured.  Most 32-bit Linux setups allow a maximum of\n"
      "    3GB per process.\n\n"
      "    Whatever the reason, Valgrind cannot continue.  Sorry.\n";

   if (!alreadyCrashing) {
      alreadyCrashing = True;
      VG_(message)(Vg_UserMsg, s1, who, (ULong)szB, tot_alloc);
   } else {
      VG_(debugLog)(0,"mallocfree", s1, who, (ULong)szB, tot_alloc);
   }

   VG_(exit)(1);
}


// Align ptr p upwards to an align-sized boundary.
static
void* align_upwards ( void* p, SizeT align )
{
   Addr a = (Addr)p;
   if ((a % align) == 0) return (void*)a;
   return (void*)(a - (a % align) + align);
}
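
// Quick check of the arithmetic: with align == 8,
// align_upwards((void*)0x1003, 8) computes 0x1003 - 3 + 8 == 0x1008,
// while an already-aligned 0x1008 is returned unchanged via the early
// exit.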

// If not enough memory available, either aborts (for non-client memory)
// or returns 0 (for client memory).
static
Superblock* newSuperblock ( Arena* a, SizeT cszB )
{
   Superblock* sb;
   SysRes      sres;

   // Take into account admin bytes in the Superblock.
   cszB += sizeof(Superblock);

   if (cszB < a->min_sblock_szB) cszB = a->min_sblock_szB;
   cszB = VG_PGROUNDUP(cszB);

   if (a->clientmem) {
      // client allocation -- return 0 to client if it fails
      sres = VG_(am_sbrk_anon_float_client)
                ( cszB, VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC );
      if (sr_isError(sres))
         return 0;
      sb = (Superblock*)(AddrH)sr_Res(sres);
      // Mark this segment as containing client heap.  The leak
      // checker needs to be able to identify such segments so as not
      // to use them as sources of roots during leak checks.
      VG_(am_set_segment_isCH_if_SkAnonC)(
         (NSegment*) VG_(am_find_nsegment)( (Addr)sb )
      );
   } else {
      // non-client allocation -- abort if it fails
      sres = VG_(am_sbrk_anon_float_valgrind)( cszB );
      if (sr_isError(sres)) {
         VG_(out_of_memory_NORETURN)("newSuperblock", cszB);
         /* NOTREACHED */
         sb = NULL; /* keep gcc happy */
      } else {
         sb = (Superblock*)(AddrH)sr_Res(sres);
      }
   }
   vg_assert(NULL != sb);
   //zzVALGRIND_MAKE_MEM_UNDEFINED(sb, cszB);
   vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
   sb->n_payload_bytes = cszB - sizeof(Superblock);
   a->bytes_mmaped += cszB;
   VG_(debugLog)(1, "mallocfree",
                    "newSuperblock at %p (pszB %7ld) owner %s/%s\n",
                    sb, sb->n_payload_bytes,
                    a->clientmem ? "CLIENT" : "VALGRIND", a->name );
   return sb;
}

// Find the superblock containing the given chunk.
static
Superblock* findSb ( Arena* a, Block* b )
{
   SizeT min = 0;
   SizeT max = a->sblocks_used;

   while (min <= max) {
      Superblock * sb;
      SizeT pos = min + (max - min)/2;

      vg_assert(pos >= 0 && pos < a->sblocks_used);
      sb = a->sblocks[pos];
      if ((Block*)&sb->payload_bytes[0] <= b
          && b < (Block*)&sb->payload_bytes[sb->n_payload_bytes])
      {
         return sb;
      } else if ((Block*)&sb->payload_bytes[0] <= b) {
         min = pos + 1;
      } else {
         max = pos - 1;
      }
   }
   VG_(printf)("findSb: can't find pointer %p in arena '%s'\n",
                b, a->name );
   VG_(core_panic)("findSb: VG_(arena_free)() in wrong arena?");
   return NULL; /*NOTREACHED*/
}
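
// Nb: findSb is a plain binary search over the address-ordered
// a->sblocks[] array.  Illustration with made-up addresses: given
// superblocks whose payloads start at 0x1000, 0x9000 and 0x20000, a
// Block at 0xa000 probes the middle entry first and, since 0xa000 lies
// inside that superblock's payload range, returns it immediately.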


/*------------------------------------------------------------*/
/*--- Functions for working with freelists.                ---*/
/*------------------------------------------------------------*/

// Nb: Determination of which freelist a block lives on is based on the
// payload size, not block size.

// Convert a payload size in bytes to a freelist number.
static
UInt pszB_to_listNo ( SizeT pszB )
{
   SizeT n = pszB / VG_MIN_MALLOC_SZB;
   vg_assert(0 == pszB % VG_MIN_MALLOC_SZB);

   // The first 64 lists hold blocks of size VG_MIN_MALLOC_SZB * list_num.
   // The final 48 hold bigger blocks.
   if (n < 64)   return (UInt)n;
   /* Exponential slope up, factor 1.05 */
   if (n < 67) return 64;
   if (n < 70) return 65;
   if (n < 74) return 66;
   if (n < 77) return 67;
   if (n < 81) return 68;
   if (n < 85) return 69;
   if (n < 90) return 70;
   if (n < 94) return 71;
   if (n < 99) return 72;
   if (n < 104) return 73;
   if (n < 109) return 74;
   if (n < 114) return 75;
   if (n < 120) return 76;
   if (n < 126) return 77;
   if (n < 133) return 78;
   if (n < 139) return 79;
   /* Exponential slope up, factor 1.10 */
   if (n < 153) return 80;
   if (n < 169) return 81;
   if (n < 185) return 82;
   if (n < 204) return 83;
   if (n < 224) return 84;
   if (n < 247) return 85;
   if (n < 272) return 86;
   if (n < 299) return 87;
   if (n < 329) return 88;
   if (n < 362) return 89;
   if (n < 398) return 90;
   if (n < 438) return 91;
   if (n < 482) return 92;
   if (n < 530) return 93;
   if (n < 583) return 94;
   if (n < 641) return 95;
   /* Exponential slope up, factor 1.20 */
   if (n < 770) return 96;
   if (n < 924) return 97;
   if (n < 1109) return 98;
   if (n < 1331) return 99;
   if (n < 1597) return 100;
   if (n < 1916) return 101;
   if (n < 2300) return 102;
   if (n < 2760) return 103;
   if (n < 3312) return 104;
   if (n < 3974) return 105;
   if (n < 4769) return 106;
   if (n < 5723) return 107;
   if (n < 6868) return 108;
   if (n < 8241) return 109;
   if (n < 9890) return 110;
   return 111;
}
nethercote2d5b8162004-08-11 09:40:52 +0000806// What is the minimum payload size for a given list?
sewardjde4a1d02002-03-22 01:27:54 +0000807static
nethercote7ac7f7b2004-11-02 12:36:02 +0000808SizeT listNo_to_pszB_min ( UInt listNo )
sewardjde4a1d02002-03-22 01:27:54 +0000809{
sewardj1d2e2e62007-08-23 10:22:44 +0000810 /* Repeatedly computing this function at every request is
811 expensive. Hence at the first call just cache the result for
812 every possible argument. */
813 static SizeT cache[N_MALLOC_LISTS];
814 static Bool cache_valid = False;
815 if (!cache_valid) {
816 UInt i;
817 for (i = 0; i < N_MALLOC_LISTS; i++) {
818 SizeT pszB = 0;
819 while (pszB_to_listNo(pszB) < i)
820 pszB += VG_MIN_MALLOC_SZB;
821 cache[i] = pszB;
822 }
823 cache_valid = True;
824 }
825 /* Returned cached answer. */
njn6e6588c2005-03-13 18:52:48 +0000826 vg_assert(listNo <= N_MALLOC_LISTS);
sewardj1d2e2e62007-08-23 10:22:44 +0000827 return cache[listNo];
sewardjde4a1d02002-03-22 01:27:54 +0000828}
829
nethercote2d5b8162004-08-11 09:40:52 +0000830// What is the maximum payload size for a given list?
sewardjde4a1d02002-03-22 01:27:54 +0000831static
nethercote7ac7f7b2004-11-02 12:36:02 +0000832SizeT listNo_to_pszB_max ( UInt listNo )
sewardjde4a1d02002-03-22 01:27:54 +0000833{
njn6e6588c2005-03-13 18:52:48 +0000834 vg_assert(listNo <= N_MALLOC_LISTS);
835 if (listNo == N_MALLOC_LISTS-1) {
nethercote2d5b8162004-08-11 09:40:52 +0000836 return MAX_PSZB;
sewardjde4a1d02002-03-22 01:27:54 +0000837 } else {
nethercote2d5b8162004-08-11 09:40:52 +0000838 return listNo_to_pszB_min(listNo+1) - 1;
sewardjde4a1d02002-03-22 01:27:54 +0000839 }
840}
841
842
843/* A nasty hack to try and reduce fragmentation. Try and replace
844 a->freelist[lno] with another block on the same list but with a
845 lower address, with the idea of attempting to recycle the same
846 blocks rather than cruise through the address space. */
sewardjde4a1d02002-03-22 01:27:54 +0000847static
nethercote7ac7f7b2004-11-02 12:36:02 +0000848void swizzle ( Arena* a, UInt lno )
sewardjde4a1d02002-03-22 01:27:54 +0000849{
nethercote2d5b8162004-08-11 09:40:52 +0000850 Block* p_best;
851 Block* pp;
852 Block* pn;
nethercote7ac7f7b2004-11-02 12:36:02 +0000853 UInt i;
sewardjde4a1d02002-03-22 01:27:54 +0000854
855 p_best = a->freelist[lno];
856 if (p_best == NULL) return;
857
858 pn = pp = p_best;
njn2bf9ba62005-12-25 02:47:12 +0000859
860 // This loop bound was 20 for a long time, but experiments showed that
861 // reducing it to 10 gave the same result in all the tests, and 5 got the
862 // same result in 85--100% of cases. And it's called often enough to be
863 // noticeable in programs that allocated a lot.
864 for (i = 0; i < 5; i++) {
nethercote2d5b8162004-08-11 09:40:52 +0000865 pn = get_next_b(pn);
866 pp = get_prev_b(pp);
sewardjde4a1d02002-03-22 01:27:54 +0000867 if (pn < p_best) p_best = pn;
868 if (pp < p_best) p_best = pp;
869 }
870 if (p_best < a->freelist[lno]) {
nethercote2d5b8162004-08-11 09:40:52 +0000871# ifdef VERBOSE_MALLOC
sewardj9c606bd2008-09-18 18:12:50 +0000872 VG_(printf)("retreat by %ld\n", (Word)(a->freelist[lno] - p_best));
sewardjde4a1d02002-03-22 01:27:54 +0000873# endif
874 a->freelist[lno] = p_best;
875 }
876}
877
878
879/*------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +0000880/*--- Sanity-check/debugging machinery. ---*/
881/*------------------------------------------------------------*/
882
njn6e6588c2005-03-13 18:52:48 +0000883#define REDZONE_LO_MASK 0x31
884#define REDZONE_HI_MASK 0x7c
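
// Nb: each redzone byte is filled with the low address byte of the
// block XORed with one of these masks (see mkInuseBlock below), so the
// expected value differs from block to block and an accidental
// overwrite is unlikely to reproduce it.  For example, a block at the
// (made-up) address 0x804a010 must have all its low-side redzone bytes
// equal to 0x10 ^ 0x31 == 0x21 and its high-side bytes equal to
// 0x10 ^ 0x7c == 0x6c.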

// Do some crude sanity checks on a Block.
static
Bool blockSane ( Arena* a, Block* b )
{
#  define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
   UInt i;
   // The lo and hi size fields will be checked (indirectly) by the call
   // to get_rz_hi_byte().
   if (!a->clientmem && is_inuse_block(b)) {
      for (i = 0; i < a->rz_szB; i++) {
         if (get_rz_lo_byte(b, i) !=
             (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK))
            {BLEAT("redzone-lo");return False;}
         if (get_rz_hi_byte(b, i) !=
             (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK))
            {BLEAT("redzone-hi");return False;}
      }
   }
   return True;
#  undef BLEAT
}

// Print superblocks (only for debugging).
static
void ppSuperblocks ( Arena* a )
{
   UInt i, j, blockno = 1;
   SizeT b_bszB;

   for (j = 0; j < a->sblocks_used; ++j) {
      Superblock * sb = a->sblocks[j];

      VG_(printf)( "\n" );
      VG_(printf)( "superblock %d at %p, sb->n_pl_bs = %lu\n",
                   blockno++, sb, sb->n_payload_bytes);
      for (i = 0; i < sb->n_payload_bytes; i += b_bszB) {
         Block* b = (Block*)&sb->payload_bytes[i];
         b_bszB   = get_bszB(b);
         VG_(printf)( "   block at %d, bszB %lu: ", i, b_bszB );
         VG_(printf)( "%s, ", is_inuse_block(b) ? "inuse" : "free");
         VG_(printf)( "%s\n", blockSane(a, b) ? "ok" : "BAD" );
      }
      vg_assert(i == sb->n_payload_bytes);   // no overshoot at end of Sb
   }
   VG_(printf)( "end of superblocks\n\n" );
}

// Sanity check both the superblocks and the chains.
static void sanity_check_malloc_arena ( ArenaId aid )
{
   UInt        i, j, superblockctr, blockctr_sb, blockctr_li;
   UInt        blockctr_sb_free, listno;
   SizeT       b_bszB, b_pszB, list_min_pszB, list_max_pszB;
   Bool        thisFree, lastWasFree, sblockarrOK;
   Block*      b;
   Block*      b_prev;
   SizeT       arena_bytes_on_loan;
   Arena*      a;

#  define BOMB VG_(core_panic)("sanity_check_malloc_arena")

   a = arenaId_to_ArenaP(aid);

   // Check the superblock array.
   sblockarrOK
      = a->sblocks != NULL
        && a->sblocks_size >= SBLOCKS_SIZE_INITIAL
        && a->sblocks_used <= a->sblocks_size
        && (a->sblocks_size == SBLOCKS_SIZE_INITIAL
            ? (a->sblocks == &a->sblocks_initial[0])
            : (a->sblocks != &a->sblocks_initial[0]));
   if (!sblockarrOK) {
      VG_(printf)("sanity_check_malloc_arena: sblock array BAD\n");
      BOMB;
   }

   // First, traverse all the superblocks, inspecting the Blocks in each.
   superblockctr = blockctr_sb = blockctr_sb_free = 0;
   arena_bytes_on_loan = 0;
   for (j = 0; j < a->sblocks_used; ++j) {
      Superblock * sb = a->sblocks[j];
      lastWasFree = False;
      superblockctr++;
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         blockctr_sb++;
         b      = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_as_is(b);
         if (!blockSane(a, b)) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d "
                        "(bszB %lu):  BAD\n", sb, i, b_bszB );
            BOMB;
         }
         thisFree = !is_inuse_block(b);
         if (thisFree && lastWasFree) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d "
                        "(bszB %lu): UNMERGED FREES\n", sb, i, b_bszB );
            BOMB;
         }
         if (thisFree) blockctr_sb_free++;
         if (!thisFree)
            arena_bytes_on_loan += bszB_to_pszB(a, b_bszB);
         lastWasFree = thisFree;
      }
      if (i > sb->n_payload_bytes) {
         VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
                      "overshoots end\n", sb);
         BOMB;
      }
   }

   if (arena_bytes_on_loan != a->bytes_on_loan) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: a->bytes_on_loan %ld, "
                   "arena_bytes_on_loan %ld: "
                   "MISMATCH\n", a->bytes_on_loan, arena_bytes_on_loan);
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   /* Second, traverse each list, checking that the back pointers make
      sense, counting blocks encountered, and checking that each block
      is an appropriate size for this list. */
   blockctr_li = 0;
   for (listno = 0; listno < N_MALLOC_LISTS; listno++) {
      list_min_pszB = listNo_to_pszB_min(listno);
      list_max_pszB = listNo_to_pszB_max(listno);
      b = a->freelist[listno];
      if (b == NULL) continue;
      while (True) {
         b_prev = b;
         b = get_next_b(b);
         if (get_prev_b(b) != b_prev) {
            VG_(printf)( "sanity_check_malloc_arena: list %d at %p: "
                         "BAD LINKAGE\n",
                         listno, b );
            BOMB;
         }
         b_pszB = get_pszB(a, b);
         if (b_pszB < list_min_pszB || b_pszB > list_max_pszB) {
            VG_(printf)(
               "sanity_check_malloc_arena: list %d at %p: "
               "WRONG CHAIN SIZE %luB (%luB, %luB)\n",
               listno, b, b_pszB, list_min_pszB, list_max_pszB );
            BOMB;
         }
         blockctr_li++;
         if (b == a->freelist[listno]) break;
      }
   }

   if (blockctr_sb_free != blockctr_li) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: BLOCK COUNT MISMATCH "
                   "(via sbs %d, via lists %d)\n",
                   blockctr_sb_free, blockctr_li );
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   if (VG_(clo_verbosity) > 2)
      VG_(message)(Vg_DebugMsg,
                   "%8s: %2d sbs, %5d bs, %2d/%-2d free bs, "
                   "%7ld mmap, %7ld loan\n",
                   a->name,
                   superblockctr,
                   blockctr_sb, blockctr_sb_free, blockctr_li,
                   a->bytes_mmaped, a->bytes_on_loan);
#  undef BOMB
}


#define N_AN_CCS 1000

typedef struct { ULong nBytes; ULong nBlocks; HChar* cc; } AnCC;

static AnCC anCCs[N_AN_CCS];

static Int cmp_AnCC_by_vol ( void* v1, void* v2 ) {
   AnCC* ancc1 = (AnCC*)v1;
   AnCC* ancc2 = (AnCC*)v2;
   if (ancc1->nBytes < ancc2->nBytes) return -1;
   if (ancc1->nBytes > ancc2->nBytes) return 1;
   return 0;
}

static void cc_analyse_alloc_arena ( ArenaId aid )
{
   Word i, j, k;
   Arena*      a;
   Block*      b;
   Bool        thisFree, lastWasFree;
   SizeT       b_bszB;

   HChar* cc;
   UInt n_ccs = 0;
   //return;
   a = arenaId_to_ArenaP(aid);
   if (a->name == NULL) {
      /* arena is not in use, is not initialised and will fail the
         sanity check that follows. */
      return;
   }

   sanity_check_malloc_arena(aid);

   VG_(printf)(
      "-------- Arena \"%s\": %ld mmap'd, %ld/%ld max/curr --------\n",
      a->name, a->bytes_mmaped, a->bytes_on_loan_max, a->bytes_on_loan
   );

   for (j = 0; j < a->sblocks_used; ++j) {
      Superblock * sb = a->sblocks[j];
      lastWasFree = False;
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         b      = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_as_is(b);
         if (!blockSane(a, b)) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %ld "
                        "(bszB %lu):  BAD\n", sb, i, b_bszB );
            tl_assert(0);
         }
         thisFree = !is_inuse_block(b);
         if (thisFree && lastWasFree) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %ld "
                        "(bszB %lu): UNMERGED FREES\n", sb, i, b_bszB );
            tl_assert(0);
         }
         lastWasFree = thisFree;

         if (thisFree) continue;

         if (0)
            VG_(printf)("block: inUse=%d pszB=%d cc=%s\n",
                        (Int)(!thisFree),
                        (Int)bszB_to_pszB(a, b_bszB),
                        get_cc(b));
         cc = get_cc(b);
         tl_assert(cc);
         for (k = 0; k < n_ccs; k++) {
            tl_assert(anCCs[k].cc);
            if (0 == VG_(strcmp)(cc, anCCs[k].cc))
               break;
         }
         tl_assert(k >= 0 && k <= n_ccs);

         if (k == n_ccs) {
            tl_assert(n_ccs < N_AN_CCS-1);
            n_ccs++;
            anCCs[k].nBytes  = 0;
            anCCs[k].nBlocks = 0;
            anCCs[k].cc      = cc;
         }

         tl_assert(k >= 0 && k < n_ccs && k < N_AN_CCS);
         anCCs[k].nBytes += (ULong)bszB_to_pszB(a, b_bszB);
         anCCs[k].nBlocks++;
      }
      if (i > sb->n_payload_bytes) {
         VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
                      "overshoots end\n", sb);
         tl_assert(0);
      }
   }

   VG_(ssort)( &anCCs[0], n_ccs, sizeof(anCCs[0]), cmp_AnCC_by_vol );

   for (k = 0; k < n_ccs; k++) {
      VG_(printf)("%'13llu in %'9llu: %s\n",
                  anCCs[k].nBytes, anCCs[k].nBlocks, anCCs[k].cc );
   }

   VG_(printf)("\n");
}


void VG_(sanity_check_malloc_all) ( void )
{
   UInt i;
   for (i = 0; i < VG_N_ARENAS; i++) {
      if (i == VG_AR_CLIENT && !client_inited)
         continue;
      sanity_check_malloc_arena ( i );
   }
}


/*------------------------------------------------------------*/
/*--- Creating and deleting blocks.                        ---*/
/*------------------------------------------------------------*/

// Mark the bytes at b .. b+bszB-1 as not in use, and add them to the
// relevant free list.

static
void mkFreeBlock ( Arena* a, Block* b, SizeT bszB, UInt b_lno )
{
   SizeT pszB = bszB_to_pszB(a, bszB);
   vg_assert(b_lno == pszB_to_listNo(pszB));
   //zzVALGRIND_MAKE_MEM_UNDEFINED(b, bszB);
   // Set the size fields and indicate not-in-use.
   set_bszB(b, mk_free_bszB(bszB));

   // Add to the relevant list.
   if (a->freelist[b_lno] == NULL) {
      set_prev_b(b, b);
      set_next_b(b, b);
      a->freelist[b_lno] = b;
   } else {
      Block* b_prev = get_prev_b(a->freelist[b_lno]);
      Block* b_next = a->freelist[b_lno];
      set_next_b(b_prev, b);
      set_prev_b(b_next, b);
      set_next_b(b, b_next);
      set_prev_b(b, b_prev);
   }
#  ifdef DEBUG_MALLOC
   (void)blockSane(a,b);
#  endif
}

1208// Mark the bytes at b .. b+bszB-1 as in use, and set up the block
1209// appropriately.
1210static
nethercote7ac7f7b2004-11-02 12:36:02 +00001211void mkInuseBlock ( Arena* a, Block* b, SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +00001212{
nethercote7ac7f7b2004-11-02 12:36:02 +00001213 UInt i;
nethercote2d5b8162004-08-11 09:40:52 +00001214 vg_assert(bszB >= min_useful_bszB(a));
njndbf7ca72006-03-31 11:57:59 +00001215 //zzVALGRIND_MAKE_MEM_UNDEFINED(b, bszB);
njn8d3f8452005-07-20 04:12:41 +00001216 set_bszB(b, mk_inuse_bszB(bszB));
nethercote2d5b8162004-08-11 09:40:52 +00001217 set_prev_b(b, NULL); // Take off freelist
1218 set_next_b(b, NULL); // ditto
1219 if (!a->clientmem) {
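// Stamp each redzone byte with a value derived from the block's own
// address, xor'd with the LO/HI masks, so later sanity checking can
// spot (and attribute) stray writes past the payload.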
1220 for (i = 0; i < a->rz_szB; i++) {
njn1dcee092009-02-24 03:07:37 +00001221 set_rz_lo_byte(b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK));
1222 set_rz_hi_byte(b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK));
nethercote2d5b8162004-08-11 09:40:52 +00001223 }
1224 }
1225# ifdef DEBUG_MALLOC
1226 (void)blockSane(a,b);
1227# endif
1228}
1229
1230// Remove a block from a given list. Does no sanity checking.
1231static
nethercote7ac7f7b2004-11-02 12:36:02 +00001232void unlinkBlock ( Arena* a, Block* b, UInt listno )
nethercote2d5b8162004-08-11 09:40:52 +00001233{
njn6e6588c2005-03-13 18:52:48 +00001234 vg_assert(listno < N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +00001235 if (get_prev_b(b) == b) {
1236 // Only one element in the list; treat it specially.
1237 vg_assert(get_next_b(b) == b);
1238 a->freelist[listno] = NULL;
1239 } else {
1240 Block* b_prev = get_prev_b(b);
1241 Block* b_next = get_next_b(b);
1242 a->freelist[listno] = b_prev;
1243 set_next_b(b_prev, b_next);
1244 set_prev_b(b_next, b_prev);
1245 swizzle ( a, listno );
1246 }
1247 set_prev_b(b, NULL);
1248 set_next_b(b, NULL);
jsewardb1a26ae2004-03-14 03:06:37 +00001249}
1250
1251
sewardjde4a1d02002-03-22 01:27:54 +00001252/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001253/*--- Core-visible functions. ---*/
sewardjde4a1d02002-03-22 01:27:54 +00001254/*------------------------------------------------------------*/
1255
nethercote2d5b8162004-08-11 09:40:52 +00001256// Align the request size.
1257static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +00001258SizeT align_req_pszB ( SizeT req_pszB )
nethercote2d5b8162004-08-11 09:40:52 +00001259{
nethercote7ac7f7b2004-11-02 12:36:02 +00001260 SizeT n = VG_MIN_MALLOC_SZB-1;
nethercote2d5b8162004-08-11 09:40:52 +00001261 return ((req_pszB + n) & (~n));
1262}
1263
sewardj9c606bd2008-09-18 18:12:50 +00001264void* VG_(arena_malloc) ( ArenaId aid, HChar* cc, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001265{
nethercote7ac7f7b2004-11-02 12:36:02 +00001266 SizeT req_bszB, frag_bszB, b_bszB;
sewardj0b3fd2d2007-08-21 10:55:26 +00001267 UInt lno, i;
sewardjde4a1d02002-03-22 01:27:54 +00001268 Superblock* new_sb;
nethercote2d5b8162004-08-11 09:40:52 +00001269 Block* b = NULL;
sewardjde4a1d02002-03-22 01:27:54 +00001270 Arena* a;
jsewardb1a26ae2004-03-14 03:06:37 +00001271 void* v;
sewardjde4a1d02002-03-22 01:27:54 +00001272
sewardj45f4e7c2005-09-27 19:20:21 +00001273 ensure_mm_init(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001274 a = arenaId_to_ArenaP(aid);
1275
nethercote7ac7f7b2004-11-02 12:36:02 +00001276 vg_assert(req_pszB < MAX_PSZB);
nethercote2d5b8162004-08-11 09:40:52 +00001277 req_pszB = align_req_pszB(req_pszB);
1278 req_bszB = pszB_to_bszB(a, req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001279
sewardj9c606bd2008-09-18 18:12:50 +00001280 // You must provide a cost-center name against which to charge
1281 // this allocation; it isn't optional.
1282 vg_assert(cc);
1283
nethercote2d5b8162004-08-11 09:40:52 +00001284 // Scan through all the big-enough freelists for a block.
njn4ab6d532007-10-16 23:18:06 +00001285 //
1286 // Nb: this scanning might be expensive in some cases. Eg. if you
1287 // allocate lots of small objects without freeing them, but no
1288 // medium-sized objects, it will repeatedly scan through the whole
1289 // list, each time failing to find a free block until the last element.
1290 //
1291 // If this becomes a noticeable problem... the loop answers the question
1292 // "where is the first nonempty list above me?" And most of the time,
1293 // you ask the same question and get the same answer. So it would be
1294 // good to somehow cache the results of previous searches.
1295 // One possibility is an array (with N_MALLOC_LISTS elements) of
1296 // shortcuts. shortcut[i] would give the index number of the nearest
1297 // larger list above list i which is non-empty. Then this loop isn't
1298 // necessary. However, we'd have to modify some section [ .. i-1] of the
1299 // shortcut array every time a list [i] changes from empty to nonempty or
1300 // back. This would require care to avoid pathological worst-case
1301 // behaviour.
1302 //
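// A minimal sketch of that shortcut idea (not implemented here; it
// assumes a UInt shortcut[N_MALLOC_LISTS] array kept alongside the
// freelists, where shortcut[i] is the lowest nonempty list index >= i):
//
//    UInt j, next = N_MALLOC_LISTS;          // sentinel: nothing above
//    for (j = N_MALLOC_LISTS; j > 0; j--) {
//       if (a->freelist[j-1] != NULL) next = j-1;
//       shortcut[j-1] = next;                // first nonempty list >= j-1
//    }
//
// The loop below could then start at shortcut[pszB_to_listNo(req_pszB)]
// rather than probing each list in turn.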
njn6e6588c2005-03-13 18:52:48 +00001303 for (lno = pszB_to_listNo(req_pszB); lno < N_MALLOC_LISTS; lno++) {
sewardjde4a1d02002-03-22 01:27:54 +00001304 b = a->freelist[lno];
nethercote2d5b8162004-08-11 09:40:52 +00001305 if (NULL == b) continue; // If this list is empty, try the next one.
sewardjde4a1d02002-03-22 01:27:54 +00001306 while (True) {
njnd0e685c2005-07-17 17:55:42 +00001307 b_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +00001308 if (b_bszB >= req_bszB) goto obtained_block; // success!
1309 b = get_next_b(b);
1310 if (b == a->freelist[lno]) break; // traversed entire freelist
sewardjde4a1d02002-03-22 01:27:54 +00001311 }
sewardjde4a1d02002-03-22 01:27:54 +00001312 }
1313
nethercote2d5b8162004-08-11 09:40:52 +00001314 // If we reach here, no suitable block found, allocate a new superblock
njn6e6588c2005-03-13 18:52:48 +00001315 vg_assert(lno == N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +00001316 new_sb = newSuperblock(a, req_bszB);
1317 if (NULL == new_sb) {
1318 // Should only fail for the client arena; otherwise we should
1319 // have aborted already.
1320 vg_assert(VG_AR_CLIENT == aid);
1321 return NULL;
sewardjde4a1d02002-03-22 01:27:54 +00001322 }
sewardj0b3fd2d2007-08-21 10:55:26 +00001323
1324 vg_assert(a->sblocks_used <= a->sblocks_size);
1325 if (a->sblocks_used == a->sblocks_size) {
1326 Superblock ** array;
1327 SysRes sres = VG_(am_sbrk_anon_float_valgrind)(sizeof(Superblock *) *
1328 a->sblocks_size * 2);
njncda2f0f2009-05-18 02:12:08 +00001329 if (sr_isError(sres)) {
sewardj0b3fd2d2007-08-21 10:55:26 +00001330 VG_(out_of_memory_NORETURN)("arena_init", sizeof(Superblock *) *
1331 a->sblocks_size * 2);
1332 /* NOTREACHED */
1333 }
njncda2f0f2009-05-18 02:12:08 +00001334 array = (Superblock**)(AddrH)sr_Res(sres);
sewardj0b3fd2d2007-08-21 10:55:26 +00001335 for (i = 0; i < a->sblocks_used; ++i) array[i] = a->sblocks[i];
1336
1337 a->sblocks_size *= 2;
1338 a->sblocks = array;
1339 VG_(debugLog)(1, "mallocfree",
1340 "sblock array for arena `%s' resized to %ld\n",
1341 a->name, a->sblocks_size);
1342 }
1343
1344 vg_assert(a->sblocks_used < a->sblocks_size);
1345
1346 i = a->sblocks_used;
1347 while (i > 0) {
1348 if (a->sblocks[i-1] > new_sb) {
1349 a->sblocks[i] = a->sblocks[i-1];
1350 } else {
1351 break;
1352 }
1353 --i;
1354 }
1355 a->sblocks[i] = new_sb;
1356 a->sblocks_used++;
1357
nethercote2d5b8162004-08-11 09:40:52 +00001358 b = (Block*)&new_sb->payload_bytes[0];
1359 lno = pszB_to_listNo(bszB_to_pszB(a, new_sb->n_payload_bytes));
1360 mkFreeBlock ( a, b, new_sb->n_payload_bytes, lno);
sewardj94c8eb42008-09-19 20:13:39 +00001361 if (VG_(clo_profile_heap))
1362 set_cc(b, "admin.free-new-sb-1");
nethercote2d5b8162004-08-11 09:40:52 +00001363 // fall through
sewardjde4a1d02002-03-22 01:27:54 +00001364
nethercote2d5b8162004-08-11 09:40:52 +00001365 obtained_block:
1366 // Ok, we can allocate from b, which lives in list lno.
sewardjde4a1d02002-03-22 01:27:54 +00001367 vg_assert(b != NULL);
njn6e6588c2005-03-13 18:52:48 +00001368 vg_assert(lno < N_MALLOC_LISTS);
sewardjde4a1d02002-03-22 01:27:54 +00001369 vg_assert(a->freelist[lno] != NULL);
njnd0e685c2005-07-17 17:55:42 +00001370 b_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +00001371 // req_bszB is the size of the block we are after. b_bszB is the
1372 // size of what we've actually got.
1373 vg_assert(b_bszB >= req_bszB);
sewardjde4a1d02002-03-22 01:27:54 +00001374
nethercote2d5b8162004-08-11 09:40:52 +00001375 // Could we split this block and still get a useful fragment?
1376 frag_bszB = b_bszB - req_bszB;
1377 if (frag_bszB >= min_useful_bszB(a)) {
1378 // Yes, split block in two, put the fragment on the appropriate free
1379 // list, and update b_bszB accordingly.
1380 // printf( "split %dB into %dB and %dB\n", b_bszB, req_bszB, frag_bszB );
sewardjde4a1d02002-03-22 01:27:54 +00001381 unlinkBlock(a, b, lno);
nethercote2d5b8162004-08-11 09:40:52 +00001382 mkInuseBlock(a, b, req_bszB);
sewardj94c8eb42008-09-19 20:13:39 +00001383 if (VG_(clo_profile_heap))
1384 set_cc(b, cc);
nethercote2d5b8162004-08-11 09:40:52 +00001385 mkFreeBlock(a, &b[req_bszB], frag_bszB,
1386 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
sewardj94c8eb42008-09-19 20:13:39 +00001387 if (VG_(clo_profile_heap))
1388 set_cc(&b[req_bszB], "admin.fragmentation-1");
njnd0e685c2005-07-17 17:55:42 +00001389 b_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +00001390 } else {
1391 // No, mark as in use and use as-is.
1392 unlinkBlock(a, b, lno);
1393 mkInuseBlock(a, b, b_bszB);
sewardj94c8eb42008-09-19 20:13:39 +00001394 if (VG_(clo_profile_heap))
1395 set_cc(b, cc);
sewardjde4a1d02002-03-22 01:27:54 +00001396 }
sewardjde4a1d02002-03-22 01:27:54 +00001397
nethercote2d5b8162004-08-11 09:40:52 +00001398 // Update stats
1399 a->bytes_on_loan += bszB_to_pszB(a, b_bszB);
sewardj9c606bd2008-09-18 18:12:50 +00001400 if (a->bytes_on_loan > a->bytes_on_loan_max) {
sewardjde4a1d02002-03-22 01:27:54 +00001401 a->bytes_on_loan_max = a->bytes_on_loan;
sewardj9c606bd2008-09-18 18:12:50 +00001402 if (a->bytes_on_loan_max >= a->next_profile_at) {
1403 /* next profile after 10% more growth */
1404 a->next_profile_at
1405 = (SizeT)(
1406 (((ULong)a->bytes_on_loan_max) * 110ULL) / 100ULL );
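/* e.g. a high-water mark of 1000 bytes arms the next profile
   at 1100 (1000 * 110 / 100) */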
1407 if (VG_(clo_profile_heap))
1408 cc_analyse_alloc_arena(aid);
1409 }
1410 }
sewardjde4a1d02002-03-22 01:27:54 +00001411
1412# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00001413 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001414# endif
1415
nethercote2d5b8162004-08-11 09:40:52 +00001416 v = get_block_payload(a, b);
1417 vg_assert( (((Addr)v) & (VG_MIN_MALLOC_SZB-1)) == 0 );
sewardjb5f6f512005-03-10 23:59:00 +00001418
sewardja53462a2007-11-24 23:37:07 +00001419 /* VALGRIND_MALLOCLIKE_BLOCK(v, req_pszB, 0, False); */
1420
1421 /* For debugging/testing purposes, fill the newly allocated area
1422 with a definite value in an attempt to shake out any
1423 uninitialised uses of the data (by V core / V tools, not by the
1424 client). Testing on 25 Nov 07 with the values 0x00, 0xFF, 0x55,
1425 0xAA showed no differences in the regression tests on
1426 amd64-linux. Note: this is disabled by default. */
1427 if (0 && aid != VG_AR_CLIENT)
1428 VG_(memset)(v, 0xAA, (SizeT)req_pszB);
1429
jsewardb1a26ae2004-03-14 03:06:37 +00001430 return v;
sewardjde4a1d02002-03-22 01:27:54 +00001431}
1432
1433
njn25e49d8e72002-09-23 09:36:25 +00001434void VG_(arena_free) ( ArenaId aid, void* ptr )
sewardjde4a1d02002-03-22 01:27:54 +00001435{
1436 Superblock* sb;
nethercote2d5b8162004-08-11 09:40:52 +00001437 UByte* sb_start;
1438 UByte* sb_end;
njna2578652005-07-17 17:12:24 +00001439 Block* other_b;
nethercote2d5b8162004-08-11 09:40:52 +00001440 Block* b;
nethercote7ac7f7b2004-11-02 12:36:02 +00001441 SizeT b_bszB, b_pszB, other_bszB;
1442 UInt b_listno;
sewardjde4a1d02002-03-22 01:27:54 +00001443 Arena* a;
1444
sewardj45f4e7c2005-09-27 19:20:21 +00001445 ensure_mm_init(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001446 a = arenaId_to_ArenaP(aid);
1447
njn25e49d8e72002-09-23 09:36:25 +00001448 if (ptr == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00001449 return;
1450 }
1451
nethercote2d5b8162004-08-11 09:40:52 +00001452 b = get_payload_block(a, ptr);
sewardjde4a1d02002-03-22 01:27:54 +00001453
sewardj3187a4e2005-12-04 23:27:14 +00001454 /* If this is one of V's areas, check carefully the block we're
1455 getting back. This picks up simple block-end overruns. */
1456 if (aid != VG_AR_CLIENT)
1457 vg_assert(blockSane(a, b));
sewardjde4a1d02002-03-22 01:27:54 +00001458
njne6f9e3b2005-07-17 18:00:57 +00001459 b_bszB = get_bszB(b);
1460 b_pszB = bszB_to_pszB(a, b_bszB);
nethercote2d5b8162004-08-11 09:40:52 +00001461 sb = findSb( a, b );
1462 sb_start = &sb->payload_bytes[0];
1463 sb_end = &sb->payload_bytes[sb->n_payload_bytes - 1];
sewardjde4a1d02002-03-22 01:27:54 +00001464
njne6f9e3b2005-07-17 18:00:57 +00001465 a->bytes_on_loan -= b_pszB;
1466
sewardj3187a4e2005-12-04 23:27:14 +00001467 /* If this is one of V's areas, fill it up with junk to enhance the
1468 chances of catching any later reads of it. Note, 0xDD is
1469 carefully chosen junk :-), in that: (1) 0xDDDDDDDD is an invalid
1470 and non-word-aligned address on most systems, and (2) 0xDD is a
1471 value which is unlikely to be generated by the new compressed
1472 Vbits representation for memcheck. */
1473 if (aid != VG_AR_CLIENT)
1474 VG_(memset)(ptr, 0xDD, (SizeT)b_pszB);
1475
nethercote2d5b8162004-08-11 09:40:52 +00001476 // Put this chunk back on a list somewhere.
nethercote2d5b8162004-08-11 09:40:52 +00001477 b_listno = pszB_to_listNo(b_pszB);
1478 mkFreeBlock( a, b, b_bszB, b_listno );
sewardj94c8eb42008-09-19 20:13:39 +00001479 if (VG_(clo_profile_heap))
1480 set_cc(b, "admin.free-1");
sewardjde4a1d02002-03-22 01:27:54 +00001481
nethercote2d5b8162004-08-11 09:40:52 +00001482 // See if this block can be merged with its successor.
1483 // First test if we're far enough before the superblock's end to possibly
1484 // have a successor.
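// (Block is a byte-sized type, so "b + b_bszB" advances exactly
// b_bszB bytes, matching the byte arithmetic used throughout.)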
njna2578652005-07-17 17:12:24 +00001485 other_b = b + b_bszB;
1486 if (other_b+min_useful_bszB(a)-1 <= (Block*)sb_end) {
nethercote2d5b8162004-08-11 09:40:52 +00001487 // Ok, we have a successor, merge if it's not in use.
njnd0e685c2005-07-17 17:55:42 +00001488 other_bszB = get_bszB(other_b);
njn472cc7c2005-07-17 17:20:30 +00001489 if (!is_inuse_block(other_b)) {
nethercote2d5b8162004-08-11 09:40:52 +00001490 // VG_(printf)( "merge-successor\n");
sewardjde4a1d02002-03-22 01:27:54 +00001491# ifdef DEBUG_MALLOC
njna2578652005-07-17 17:12:24 +00001492 vg_assert(blockSane(a, other_b));
sewardjde4a1d02002-03-22 01:27:54 +00001493# endif
nethercote2d5b8162004-08-11 09:40:52 +00001494 unlinkBlock( a, b, b_listno );
njna2578652005-07-17 17:12:24 +00001495 unlinkBlock( a, other_b, pszB_to_listNo(bszB_to_pszB(a,other_bszB)) );
nethercote2d5b8162004-08-11 09:40:52 +00001496 b_bszB += other_bszB;
1497 b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
1498 mkFreeBlock( a, b, b_bszB, b_listno );
sewardj94c8eb42008-09-19 20:13:39 +00001499 if (VG_(clo_profile_heap))
1500 set_cc(b, "admin.free-2");
sewardjde4a1d02002-03-22 01:27:54 +00001501 }
nethercote2d5b8162004-08-11 09:40:52 +00001502 } else {
1503 // Not enough space for successor: check that b is the last block
1504 // ie. there are no unused bytes at the end of the Superblock.
njna2578652005-07-17 17:12:24 +00001505 vg_assert(other_b-1 == (Block*)sb_end);
sewardjde4a1d02002-03-22 01:27:54 +00001506 }
1507
nethercote2d5b8162004-08-11 09:40:52 +00001508 // Then see if this block can be merged with its predecessor.
1509 // First test if we're far enough after the superblock's start to possibly
1510 // have a predecessor.
1511 if (b >= (Block*)sb_start + min_useful_bszB(a)) {
1512 // Ok, we have a predecessor, merge if it's not in use.
njna2578652005-07-17 17:12:24 +00001513 other_b = get_predecessor_block( b );
njnd0e685c2005-07-17 17:55:42 +00001514 other_bszB = get_bszB(other_b);
njn472cc7c2005-07-17 17:20:30 +00001515 if (!is_inuse_block(other_b)) {
nethercote2d5b8162004-08-11 09:40:52 +00001516 // VG_(printf)( "merge-predecessor\n");
nethercote2d5b8162004-08-11 09:40:52 +00001517 unlinkBlock( a, b, b_listno );
njna2578652005-07-17 17:12:24 +00001518 unlinkBlock( a, other_b, pszB_to_listNo(bszB_to_pszB(a, other_bszB)) );
1519 b = other_b;
nethercote2d5b8162004-08-11 09:40:52 +00001520 b_bszB += other_bszB;
1521 b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
1522 mkFreeBlock( a, b, b_bszB, b_listno );
sewardj94c8eb42008-09-19 20:13:39 +00001523 if (VG_(clo_profile_heap))
1524 set_cc(b, "admin.free-3");
sewardjde4a1d02002-03-22 01:27:54 +00001525 }
nethercote2d5b8162004-08-11 09:40:52 +00001526 } else {
1527 // Not enough space for predecessor: check that b is the first block,
1528 // ie. there are no unused bytes at the start of the Superblock.
1529 vg_assert((Block*)sb_start == b);
sewardjde4a1d02002-03-22 01:27:54 +00001530 }
1531
1532# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00001533 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001534# endif
1535
sewardj45f4e7c2005-09-27 19:20:21 +00001536 //zzVALGRIND_FREELIKE_BLOCK(ptr, 0);
sewardjde4a1d02002-03-22 01:27:54 +00001537}
1538
1539
1540/*
1541 The idea for malloc_aligned() is to allocate a big block, base, and
1542 then split it into two parts: frag, which is returned to the
1543 free pool, and align, which is the bit we're really after. Here's
1544 a picture. L and H denote the block lower and upper overheads, in
nethercote2d5b8162004-08-11 09:40:52 +00001545 bytes. The details are gruesome. Note it is slightly complicated
sewardjde4a1d02002-03-22 01:27:54 +00001546 because the initial request to generate base may return a bigger
1547 block than we asked for, so it is important to distinguish the base
1548 request size and the base actual size.
1549
1550 frag_b align_b
1551 | |
1552 | frag_p | align_p
1553 | | | |
1554 v v v v
1555
1556 +---+ +---+---+ +---+
1557 | L |----------------| H | L |---------------| H |
1558 +---+ +---+---+ +---+
1559
1560 ^ ^ ^
1561 | | :
1562 | base_p this addr must be aligned
1563 |
1564 base_b
1565
1566 . . . . . . .
nethercote2d5b8162004-08-11 09:40:52 +00001567 <------ frag_bszB -------> . . .
1568 . <------------- base_pszB_act -----------> .
sewardjde4a1d02002-03-22 01:27:54 +00001569 . . . . . . .
1570
1571*/
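/* For reference, align_upwards(p, a) rounds p up to the next multiple
   of a, returning p unchanged if it is already aligned -- e.g., assuming
   the usual (p + a-1) & ~(a-1) formulation, align_upwards(0x1007, 0x40)
   == 0x1040 and align_upwards(0x1040, 0x40) == 0x1040. */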
sewardj9c606bd2008-09-18 18:12:50 +00001572void* VG_(arena_memalign) ( ArenaId aid, HChar* cc,
1573 SizeT req_alignB, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001574{
nethercote7ac7f7b2004-11-02 12:36:02 +00001575 SizeT base_pszB_req, base_pszB_act, frag_bszB;
nethercote2d5b8162004-08-11 09:40:52 +00001576 Block *base_b, *align_b;
1577 UByte *base_p, *align_p;
nethercote7ac7f7b2004-11-02 12:36:02 +00001578 SizeT saved_bytes_on_loan;
sewardjde4a1d02002-03-22 01:27:54 +00001579 Arena* a;
1580
sewardj45f4e7c2005-09-27 19:20:21 +00001581 ensure_mm_init(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001582 a = arenaId_to_ArenaP(aid);
1583
nethercote7ac7f7b2004-11-02 12:36:02 +00001584 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00001585
sewardj9c606bd2008-09-18 18:12:50 +00001586 // You must provide a cost-center name against which to charge
1587 // this allocation; it isn't optional.
1588 vg_assert(cc);
1589
nethercote2d5b8162004-08-11 09:40:52 +00001590 // Check that the requested alignment seems reasonable; that is, is
1591 // a power of 2.
1592 if (req_alignB < VG_MIN_MALLOC_SZB
1593 || req_alignB > 1048576
njn717cde52005-05-10 02:47:21 +00001594 || VG_(log2)( req_alignB ) == -1 /* not a power of 2 */) {
njn36b65172009-04-14 23:43:15 +00001595 VG_(printf)("VG_(arena_memalign)(%p, %lu, %lu)\n"
1596 "bad alignment value %lu\n"
1597 "(it is too small, too big, or not a power of two)",
1598 a, req_alignB, req_pszB, req_alignB );
njn717cde52005-05-10 02:47:21 +00001599 VG_(core_panic)("VG_(arena_memalign)");
nethercote2d5b8162004-08-11 09:40:52 +00001600 /*NOTREACHED*/
sewardjde4a1d02002-03-22 01:27:54 +00001601 }
nethercote2d5b8162004-08-11 09:40:52 +00001602 // Paranoid
1603 vg_assert(req_alignB % VG_MIN_MALLOC_SZB == 0);
sewardjde4a1d02002-03-22 01:27:54 +00001604
1605 /* Required payload size for the aligned chunk. */
nethercote2d5b8162004-08-11 09:40:52 +00001606 req_pszB = align_req_pszB(req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001607
nethercote2d5b8162004-08-11 09:40:52 +00001608 /* Payload size to request for the big block that we will split up. */
1609 base_pszB_req = req_pszB + min_useful_bszB(a) + req_alignB;
sewardjde4a1d02002-03-22 01:27:54 +00001610
1611 /* Payload ptr for the block we are going to split. Note this
1612 changes a->bytes_on_loan; we save and restore it ourselves. */
1613 saved_bytes_on_loan = a->bytes_on_loan;
sewardj9c606bd2008-09-18 18:12:50 +00001614 base_p = VG_(arena_malloc) ( aid, cc, base_pszB_req );
sewardjde4a1d02002-03-22 01:27:54 +00001615 a->bytes_on_loan = saved_bytes_on_loan;
1616
tom8af1a172005-10-06 12:04:26 +00001617 /* Give up if we couldn't allocate enough space */
1618 if (base_p == 0)
1619 return 0;
1620
sewardjde4a1d02002-03-22 01:27:54 +00001621 /* Block ptr for the block we are going to split. */
nethercote2d5b8162004-08-11 09:40:52 +00001622 base_b = get_payload_block ( a, base_p );
sewardjde4a1d02002-03-22 01:27:54 +00001623
1624 /* Pointer to the payload of the aligned block we are going to
1625 return. This has to be suitably aligned. */
nethercote2d5b8162004-08-11 09:40:52 +00001626 align_p = align_upwards ( base_b + 2 * overhead_szB_lo(a)
1627 + overhead_szB_hi(a),
sewardjde4a1d02002-03-22 01:27:54 +00001628 req_alignB );
nethercote2d5b8162004-08-11 09:40:52 +00001629 align_b = get_payload_block(a, align_p);
sewardjde4a1d02002-03-22 01:27:54 +00001630
1631 /* The block size of the fragment we will create. This must be big
1632 enough to actually create a fragment. */
nethercote2d5b8162004-08-11 09:40:52 +00001633 frag_bszB = align_b - base_b;
1634
1635 vg_assert(frag_bszB >= min_useful_bszB(a));
sewardjde4a1d02002-03-22 01:27:54 +00001636
1637 /* The actual payload size of the block we are going to split. */
njn089f51f2005-07-17 18:12:00 +00001638 base_pszB_act = get_pszB(a, base_b);
sewardjde4a1d02002-03-22 01:27:54 +00001639
nethercote2d5b8162004-08-11 09:40:52 +00001640 /* Create the fragment block, and put it back on the relevant free list. */
1641 mkFreeBlock ( a, base_b, frag_bszB,
1642 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)) );
sewardj94c8eb42008-09-19 20:13:39 +00001643 if (VG_(clo_profile_heap))
1644 set_cc(base_b, "admin.frag-memalign-1");
sewardjde4a1d02002-03-22 01:27:54 +00001645
1646 /* Create the aligned block. */
nethercote2d5b8162004-08-11 09:40:52 +00001647 mkInuseBlock ( a, align_b,
1648 base_p + base_pszB_act
1649 + overhead_szB_hi(a) - (UByte*)align_b );
sewardj94c8eb42008-09-19 20:13:39 +00001650 if (VG_(clo_profile_heap))
1651 set_cc(align_b, cc);
sewardjde4a1d02002-03-22 01:27:54 +00001652
1653 /* Final sanity checks. */
njn472cc7c2005-07-17 17:20:30 +00001654 vg_assert( is_inuse_block(get_payload_block(a, align_p)) );
sewardjde4a1d02002-03-22 01:27:54 +00001655
njn089f51f2005-07-17 18:12:00 +00001656 vg_assert(req_pszB <= get_pszB(a, get_payload_block(a, align_p)));
sewardjde4a1d02002-03-22 01:27:54 +00001657
njn089f51f2005-07-17 18:12:00 +00001658 a->bytes_on_loan += get_pszB(a, get_payload_block(a, align_p));
sewardjde4a1d02002-03-22 01:27:54 +00001659 if (a->bytes_on_loan > a->bytes_on_loan_max)
1660 a->bytes_on_loan_max = a->bytes_on_loan;
1661
1662# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00001663 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001664# endif
1665
nethercote2d5b8162004-08-11 09:40:52 +00001666 vg_assert( (((Addr)align_p) % req_alignB) == 0 );
sewardjb5f6f512005-03-10 23:59:00 +00001667
sewardj45f4e7c2005-09-27 19:20:21 +00001668 //zzVALGRIND_MALLOCLIKE_BLOCK(align_p, req_pszB, 0, False);
sewardjb5f6f512005-03-10 23:59:00 +00001669
nethercote2d5b8162004-08-11 09:40:52 +00001670 return align_p;
1671}
1672
1673
njn8b140de2009-02-17 04:31:18 +00001674SizeT VG_(arena_malloc_usable_size) ( ArenaId aid, void* ptr )
nethercote2d5b8162004-08-11 09:40:52 +00001675{
1676 Arena* a = arenaId_to_ArenaP(aid);
1677 Block* b = get_payload_block(a, ptr);
njn089f51f2005-07-17 18:12:00 +00001678 return get_pszB(a, b);
sewardjde4a1d02002-03-22 01:27:54 +00001679}
1680
bart545380e2008-04-21 17:28:50 +00001681
1682// Implementation of mallinfo(). There is no recent standard that defines
1683// the behavior of mallinfo(). The meaning of the fields in struct mallinfo
1684// is as follows:
1685//
1686// struct mallinfo {
1687// int arena; /* total space in arena */
1688// int ordblks; /* number of ordinary blocks */
1689// int smblks; /* number of small blocks */
1690// int hblks; /* number of holding blocks */
1691// int hblkhd; /* space in holding block headers */
1692// int usmblks; /* space in small blocks in use */
1693// int fsmblks; /* space in free small blocks */
1694// int uordblks; /* space in ordinary blocks in use */
1695// int fordblks; /* space in free ordinary blocks */
1696// int keepcost; /* space penalty if keep option */
1697// /* is used */
1698// };
1699//
1700// The glibc documentation about mallinfo (which is somewhat outdated) can
1701// be found here:
1702// http://www.gnu.org/software/libtool/manual/libc/Statistics-of-Malloc.html
1703//
1704// See also http://bugs.kde.org/show_bug.cgi?id=160956.
1705//
1706// Regarding the implementation of VG_(mallinfo)(): we cannot return the
1707// whole struct as the library function does, because this is called by a
1708 // client request. Instead, the caller passes in a pointer that we fill.
njn088bfb42005-08-17 05:01:37 +00001709void VG_(mallinfo) ( ThreadId tid, struct vg_mallinfo* mi )
1710{
sewardj76dda8f2008-05-29 13:45:49 +00001711 UWord i, free_blocks, free_blocks_size;
bartc3c98392008-04-19 14:43:30 +00001712 Arena* a = arenaId_to_ArenaP(VG_AR_CLIENT);
1713
1714 // Traverse free list and calculate free blocks statistics.
1715 // This may seem slow but glibc works the same way.
1716 free_blocks_size = free_blocks = 0;
1717 for (i = 0; i < N_MALLOC_LISTS; i++) {
1718 Block* b = a->freelist[i];
1719 if (b == NULL) continue;
1720 for (;;) {
1721 free_blocks++;
sewardj76dda8f2008-05-29 13:45:49 +00001722 free_blocks_size += (UWord)get_pszB(a, b);
bartc3c98392008-04-19 14:43:30 +00001723 b = get_next_b(b);
1724 if (b == a->freelist[i]) break;
1725 }
1726 }
1727
1728 // We don't have fastbins so smblks & fsmblks are always 0. Also we don't
bart545380e2008-04-21 17:28:50 +00001729 // have a separate mmap allocator so set hblks & hblkhd to 0.
bartc3c98392008-04-19 14:43:30 +00001730 mi->arena = a->bytes_mmaped;
bart545380e2008-04-21 17:28:50 +00001731 mi->ordblks = free_blocks + VG_(free_queue_length);
bartc3c98392008-04-19 14:43:30 +00001732 mi->smblks = 0;
1733 mi->hblks = 0;
1734 mi->hblkhd = 0;
1735 mi->usmblks = 0;
1736 mi->fsmblks = 0;
bart545380e2008-04-21 17:28:50 +00001737 mi->uordblks = a->bytes_on_loan - VG_(free_queue_volume);
1738 mi->fordblks = free_blocks_size + VG_(free_queue_volume);
bartc3c98392008-04-19 14:43:30 +00001739 mi->keepcost = 0; // may want some value in here
njn088bfb42005-08-17 05:01:37 +00001740}
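// A minimal usage sketch (illustrative only; a real caller gets tid
// from the core's client-request machinery):
//
//    struct vg_mallinfo mi;
//    VG_(mallinfo)(tid, &mi);
//    VG_(printf)("heap: %d in use, %d free\n", mi.uordblks, mi.fordblks);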
sewardjde4a1d02002-03-22 01:27:54 +00001741
sewardj45f4e7c2005-09-27 19:20:21 +00001742
sewardjde4a1d02002-03-22 01:27:54 +00001743/*------------------------------------------------------------*/
1744/*--- Services layered on top of malloc/free. ---*/
1745/*------------------------------------------------------------*/
1746
sewardj9c606bd2008-09-18 18:12:50 +00001747void* VG_(arena_calloc) ( ArenaId aid, HChar* cc,
1748 SizeT nmemb, SizeT bytes_per_memb )
sewardjde4a1d02002-03-22 01:27:54 +00001749{
nethercote7ac7f7b2004-11-02 12:36:02 +00001750 SizeT size;
sewardjde4a1d02002-03-22 01:27:54 +00001751 UChar* p;
njn25e49d8e72002-09-23 09:36:25 +00001752
njn926ed472005-03-11 04:44:10 +00001753 size = nmemb * bytes_per_memb;
1754 vg_assert(size >= nmemb && size >= bytes_per_memb);// check against overflow
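   // Nb: this catches the common wraparounds -- e.g. on a 32-bit host,
   // 0x10000 * 0x10000 wraps to 0, which fails both comparisons -- though
   // only a division-based check would catch every possible overflow.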
njn3e884182003-04-15 13:03:23 +00001755
sewardj9c606bd2008-09-18 18:12:50 +00001756 p = VG_(arena_malloc) ( aid, cc, size );
njn3e884182003-04-15 13:03:23 +00001757
njn926ed472005-03-11 04:44:10 +00001758 VG_(memset)(p, 0, size);
sewardjb5f6f512005-03-10 23:59:00 +00001759
sewardj45f4e7c2005-09-27 19:20:21 +00001760 //zzVALGRIND_MALLOCLIKE_BLOCK(p, size, 0, True);
njn25e49d8e72002-09-23 09:36:25 +00001761
sewardjde4a1d02002-03-22 01:27:54 +00001762 return p;
1763}
1764
1765
sewardj9c606bd2008-09-18 18:12:50 +00001766void* VG_(arena_realloc) ( ArenaId aid, HChar* cc,
1767 void* ptr, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001768{
1769 Arena* a;
njn089f51f2005-07-17 18:12:00 +00001770 SizeT old_pszB;
sewardjb5f6f512005-03-10 23:59:00 +00001771 UChar *p_new;
nethercote2d5b8162004-08-11 09:40:52 +00001772 Block* b;
sewardjde4a1d02002-03-22 01:27:54 +00001773
sewardj45f4e7c2005-09-27 19:20:21 +00001774 ensure_mm_init(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001775 a = arenaId_to_ArenaP(aid);
1776
nethercote7ac7f7b2004-11-02 12:36:02 +00001777 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00001778
njn180f6982008-10-12 19:51:41 +00001779 if (NULL == ptr) {
1780 return VG_(arena_malloc)(aid, cc, req_pszB);
1781 }
1782
1783 if (req_pszB == 0) {
1784 VG_(arena_free)(aid, ptr);
1785 return NULL;
1786 }
1787
nethercote2d5b8162004-08-11 09:40:52 +00001788 b = get_payload_block(a, ptr);
1789 vg_assert(blockSane(a, b));
sewardjde4a1d02002-03-22 01:27:54 +00001790
njn472cc7c2005-07-17 17:20:30 +00001791 vg_assert(is_inuse_block(b));
njn089f51f2005-07-17 18:12:00 +00001792 old_pszB = get_pszB(a, b);
sewardjde4a1d02002-03-22 01:27:54 +00001793
njn25e49d8e72002-09-23 09:36:25 +00001794 if (req_pszB <= old_pszB) {
njn25e49d8e72002-09-23 09:36:25 +00001795 return ptr;
1796 }
sewardjde4a1d02002-03-22 01:27:54 +00001797
sewardj9c606bd2008-09-18 18:12:50 +00001798 p_new = VG_(arena_malloc) ( aid, cc, req_pszB );
njn828022a2005-03-13 14:56:31 +00001799
sewardjb5f6f512005-03-10 23:59:00 +00001800 VG_(memcpy)(p_new, ptr, old_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001801
sewardjb5f6f512005-03-10 23:59:00 +00001802 VG_(arena_free)(aid, ptr);
njn25e49d8e72002-09-23 09:36:25 +00001803
sewardjde4a1d02002-03-22 01:27:54 +00001804 return p_new;
1805}
1806
1807
njn6ba622c2005-06-11 01:12:08 +00001808/* Inline just for the wrapper VG_(strdup) below */
sewardj9c606bd2008-09-18 18:12:50 +00001809__inline__ Char* VG_(arena_strdup) ( ArenaId aid, HChar* cc,
1810 const Char* s )
njn6ba622c2005-06-11 01:12:08 +00001811{
1812 Int i;
1813 Int len;
1814 Char* res;
1815
1816 if (s == NULL)
1817 return NULL;
1818
1819 len = VG_(strlen)(s) + 1;
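   // (the +1 ensures the copy loop below also copies the terminating NUL)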
sewardj9c606bd2008-09-18 18:12:50 +00001820 res = VG_(arena_malloc) (aid, cc, len);
njn6ba622c2005-06-11 01:12:08 +00001821
1822 for (i = 0; i < len; i++)
1823 res[i] = s[i];
1824 return res;
1825}
1826
1827
sewardjde4a1d02002-03-22 01:27:54 +00001828/*------------------------------------------------------------*/
nethercote996901a2004-08-03 13:29:09 +00001829/*--- Tool-visible functions. ---*/
njn25e49d8e72002-09-23 09:36:25 +00001830/*------------------------------------------------------------*/
1831
nethercote2d5b8162004-08-11 09:40:52 +00001832// All just wrappers to avoid exposing arenas to tools.
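//
// For example, a tool might manage a scratch buffer like this
// (illustrative; "mytool.scratch" is an arbitrary cost-centre name):
//
//    HChar* buf = VG_(malloc)("mytool.scratch", 128);
//    ... use buf ...
//    VG_(free)(buf);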
njn25e49d8e72002-09-23 09:36:25 +00001833
sewardj9c606bd2008-09-18 18:12:50 +00001834void* VG_(malloc) ( HChar* cc, SizeT nbytes )
njn25e49d8e72002-09-23 09:36:25 +00001835{
sewardj9c606bd2008-09-18 18:12:50 +00001836 return VG_(arena_malloc) ( VG_AR_TOOL, cc, nbytes );
njn25e49d8e72002-09-23 09:36:25 +00001837}
1838
1839void VG_(free) ( void* ptr )
1840{
nethercote60f5b822004-01-26 17:24:42 +00001841 VG_(arena_free) ( VG_AR_TOOL, ptr );
njn25e49d8e72002-09-23 09:36:25 +00001842}
1843
sewardj9c606bd2008-09-18 18:12:50 +00001844void* VG_(calloc) ( HChar* cc, SizeT nmemb, SizeT bytes_per_memb )
njn25e49d8e72002-09-23 09:36:25 +00001845{
sewardj9c606bd2008-09-18 18:12:50 +00001846 return VG_(arena_calloc) ( VG_AR_TOOL, cc, nmemb, bytes_per_memb );
njn25e49d8e72002-09-23 09:36:25 +00001847}
1848
sewardj9c606bd2008-09-18 18:12:50 +00001849void* VG_(realloc) ( HChar* cc, void* ptr, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001850{
sewardj9c606bd2008-09-18 18:12:50 +00001851 return VG_(arena_realloc) ( VG_AR_TOOL, cc, ptr, size );
njn25e49d8e72002-09-23 09:36:25 +00001852}
1853
sewardj9c606bd2008-09-18 18:12:50 +00001854Char* VG_(strdup) ( HChar* cc, const Char* s )
njn6ba622c2005-06-11 01:12:08 +00001855{
sewardj9c606bd2008-09-18 18:12:50 +00001856 return VG_(arena_strdup) ( VG_AR_TOOL, cc, s );
njn6ba622c2005-06-11 01:12:08 +00001857}
1858
njn32397c02007-11-10 04:08:08 +00001859// Useful for querying user blocks.
1860SizeT VG_(malloc_usable_size) ( void* p )
1861{
njn00556da2009-03-17 04:51:19 +00001862 return VG_(arena_malloc_usable_size)(VG_AR_CLIENT, p);
njn32397c02007-11-10 04:08:08 +00001863}
1864
1865
sewardjde4a1d02002-03-22 01:27:54 +00001866/*--------------------------------------------------------------------*/
njn717cde52005-05-10 02:47:21 +00001867/*--- end ---*/
sewardjde4a1d02002-03-22 01:27:54 +00001868/*--------------------------------------------------------------------*/