
/*--------------------------------------------------------------------*/
/*--- An implementation of malloc/free which doesn't use sbrk.     ---*/
/*---                                               m_mallocfree.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2012 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcbase.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_libcsetjmp.h"   // to keep _threadstate.h happy
#include "pub_core_threadstate.h"  // for VG_INVALID_THREADID
#include "pub_core_transtab.h"
#include "pub_core_tooliface.h"

#include "pub_tool_inner.h"
#if defined(ENABLE_INNER_CLIENT_REQUEST)
#include "memcheck/memcheck.h"
#endif

// #define DEBUG_MALLOC      // turn on heavyweight debugging machinery
// #define VERBOSE_MALLOC    // make verbose, esp. in debugging machinery

/* Number and total size of blocks in free queue. Used by mallinfo(). */
Long VG_(free_queue_volume) = 0;
Long VG_(free_queue_length) = 0;

static void cc_analyse_alloc_arena ( ArenaId aid ); /* fwds */

/*------------------------------------------------------------*/
/*--- Main types                                           ---*/
/*------------------------------------------------------------*/

#define N_MALLOC_LISTS     112    // do not change this

// The amount you can ask for is limited only by sizeof(SizeT)...
#define MAX_PSZB              (~((SizeT)0x0))

// Each arena has a sorted array of superblocks, which expands
// dynamically.  This is its initial size.
#define SBLOCKS_SIZE_INITIAL 50

typedef UChar UByte;

/* Layout of an in-use block:

      cost center (OPTIONAL)   (VG_MIN_MALLOC_SZB bytes, only when h-p enabled)
      this block total szB     (sizeof(SizeT) bytes)
      red zone bytes           (depends on Arena.rz_szB, but >= sizeof(void*))
      (payload bytes)
      red zone bytes           (depends on Arena.rz_szB, but >= sizeof(void*))
      this block total szB     (sizeof(SizeT) bytes)

   Layout of a block on the free list:

      cost center (OPTIONAL)   (VG_MIN_MALLOC_SZB bytes, only when h-p enabled)
      this block total szB     (sizeof(SizeT) bytes)
      freelist previous ptr    (sizeof(void*) bytes)
      excess red zone bytes    (if Arena.rz_szB > sizeof(void*))
      (payload bytes)
      excess red zone bytes    (if Arena.rz_szB > sizeof(void*))
      freelist next ptr        (sizeof(void*) bytes)
      this block total szB     (sizeof(SizeT) bytes)

   Total size in bytes (bszB) and payload size in bytes (pszB)
   are related by:

      bszB == pszB + 2*sizeof(SizeT) + 2*a->rz_szB

   when heap profiling is not enabled, and

      bszB == pszB + 2*sizeof(SizeT) + 2*a->rz_szB + VG_MIN_MALLOC_SZB

   when it is enabled.  It follows that the minimum overhead per heap
   block for arenas used by the core is:

      32-bit platforms:  2*4 + 2*4 == 16 bytes
      64-bit platforms:  2*8 + 2*8 == 32 bytes

   when heap profiling is not enabled, and

      32-bit platforms:  2*4 + 2*4 + 8  == 24 bytes
      64-bit platforms:  2*8 + 2*8 + 16 == 48 bytes

   when it is enabled.  In all cases, extra overhead may be incurred
   when rounding the payload size up to VG_MIN_MALLOC_SZB.

   Furthermore, both size fields in the block have their least-significant
   bit set if the block is not in use, and unset if it is in use.
   (The bottom 3 or so bits are always free for this because of alignment.)
   A block size of zero is not possible, because a block always has at
   least two SizeTs and two pointers of overhead.

   Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned.  This is
   achieved by ensuring that Superblocks are VG_MIN_MALLOC_SZB-aligned
   (see newSuperblock() for how), and that the lengths of the following
   things are a multiple of VG_MIN_MALLOC_SZB:
   - Superblock admin section lengths (due to elastic padding)
   - Block admin section (low and high) lengths (due to elastic redzones)
   - Block payload lengths (due to req_pszB rounding up)

   The heap-profile cost-center field is 8 bytes even on 32 bit
   platforms.  This is so as to keep the payload field 8-aligned.  On
   a 64-bit platform, this cc-field contains a pointer to a const
   HChar*, which is the cost center name.  On 32-bit platforms, the
   pointer lives in the lower-addressed half of the field, regardless
   of the endianness of the host.
*/
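
// Editor's worked example (a sketch, not part of the allocator): on a
// 64-bit platform with heap profiling off and the minimum redzone of
// sizeof(void*) == 8 bytes, the formulas above give, for a 16-byte payload:
//
//    SizeT pszB = 16;
//    SizeT bszB = pszB + 2*sizeof(SizeT) + 2*8;  // == 16 + 16 + 16 == 48
//
// i.e. the 32 bytes of minimum overhead quoted above.  With profiling on,
// add VG_MIN_MALLOC_SZB (16 on 64-bit platforms, per the "+ 16 == 48" line)
// for the cost-center field.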
typedef
   struct {
      // No fields are actually used in this struct, because a Block has
      // many variable sized fields and so can't be accessed
      // meaningfully with normal fields.  So we use access functions all
      // the time.  This struct gives us a type to use, though.  Also, we
      // make sizeof(Block) 1 byte so that we can do arithmetic with the
      // Block* type in increments of 1!
      UByte dummy;
   }
   Block;

// A superblock.  'padding' is never used, it just ensures that if the
// entire Superblock is aligned to VG_MIN_MALLOC_SZB, then payload_bytes[]
// will be too.  It can add small amounts of padding unnecessarily -- eg.
// 8-bytes on 32-bit machines with an 8-byte VG_MIN_MALLOC_SZB -- because
// it's too hard to make a constant expression that works perfectly in all
// cases.
// 'unsplittable' is set to NULL if the superblock can be split, otherwise
// it is set to the address of the superblock.  An unsplittable superblock
// will contain only one allocated block.  An unsplittable superblock will
// be unmapped when its (only) allocated block is freed.
// The free space at the end of an unsplittable superblock is not used to
// make a free block.  Note that this means an unsplittable superblock can
// have up to slightly less than 1 page of unused bytes at the end of the
// superblock.
// 'unsplittable' is used to avoid quadratic memory usage for linear
// reallocation of big structures
// (see http://bugs.kde.org/show_bug.cgi?id=250101).
// ??? unsplittable replaces 'void *padding2'.  Chose this
// ??? to avoid changing the alignment logic.  Maybe something cleaner
// ??? can be done.
// A splittable superblock can be reclaimed when all its blocks are freed:
// the reclaim of such a superblock is deferred until either another
// superblock of the same arena can be reclaimed or until a new superblock
// is needed in any arena.
// payload_bytes[] is made a single big Block when the Superblock is
// created, and then can be split and the splittings remerged, but Blocks
// always cover its entire length -- there's never any unused bytes at the
// end, for example.
typedef
   struct _Superblock {
      SizeT n_payload_bytes;
      struct _Superblock* unsplittable;
      UByte padding[ VG_MIN_MALLOC_SZB -
                        ((sizeof(struct _Superblock*) + sizeof(SizeT)) %
                         VG_MIN_MALLOC_SZB) ];
      UByte payload_bytes[0];
   }
   Superblock;

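// Editor's note (illustrative arithmetic, assuming the sizes shown): the
// 'padding' length above is VG_MIN_MALLOC_SZB - ((sizeof(Superblock*) +
// sizeof(SizeT)) % VG_MIN_MALLOC_SZB).  On a 32-bit machine with an 8-byte
// VG_MIN_MALLOC_SZB that is 8 - ((4+4) % 8) == 8 bytes -- the "unnecessary"
// padding the comment mentions, since the two fields are already 8-aligned.
// The expression is never *under*-sized, though, which is all the payload
// alignment requires.
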
// An arena.  'freelist' is a circular, doubly-linked list.  'rz_szB' is
// elastic, in that it can be bigger than asked-for to ensure alignment.
typedef
   struct {
      const HChar* name;
      Bool         clientmem;        // Allocates in the client address space?
      SizeT        rz_szB;           // Red zone size in bytes
      SizeT        min_sblock_szB;   // Minimum superblock size in bytes
      SizeT        min_unsplittable_sblock_szB;
      // Minimum unsplittable superblock size in bytes.  To be marked as
      // unsplittable, a superblock must have a
      // size >= min_unsplittable_sblock_szB and cannot be split.
      // So, to avoid big overhead, superblocks used to provide aligned
      // blocks on big alignments are splittable.
      // Unsplittable superblocks will be reclaimed when their (only)
      // allocated block is freed.
      // Smaller size superblocks are splittable and can be reclaimed when
      // all their blocks are freed.
      Block*       freelist[N_MALLOC_LISTS];
      // A dynamically expanding, ordered array of (pointers to)
      // superblocks in the arena.  If this array is expanded, which
      // is rare, the previous space it occupies is simply abandoned.
      // To avoid having to get yet another block from m_aspacemgr for
      // the first incarnation of this array, the first allocation of
      // it is within this struct.  If it has to be expanded then the
      // new space is acquired from m_aspacemgr as you would expect.
      Superblock** sblocks;
      SizeT        sblocks_size;
      SizeT        sblocks_used;
      Superblock*  sblocks_initial[SBLOCKS_SIZE_INITIAL];
      Superblock*  deferred_reclaimed_sb;

      // Stats only.
      ULong        stats__nreclaim_unsplit;
      ULong        stats__nreclaim_split;
      /* total # of reclaims executed for unsplittable/splittable superblocks */
      SizeT        stats__bytes_on_loan;
      SizeT        stats__bytes_mmaped;
      SizeT        stats__bytes_on_loan_max;
      ULong        stats__tot_blocks; /* total # blocks alloc'd */
      ULong        stats__tot_bytes;  /* total # bytes alloc'd */
      ULong        stats__nsearches;  /* total # freelist checks */
      // If profiling, at what level of stats__bytes_on_loan_max should
      // the next profile happen?
      SizeT        next_profile_at;
      SizeT        stats__bytes_mmaped_max;
   }
   Arena;


/*------------------------------------------------------------*/
/*--- Low-level functions for working with Blocks.         ---*/
/*------------------------------------------------------------*/

#define SIZE_T_0x1      ((SizeT)0x1)

static const char* probably_your_fault =
   "This is probably caused by your program erroneously writing past the\n"
   "end of a heap block and corrupting heap metadata.  If you fix any\n"
   "invalid writes reported by Memcheck, this assertion failure will\n"
   "probably go away.  Please try that before reporting this as a bug.\n";

// Mark a bszB as in-use (mk_inuse_bszB) or free (mk_free_bszB), or strip
// the attribute to leave the plain size (mk_plain_bszB).
static __inline__
SizeT mk_inuse_bszB ( SizeT bszB )
{
   vg_assert2(bszB != 0, probably_your_fault);
   return bszB & (~SIZE_T_0x1);
}
static __inline__
SizeT mk_free_bszB ( SizeT bszB )
{
   vg_assert2(bszB != 0, probably_your_fault);
   return bszB | SIZE_T_0x1;
}
static __inline__
SizeT mk_plain_bszB ( SizeT bszB )
{
   vg_assert2(bszB != 0, probably_your_fault);
   return bszB & (~SIZE_T_0x1);
}

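// Editor's sketch of the tagging round-trip (hypothetical values; block
// sizes are always multiples of VG_MIN_MALLOC_SZB, so bit 0 is free):
//
//    mk_free_bszB(48)  == 49   // 0x30 -> 0x31: LSB set marks "free"
//    mk_inuse_bszB(49) == 48   // 0x31 -> 0x30: LSB cleared marks "in use"
//    mk_plain_bszB(49) == 48   // attribute stripped before size arithmetic
//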
// Forward definition.
static
void ensure_mm_init ( ArenaId aid );

// Return either 0 or VG_MIN_MALLOC_SZB depending on whether or not
// heap profiling is engaged.
#define hp_overhead_szB() set_at_init_hp_overhead_szB
static SizeT set_at_init_hp_overhead_szB = -1000000;
// startup value chosen to very likely cause a problem if used before
// a proper value is given by ensure_mm_init.

//---------------------------------------------------------------------------

// Get a block's size as stored, ie with the in-use/free attribute.
static __inline__
SizeT get_bszB_as_is ( Block* b )
{
   UByte* b2 = (UByte*)b;
   SizeT bszB_lo = *(SizeT*)&b2[0 + hp_overhead_szB()];
   SizeT bszB_hi = *(SizeT*)&b2[mk_plain_bszB(bszB_lo) - sizeof(SizeT)];
   vg_assert2(bszB_lo == bszB_hi,
      "Heap block lo/hi size mismatch: lo = %llu, hi = %llu.\n%s",
      (ULong)bszB_lo, (ULong)bszB_hi, probably_your_fault);
   return bszB_lo;
}

// Get a block's plain size, ie. remove the in-use/free attribute.
static __inline__
SizeT get_bszB ( Block* b )
{
   return mk_plain_bszB(get_bszB_as_is(b));
}

// Set the size fields of a block.  bszB may have the in-use/free attribute.
static __inline__
void set_bszB ( Block* b, SizeT bszB )
{
   UByte* b2 = (UByte*)b;
   *(SizeT*)&b2[0 + hp_overhead_szB()] = bszB;
   *(SizeT*)&b2[mk_plain_bszB(bszB) - sizeof(SizeT)] = bszB;
}

//---------------------------------------------------------------------------

// Does this block have the in-use attribute?
static __inline__
Bool is_inuse_block ( Block* b )
{
   SizeT bszB = get_bszB_as_is(b);
   vg_assert2(bszB != 0, probably_your_fault);
   return (0 != (bszB & SIZE_T_0x1)) ? False : True;
}

//---------------------------------------------------------------------------

// Return the lower, upper and total overhead in bytes for a block.
// These are determined purely by which arena the block lives in.
static __inline__
SizeT overhead_szB_lo ( Arena* a )
{
   return hp_overhead_szB() + sizeof(SizeT) + a->rz_szB;
}
static __inline__
SizeT overhead_szB_hi ( Arena* a )
{
   return a->rz_szB + sizeof(SizeT);
}
static __inline__
SizeT overhead_szB ( Arena* a )
{
   return overhead_szB_lo(a) + overhead_szB_hi(a);
}

//---------------------------------------------------------------------------

// Return the minimum bszB for a block in this arena.  Can have zero-length
// payloads, so it's the size of the admin bytes.
static __inline__
SizeT min_useful_bszB ( Arena* a )
{
   return overhead_szB(a);
}

//---------------------------------------------------------------------------

// Convert payload size <--> block size (both in bytes).
static __inline__
SizeT pszB_to_bszB ( Arena* a, SizeT pszB )
{
   return pszB + overhead_szB(a);
}
static __inline__
SizeT bszB_to_pszB ( Arena* a, SizeT bszB )
{
   vg_assert2(bszB >= overhead_szB(a), probably_your_fault);
   return bszB - overhead_szB(a);
}

//---------------------------------------------------------------------------

// Get a block's payload size.
static __inline__
SizeT get_pszB ( Arena* a, Block* b )
{
   return bszB_to_pszB(a, get_bszB(b));
}

//---------------------------------------------------------------------------

// Given the addr of a block, return the addr of its payload, and vice versa.
static __inline__
UByte* get_block_payload ( Arena* a, Block* b )
{
   UByte* b2 = (UByte*)b;
   return & b2[ overhead_szB_lo(a) ];
}
// Given the addr of a block's payload, return the addr of the block itself.
static __inline__
Block* get_payload_block ( Arena* a, UByte* payload )
{
   return (Block*)&payload[ -overhead_szB_lo(a) ];
}

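// Editor's sketch: get_block_payload and get_payload_block are inverses,
// offset by the low-side admin bytes (assuming a valid arena 'a' and
// block 'b'):
//
//    UByte* p  = get_block_payload(a, b);  // (UByte*)b + overhead_szB_lo(a)
//    Block* b2 = get_payload_block(a, p);  // steps back by the same amount
//    vg_assert(b2 == b);
//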
//---------------------------------------------------------------------------

// Set and get the next and previous link fields of a block.
static __inline__
void set_prev_b ( Block* b, Block* prev_p )
{
   UByte* b2 = (UByte*)b;
   *(Block**)&b2[hp_overhead_szB() + sizeof(SizeT)] = prev_p;
}
static __inline__
void set_next_b ( Block* b, Block* next_p )
{
   UByte* b2 = (UByte*)b;
   *(Block**)&b2[get_bszB(b) - sizeof(SizeT) - sizeof(void*)] = next_p;
}
static __inline__
Block* get_prev_b ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return *(Block**)&b2[hp_overhead_szB() + sizeof(SizeT)];
}
static __inline__
Block* get_next_b ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return *(Block**)&b2[get_bszB(b) - sizeof(SizeT) - sizeof(void*)];
}

//---------------------------------------------------------------------------

// Set and get the cost-center field of a block.
static __inline__
void set_cc ( Block* b, const HChar* cc )
{
   UByte* b2 = (UByte*)b;
   vg_assert( VG_(clo_profile_heap) );
   *(const HChar**)&b2[0] = cc;
}
static __inline__
const HChar* get_cc ( Block* b )
{
   UByte* b2 = (UByte*)b;
   vg_assert( VG_(clo_profile_heap) );
   return *(const HChar**)&b2[0];
}

//---------------------------------------------------------------------------

// Get the block immediately preceding this one in the Superblock.
static __inline__
Block* get_predecessor_block ( Block* b )
{
   UByte* b2 = (UByte*)b;
   SizeT  bszB = mk_plain_bszB( (*(SizeT*)&b2[-sizeof(SizeT)]) );
   return (Block*)&b2[-bszB];
}

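// Editor's note: the trailing (hi) size copy is what makes the above O(1).
// A sketch of two adjacent blocks (sizes are plain bszBs):
//
//    ... prev block payload ... [prev bszB] | [this bszB] ... this block ...
//                               ^ read at b2[-sizeof(SizeT)], then step
//                                 back 'prev bszB' bytes to reach its start.
//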
//---------------------------------------------------------------------------

// Read and write the lower and upper red-zone bytes of a block.
static __inline__
void set_rz_lo_byte ( Block* b, UInt rz_byteno, UByte v )
{
   UByte* b2 = (UByte*)b;
   b2[hp_overhead_szB() + sizeof(SizeT) + rz_byteno] = v;
}
static __inline__
void set_rz_hi_byte ( Block* b, UInt rz_byteno, UByte v )
{
   UByte* b2 = (UByte*)b;
   b2[get_bszB(b) - sizeof(SizeT) - rz_byteno - 1] = v;
}
static __inline__
UByte get_rz_lo_byte ( Block* b, UInt rz_byteno )
{
   UByte* b2 = (UByte*)b;
   return b2[hp_overhead_szB() + sizeof(SizeT) + rz_byteno];
}
static __inline__
UByte get_rz_hi_byte ( Block* b, UInt rz_byteno )
{
   UByte* b2 = (UByte*)b;
   return b2[get_bszB(b) - sizeof(SizeT) - rz_byteno - 1];
}


/*------------------------------------------------------------*/
/*--- Arena management                                     ---*/
/*------------------------------------------------------------*/

#define CORE_ARENA_MIN_SZB    1048576

// The arena structures themselves.
static Arena vg_arena[VG_N_ARENAS];

// Functions external to this module identify arenas using ArenaIds,
// not Arena*s.  This fn converts the former to the latter.
static Arena* arenaId_to_ArenaP ( ArenaId arena )
{
   vg_assert(arena >= 0 && arena < VG_N_ARENAS);
   return & vg_arena[arena];
}

SizeT VG_(malloc_effective_client_redzone_size)(void)
{
   vg_assert(VG_(needs).malloc_replacement);
   ensure_mm_init (VG_AR_CLIENT);
   /* ensure_mm_init will call arena_init if not yet done.
      This then ensures that the arena redzone size is properly
      initialised. */
   return arenaId_to_ArenaP(VG_AR_CLIENT)->rz_szB;
}

// Initialise an arena.  rz_szB is the (default) minimum redzone size;
// it might be overridden by VG_(clo_redzone_size) or
// VG_(clo_core_redzone_size), and it might be made bigger to ensure that
// VG_MIN_MALLOC_SZB is observed.
static
void arena_init ( ArenaId aid, const HChar* name, SizeT rz_szB,
                  SizeT min_sblock_szB, SizeT min_unsplittable_sblock_szB )
{
   SizeT  i;
   Arena* a = arenaId_to_ArenaP(aid);

   // Ensure default redzones are a reasonable size.
   vg_assert(rz_szB <= MAX_REDZONE_SZB);

   /* Override the default redzone size if a clo value was given.
      Note that the clo value can be significantly bigger than
      MAX_REDZONE_SZB, to allow the user to chase horrible bugs using
      up to 1 page of protection. */
   if (VG_AR_CLIENT == aid) {
      if (VG_(clo_redzone_size) != -1)
         rz_szB = VG_(clo_redzone_size);
   } else {
      if (VG_(clo_core_redzone_size) != rz_szB)
         rz_szB = VG_(clo_core_redzone_size);
   }

   // Redzones must always be at least the size of a pointer, for holding
   // the prev/next pointer (see the layout details at the top of this file).
   if (rz_szB < sizeof(void*)) rz_szB = sizeof(void*);

   // The size of the low and high admin sections in a block must be a
   // multiple of VG_MIN_MALLOC_SZB.  So we round up the asked-for
   // redzone size if necessary to achieve this.
   a->rz_szB = rz_szB;
   while (0 != overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++;
   vg_assert(overhead_szB_lo(a) - hp_overhead_szB() == overhead_szB_hi(a));

   // Here we have established the effective redzone size.

   vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
   a->name      = name;
   a->clientmem = ( VG_AR_CLIENT == aid ? True : False );

   a->min_sblock_szB = min_sblock_szB;
   a->min_unsplittable_sblock_szB = min_unsplittable_sblock_szB;
   for (i = 0; i < N_MALLOC_LISTS; i++) a->freelist[i] = NULL;

   a->sblocks                  = & a->sblocks_initial[0];
   a->sblocks_size             = SBLOCKS_SIZE_INITIAL;
   a->sblocks_used             = 0;
   a->stats__nreclaim_unsplit  = 0;
   a->stats__nreclaim_split    = 0;
   a->stats__bytes_on_loan     = 0;
   a->stats__bytes_mmaped      = 0;
   a->stats__bytes_on_loan_max = 0;
   a->stats__bytes_mmaped_max  = 0;
   a->stats__tot_blocks        = 0;
   a->stats__tot_bytes         = 0;
   a->stats__nsearches         = 0;
   a->next_profile_at          = 25 * 1000 * 1000;
   vg_assert(sizeof(a->sblocks_initial)
             == SBLOCKS_SIZE_INITIAL * sizeof(Superblock*));
}

/* Print vital stats for an arena. */
void VG_(print_all_arena_stats) ( void )
{
   UInt i;
   for (i = 0; i < VG_N_ARENAS; i++) {
      Arena* a = arenaId_to_ArenaP(i);
      VG_(message)(Vg_DebugMsg,
                   "%8s: %8lu/%8lu max/curr mmap'd, "
                   "%llu/%llu unsplit/split sb unmmap'd,  "
                   "%8lu/%8lu max/curr,  "
                   "%10llu/%10llu totalloc-blocks/bytes,"
                   "  %10llu searches %lu rzB\n",
                   a->name,
                   a->stats__bytes_mmaped_max, a->stats__bytes_mmaped,
                   a->stats__nreclaim_unsplit, a->stats__nreclaim_split,
                   a->stats__bytes_on_loan_max,
                   a->stats__bytes_on_loan,
                   a->stats__tot_blocks, a->stats__tot_bytes,
                   a->stats__nsearches,
                   a->rz_szB
      );
   }
}

void VG_(print_arena_cc_analysis) ( void )
{
   UInt i;
   vg_assert( VG_(clo_profile_heap) );
   for (i = 0; i < VG_N_ARENAS; i++) {
      cc_analyse_alloc_arena(i);
   }
}

/* This library is self-initialising, as it makes this more self-contained,
   less coupled with the outside world.  Hence VG_(arena_malloc)() and
   VG_(arena_free)() below always call ensure_mm_init() to ensure things are
   correctly initialised.

   We initialise the client arena separately (and later) because the core
   must do non-client allocation before the tool has a chance to set the
   client arena's redzone size.
*/
static Bool     client_inited = False;
static Bool  nonclient_inited = False;

static
void ensure_mm_init ( ArenaId aid )
{
   static SizeT client_rz_szB = 8;     // default: be paranoid

   /* We use checked red zones (of various sizes) for our internal stuff,
      and an unchecked zone of arbitrary size for the client.  Of
      course the client's red zone can be checked by the tool, eg.
      by using addressability maps, but not by the mechanism implemented
      here, which merely checks at the time of freeing that the red
      zone bytes are unchanged.

      Nb: redzone sizes are *minimums*; they could be made bigger to ensure
      alignment.  Eg. with 8 byte alignment, on 32-bit machines 4 stays as
      4, but 16 becomes 20; but on 64-bit machines 4 becomes 8, and 16
      stays as 16 --- the extra 4 bytes in both are accounted for by the
      larger prev/next ptr.
   */
   if (VG_AR_CLIENT == aid) {
      Int ar_client_sbszB;
      if (client_inited) {
         // This assertion ensures that a tool cannot try to change the client
         // redzone size with VG_(needs_malloc_replacement)() after this
         // module has done its first allocation from the client arena.
         if (VG_(needs).malloc_replacement)
            vg_assert(client_rz_szB == VG_(tdict).tool_client_redzone_szB);
         return;
      }

      // Check and set the client arena redzone size
      if (VG_(needs).malloc_replacement) {
         client_rz_szB = VG_(tdict).tool_client_redzone_szB;
         if (client_rz_szB > MAX_REDZONE_SZB) {
            VG_(printf)( "\nTool error:\n"
                         "  specified redzone size is too big (%llu)\n",
                         (ULong)client_rz_szB);
            VG_(exit)(1);
         }
      }
      // Initialise the client arena.  On all platforms,
      // increasing the superblock size reduces the number of superblocks
      // in the client arena, which makes findSb cheaper.
      ar_client_sbszB = 4194304;
      // superblocks with a size > ar_client_sbszB will be unsplittable
      // (unless used for providing memalign-ed blocks).
      arena_init ( VG_AR_CLIENT,    "client",   client_rz_szB,
                   ar_client_sbszB, ar_client_sbszB+1);
      client_inited = True;

   } else {
      if (nonclient_inited) {
         return;
      }
      set_at_init_hp_overhead_szB =
         VG_(clo_profile_heap)  ? VG_MIN_MALLOC_SZB  : 0;
      // Initialise the non-client arenas
      // Similarly to the client arena, big allocations will be unsplittable.
      arena_init ( VG_AR_CORE,      "core",     CORE_REDZONE_DEFAULT_SZB,
                   1048576, 1048576+1 );
      arena_init ( VG_AR_TOOL,      "tool",     CORE_REDZONE_DEFAULT_SZB,
                   4194304, 4194304+1 );
      arena_init ( VG_AR_DINFO,     "dinfo",    CORE_REDZONE_DEFAULT_SZB,
                   1048576, 1048576+1 );
      arena_init ( VG_AR_DEMANGLE,  "demangle", CORE_REDZONE_DEFAULT_SZB,
                   65536,   65536+1 );
      arena_init ( VG_AR_EXECTXT,   "exectxt",  CORE_REDZONE_DEFAULT_SZB,
                   1048576, 1048576+1 );
      arena_init ( VG_AR_ERRORS,    "errors",   CORE_REDZONE_DEFAULT_SZB,
                   65536,   65536+1 );
      arena_init ( VG_AR_TTAUX,     "ttaux",    CORE_REDZONE_DEFAULT_SZB,
                   65536,   65536+1 );
      nonclient_inited = True;
   }

#  ifdef DEBUG_MALLOC
   VG_(printf)("ZZZ1\n");
   VG_(sanity_check_malloc_all)();
   VG_(printf)("ZZZ2\n");
#  endif
}


/*------------------------------------------------------------*/
/*--- Superblock management                                ---*/
/*------------------------------------------------------------*/

__attribute__((noreturn))
void VG_(out_of_memory_NORETURN) ( const HChar* who, SizeT szB )
{
   static Int outputTrial = 0;
   // We try once to output the full memory state followed by the below
   // message.  If that fails (due to out of memory during the first trial),
   // we try to output just the below message.  Then we abandon.

   ULong tot_alloc = VG_(am_get_anonsize_total)();
   const HChar* s1 =
      "\n"
      "    Valgrind's memory management: out of memory:\n"
      "       %s's request for %llu bytes failed.\n"
      "       %llu bytes have already been allocated.\n"
      "    Valgrind cannot continue.  Sorry.\n\n"
      "    There are several possible reasons for this.\n"
      "    - You have some kind of memory limit in place.  Look at the\n"
      "      output of 'ulimit -a'.  Is there a limit on the size of\n"
      "      virtual memory or address space?\n"
      "    - You have run out of swap space.\n"
      "    - Valgrind has a bug.  If you think this is the case or you are\n"
      "      not sure, please let us know and we'll try to fix it.\n"
      "    Please note that programs can take substantially more memory than\n"
      "    normal when running under Valgrind tools, eg. up to twice or\n"
      "    more, depending on the tool.  On a 64-bit machine, Valgrind\n"
      "    should be able to make use of up to 32GB memory.  On a 32-bit\n"
      "    machine, Valgrind should be able to use all the memory available\n"
      "    to a single process, up to 4GB if that's how you have your\n"
      "    kernel configured.  Most 32-bit Linux setups allow a maximum of\n"
      "    3GB per process.\n\n"
      "    Whatever the reason, Valgrind cannot continue.  Sorry.\n";

   if (outputTrial <= 1) {
      if (outputTrial == 0) {
         outputTrial++;
         VG_(am_show_nsegments) (0, "out_of_memory");
         VG_(print_all_arena_stats) ();
         if (VG_(clo_profile_heap))
            VG_(print_arena_cc_analysis) ();
      }
      outputTrial++;
      VG_(message)(Vg_UserMsg, s1, who, (ULong)szB, tot_alloc);
   } else {
      VG_(debugLog)(0,"mallocfree", s1, who, (ULong)szB, tot_alloc);
   }

   VG_(exit)(1);
}


// Align ptr p upwards to an align-sized boundary.
static
void* align_upwards ( void* p, SizeT align )
{
   Addr a = (Addr)p;
   if ((a % align) == 0) return (void*)a;
   return (void*)(a - (a % align) + align);
}

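// Editor's sketch (hypothetical addresses), for align == 8:
//
//    align_upwards((void*)0x1000, 8) == (void*)0x1000   // already aligned
//    align_upwards((void*)0x1003, 8) == (void*)0x1008   // rounded up
//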
// Forward definition.
static
void deferred_reclaimSuperblock ( Arena* a, Superblock* sb);

// If not enough memory available, either aborts (for non-client memory)
// or returns 0 (for client memory).
static
Superblock* newSuperblock ( Arena* a, SizeT cszB )
{
   Superblock* sb;
   SysRes      sres;
   Bool        unsplittable;
   ArenaId     aid;

   // A new superblock is needed for arena a.  We will execute the deferred
   // reclaim in all arenas in order to minimise fragmentation and
   // peak memory usage.
   for (aid = 0; aid < VG_N_ARENAS; aid++) {
      Arena* arena = arenaId_to_ArenaP(aid);
      if (arena->deferred_reclaimed_sb != NULL)
         deferred_reclaimSuperblock (arena, NULL);
   }

   // Take into account admin bytes in the Superblock.
   cszB += sizeof(Superblock);

   if (cszB < a->min_sblock_szB) cszB = a->min_sblock_szB;
   cszB = VG_PGROUNDUP(cszB);

   if (cszB >= a->min_unsplittable_sblock_szB)
      unsplittable = True;
   else
      unsplittable = False;

   if (a->clientmem) {
      // client allocation -- return 0 to client if it fails
      if (unsplittable)
         sres = VG_(am_mmap_anon_float_client)
                   ( cszB, VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC );
      else
         sres = VG_(am_sbrk_anon_float_client)
                   ( cszB, VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC );
      if (sr_isError(sres))
         return 0;
      sb = (Superblock*)(AddrH)sr_Res(sres);
      // Mark this segment as containing client heap.  The leak
      // checker needs to be able to identify such segments so as not
      // to use them as sources of roots during leak checks.
      VG_(am_set_segment_isCH_if_SkAnonC)( VG_(am_find_nsegment)( (Addr)sb ) );
   } else {
      // non-client allocation -- abort if it fails
      if (unsplittable)
         sres = VG_(am_mmap_anon_float_valgrind)( cszB );
      else
         sres = VG_(am_sbrk_anon_float_valgrind)( cszB );
      if (sr_isError(sres)) {
         VG_(out_of_memory_NORETURN)("newSuperblock", cszB);
         /* NOTREACHED */
         sb = NULL; /* keep gcc happy */
      } else {
         sb = (Superblock*)(AddrH)sr_Res(sres);
      }
   }
   vg_assert(NULL != sb);
   INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(sb, cszB));
   vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
   sb->n_payload_bytes = cszB - sizeof(Superblock);
   sb->unsplittable = (unsplittable ? sb : NULL);
   a->stats__bytes_mmaped += cszB;
   if (a->stats__bytes_mmaped > a->stats__bytes_mmaped_max)
      a->stats__bytes_mmaped_max = a->stats__bytes_mmaped;
   VG_(debugLog)(1, "mallocfree",
                    "newSuperblock at %p (pszB %7ld) %s owner %s/%s\n",
                    sb, sb->n_payload_bytes,
                    (unsplittable ? "unsplittable" : ""),
                    a->clientmem ? "CLIENT" : "VALGRIND", a->name );
   return sb;
}

// Reclaims the given superblock:
//  * removes sb from the arena's sblocks list.
//  * munmaps the superblock segment.
static
void reclaimSuperblock ( Arena* a, Superblock* sb)
{
   SysRes sres;
   SizeT  cszB;
   UInt   i, j;

   VG_(debugLog)(1, "mallocfree",
                    "reclaimSuperblock at %p (pszB %7ld) %s owner %s/%s\n",
                    sb, sb->n_payload_bytes,
                    (sb->unsplittable ? "unsplittable" : ""),
                    a->clientmem ? "CLIENT" : "VALGRIND", a->name );

   // Take into account admin bytes in the Superblock.
   cszB = sizeof(Superblock) + sb->n_payload_bytes;

   // Remove sb from the superblock list.
   for (i = 0; i < a->sblocks_used; i++) {
      if (a->sblocks[i] == sb)
         break;
   }
   vg_assert(i >= 0 && i < a->sblocks_used);
   for (j = i; j < a->sblocks_used; j++)
      a->sblocks[j] = a->sblocks[j+1];
   a->sblocks_used--;
   a->sblocks[a->sblocks_used] = NULL;
   // paranoia: NULLify ptr to reclaimed sb or NULLify copy of ptr to last sb.

   a->stats__bytes_mmaped -= cszB;
   if (sb->unsplittable)
      a->stats__nreclaim_unsplit++;
   else
      a->stats__nreclaim_split++;

   // Now that the sb is removed from the list, munmap its space.
   if (a->clientmem) {
      // reclaimable client allocation
      Bool need_discard = False;
      sres = VG_(am_munmap_client)(&need_discard, (Addr) sb, cszB);
      vg_assert2(! sr_isError(sres), "superblock client munmap failure\n");
      /* We somewhat help the client by discarding the range.
         Note however that if the client has JITted some code in
         a small block that was freed, we do not provide this
         'discard support' */
      /* JRS 2011-Sept-26: it would be nice to move the discard
         outwards somewhat (in terms of calls) so as to make it easier
         to verify that there will be no nonterminating recursive set
         of calls as a result of calling VG_(discard_translations).
         Another day, perhaps. */
      if (need_discard)
         VG_(discard_translations) ((Addr) sb, cszB, "reclaimSuperblock");
   } else {
      // reclaimable non-client allocation
      sres = VG_(am_munmap_valgrind)((Addr) sb, cszB);
      vg_assert2(! sr_isError(sres), "superblock valgrind munmap failure\n");
   }

}

// Find the superblock containing the given chunk.
static
Superblock* findSb ( Arena* a, Block* b )
{
   SizeT min = 0;
   SizeT max = a->sblocks_used;

   while (min <= max) {
      Superblock * sb;
      SizeT pos = min + (max - min)/2;

      vg_assert(pos >= 0 && pos < a->sblocks_used);
      sb = a->sblocks[pos];
      if ((Block*)&sb->payload_bytes[0] <= b
          && b < (Block*)&sb->payload_bytes[sb->n_payload_bytes])
      {
         return sb;
      } else if ((Block*)&sb->payload_bytes[0] <= b) {
         min = pos + 1;
      } else {
         max = pos - 1;
      }
   }
   VG_(printf)("findSb: can't find pointer %p in arena '%s'\n",
                b, a->name );
   VG_(core_panic)("findSb: VG_(arena_free)() in wrong arena?");
   return NULL; /*NOTREACHED*/
}


/*------------------------------------------------------------*/
/*--- Functions for working with freelists.                ---*/
/*------------------------------------------------------------*/

// Nb: Determination of which freelist a block lives on is based on the
// payload size, not block size.

// Convert a payload size in bytes to a freelist number.
static
UInt pszB_to_listNo ( SizeT pszB )
{
   SizeT n = pszB / VG_MIN_MALLOC_SZB;
   vg_assert(0 == pszB % VG_MIN_MALLOC_SZB);

   // The first 64 lists hold blocks of size VG_MIN_MALLOC_SZB * list_num.
   // The final 48 hold bigger blocks.
   if (n < 64)   return (UInt)n;
   /* Exponential slope up, factor 1.05 */
   if (n < 67)   return 64;
   if (n < 70)   return 65;
   if (n < 74)   return 66;
   if (n < 77)   return 67;
   if (n < 81)   return 68;
   if (n < 85)   return 69;
   if (n < 90)   return 70;
   if (n < 94)   return 71;
   if (n < 99)   return 72;
   if (n < 104)  return 73;
   if (n < 109)  return 74;
   if (n < 114)  return 75;
   if (n < 120)  return 76;
   if (n < 126)  return 77;
   if (n < 133)  return 78;
   if (n < 139)  return 79;
   /* Exponential slope up, factor 1.10 */
   if (n < 153)  return 80;
   if (n < 169)  return 81;
   if (n < 185)  return 82;
   if (n < 204)  return 83;
   if (n < 224)  return 84;
   if (n < 247)  return 85;
   if (n < 272)  return 86;
   if (n < 299)  return 87;
   if (n < 329)  return 88;
   if (n < 362)  return 89;
   if (n < 398)  return 90;
   if (n < 438)  return 91;
   if (n < 482)  return 92;
   if (n < 530)  return 93;
   if (n < 583)  return 94;
   if (n < 641)  return 95;
   /* Exponential slope up, factor 1.20 */
   if (n < 770)  return 96;
   if (n < 924)  return 97;
   if (n < 1109) return 98;
   if (n < 1331) return 99;
   if (n < 1597) return 100;
   if (n < 1916) return 101;
   if (n < 2300) return 102;
   if (n < 2760) return 103;
   if (n < 3312) return 104;
   if (n < 3974) return 105;
   if (n < 4769) return 106;
   if (n < 5723) return 107;
   if (n < 6868) return 108;
   if (n < 8241) return 109;
   if (n < 9890) return 110;
   return 111;
}

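// Editor's sketch of the mapping (assuming VG_MIN_MALLOC_SZB == 8):
//
//    pszB_to_listNo(8)    == 1    // n ==   1: one of the 64 exact-fit lists
//    pszB_to_listNo(504)  == 63   // n ==  63: last exact-fit list
//    pszB_to_listNo(512)  == 64   // n ==  64: first 1.05-slope bucket
//    pszB_to_listNo(1024) == 78   // n == 128: falls in the 126..132 bucket
//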
// What is the minimum payload size for a given list?
static
SizeT listNo_to_pszB_min ( UInt listNo )
{
   /* Repeatedly computing this function at every request is
      expensive.  Hence at the first call just cache the result for
      every possible argument. */
   static SizeT cache[N_MALLOC_LISTS];
   static Bool  cache_valid = False;
   if (!cache_valid) {
      UInt i;
      for (i = 0; i < N_MALLOC_LISTS; i++) {
         SizeT pszB = 0;
         while (pszB_to_listNo(pszB) < i)
            pszB += VG_MIN_MALLOC_SZB;
         cache[i] = pszB;
      }
      cache_valid = True;
   }
   /* Return the cached answer.  Note the assertion must be strict
      (< rather than <=): cache[] has exactly N_MALLOC_LISTS entries. */
   vg_assert(listNo < N_MALLOC_LISTS);
   return cache[listNo];
}

// What is the maximum payload size for a given list?
static
SizeT listNo_to_pszB_max ( UInt listNo )
{
   vg_assert(listNo < N_MALLOC_LISTS);
   if (listNo == N_MALLOC_LISTS-1) {
      return MAX_PSZB;
   } else {
      return listNo_to_pszB_min(listNo+1) - 1;
   }
}

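// Editor's sketch: for any payload size p that is a multiple of
// VG_MIN_MALLOC_SZB (a precondition asserted by pszB_to_listNo), the
// min/max functions bracket it:
//
//    UInt ln = pszB_to_listNo(p);
//    vg_assert(listNo_to_pszB_min(ln) <= p && p <= listNo_to_pszB_max(ln));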

/* A nasty hack to try and reduce fragmentation.  Try and replace
   a->freelist[lno] with another block on the same list but with a
   lower address, with the idea of attempting to recycle the same
   blocks rather than cruise through the address space. */
static
void swizzle ( Arena* a, UInt lno )
{
   Block* p_best;
   Block* pp;
   Block* pn;
   UInt   i;

   p_best = a->freelist[lno];
   if (p_best == NULL) return;

   pn = pp = p_best;

   // This loop bound was 20 for a long time, but experiments showed that
   // reducing it to 10 gave the same result in all the tests, and 5 got the
   // same result in 85--100% of cases.  And it's called often enough to be
   // noticeable in programs that allocate a lot.
   for (i = 0; i < 5; i++) {
      pn = get_next_b(pn);
      pp = get_prev_b(pp);
      if (pn < p_best) p_best = pn;
      if (pp < p_best) p_best = pp;
   }
   if (p_best < a->freelist[lno]) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)("retreat by %ld\n", (Word)(a->freelist[lno] - p_best));
#     endif
      a->freelist[lno] = p_best;
   }
}


/*------------------------------------------------------------*/
/*--- Sanity-check/debugging machinery.                    ---*/
/*------------------------------------------------------------*/

#define REDZONE_LO_MASK    0x31
#define REDZONE_HI_MASK    0x7c

// Do some crude sanity checks on a Block.
static
Bool blockSane ( Arena* a, Block* b )
{
#  define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
   UInt i;
   // The lo and hi size fields will be checked (indirectly) by the call
   // to get_rz_hi_byte().
   if (!a->clientmem && is_inuse_block(b)) {
      // In the inner, for memcheck's sake, temporarily mark the redzones
      // accessible.
      INNER_REQUEST(VALGRIND_MAKE_MEM_DEFINED
                    (b + hp_overhead_szB() + sizeof(SizeT), a->rz_szB));
      INNER_REQUEST(VALGRIND_MAKE_MEM_DEFINED
                    (b + get_bszB(b)
                     - sizeof(SizeT) - a->rz_szB, a->rz_szB));
      for (i = 0; i < a->rz_szB; i++) {
         if (get_rz_lo_byte(b, i) !=
             (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK))
            {BLEAT("redzone-lo");return False;}
         if (get_rz_hi_byte(b, i) !=
             (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK))
            {BLEAT("redzone-hi");return False;}
      }
      INNER_REQUEST(VALGRIND_MAKE_MEM_NOACCESS
                    (b + hp_overhead_szB() + sizeof(SizeT), a->rz_szB));
      INNER_REQUEST(VALGRIND_MAKE_MEM_NOACCESS
                    (b + get_bszB(b)
                     - sizeof(SizeT) - a->rz_szB, a->rz_szB));
   }
   return True;
#  undef BLEAT
}

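// Editor's note on the expected redzone pattern above (hypothetical
// address): for a block whose address has low byte 0xa4, every lo redzone
// byte must equal 0xa4 ^ REDZONE_LO_MASK == 0xa4 ^ 0x31 == 0x95, and every
// hi redzone byte 0xa4 ^ REDZONE_HI_MASK == 0xa4 ^ 0x7c == 0xd8.  Deriving
// the pattern from the block's own address presumably makes it unlikely
// that bytes stray-copied from some other block happen to look valid.
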
nethercote2d5b8162004-08-11 09:40:52 +00001119// Print superblocks (only for debugging).
sewardjde4a1d02002-03-22 01:27:54 +00001120static
1121void ppSuperblocks ( Arena* a )
1122{
sewardj0b3fd2d2007-08-21 10:55:26 +00001123 UInt i, j, blockno = 1;
njnd0e685c2005-07-17 17:55:42 +00001124 SizeT b_bszB;
sewardjde4a1d02002-03-22 01:27:54 +00001125
sewardj0b3fd2d2007-08-21 10:55:26 +00001126 for (j = 0; j < a->sblocks_used; ++j) {
1127 Superblock * sb = a->sblocks[j];
1128
sewardjde4a1d02002-03-22 01:27:54 +00001129 VG_(printf)( "\n" );
sewardjd8b93462011-09-10 10:17:35 +00001130 VG_(printf)( "superblock %d at %p %s, sb->n_payload_bytes = %lu\n",
sewardjd043de92011-09-26 11:28:20 +00001131 blockno++, sb, (sb->unsplittable ? "unsplittable" : ""),
sewardjd8b93462011-09-10 10:17:35 +00001132 sb->n_payload_bytes);
njnd0e685c2005-07-17 17:55:42 +00001133 for (i = 0; i < sb->n_payload_bytes; i += b_bszB) {
1134 Block* b = (Block*)&sb->payload_bytes[i];
1135 b_bszB = get_bszB(b);
njn8a7b41b2007-09-23 00:51:24 +00001136 VG_(printf)( " block at %d, bszB %lu: ", i, b_bszB );
njn472cc7c2005-07-17 17:20:30 +00001137 VG_(printf)( "%s, ", is_inuse_block(b) ? "inuse" : "free");
nethercote2d5b8162004-08-11 09:40:52 +00001138 VG_(printf)( "%s\n", blockSane(a, b) ? "ok" : "BAD" );
sewardjde4a1d02002-03-22 01:27:54 +00001139 }
nethercote2d5b8162004-08-11 09:40:52 +00001140 vg_assert(i == sb->n_payload_bytes); // no overshoot at end of Sb
sewardjde4a1d02002-03-22 01:27:54 +00001141 }
1142 VG_(printf)( "end of superblocks\n\n" );
1143}
1144
nethercote2d5b8162004-08-11 09:40:52 +00001145// Sanity check both the superblocks and the chains.
nethercote885dd912004-08-03 23:14:00 +00001146static void sanity_check_malloc_arena ( ArenaId aid )
sewardjde4a1d02002-03-22 01:27:54 +00001147{
sewardj0b3fd2d2007-08-21 10:55:26 +00001148 UInt i, j, superblockctr, blockctr_sb, blockctr_li;
nethercote7ac7f7b2004-11-02 12:36:02 +00001149 UInt blockctr_sb_free, listno;
1150 SizeT b_bszB, b_pszB, list_min_pszB, list_max_pszB;
sewardj0b3fd2d2007-08-21 10:55:26 +00001151 Bool thisFree, lastWasFree, sblockarrOK;
nethercote2d5b8162004-08-11 09:40:52 +00001152 Block* b;
1153 Block* b_prev;
nethercote7ac7f7b2004-11-02 12:36:02 +00001154 SizeT arena_bytes_on_loan;
sewardjde4a1d02002-03-22 01:27:54 +00001155 Arena* a;
1156
nethercote885dd912004-08-03 23:14:00 +00001157# define BOMB VG_(core_panic)("sanity_check_malloc_arena")
sewardjde4a1d02002-03-22 01:27:54 +00001158
1159 a = arenaId_to_ArenaP(aid);
sewardj0b3fd2d2007-08-21 10:55:26 +00001160
1161 // Check the superblock array.
1162 sblockarrOK
1163 = a->sblocks != NULL
1164 && a->sblocks_size >= SBLOCKS_SIZE_INITIAL
1165 && a->sblocks_used <= a->sblocks_size
1166 && (a->sblocks_size == SBLOCKS_SIZE_INITIAL
1167 ? (a->sblocks == &a->sblocks_initial[0])
1168 : (a->sblocks != &a->sblocks_initial[0]));
1169 if (!sblockarrOK) {
1170 VG_(printf)("sanity_check_malloc_arena: sblock array BAD\n");
1171 BOMB;
1172 }
1173
nethercote2d5b8162004-08-11 09:40:52 +00001174 // First, traverse all the superblocks, inspecting the Blocks in each.
sewardjde4a1d02002-03-22 01:27:54 +00001175 superblockctr = blockctr_sb = blockctr_sb_free = 0;
1176 arena_bytes_on_loan = 0;
sewardj0b3fd2d2007-08-21 10:55:26 +00001177 for (j = 0; j < a->sblocks_used; ++j) {
1178 Superblock * sb = a->sblocks[j];
sewardjde4a1d02002-03-22 01:27:54 +00001179 lastWasFree = False;
1180 superblockctr++;
nethercote2d5b8162004-08-11 09:40:52 +00001181 for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
sewardjde4a1d02002-03-22 01:27:54 +00001182 blockctr_sb++;
nethercote2d5b8162004-08-11 09:40:52 +00001183 b = (Block*)&sb->payload_bytes[i];
njnd0e685c2005-07-17 17:55:42 +00001184 b_bszB = get_bszB_as_is(b);
sewardjde4a1d02002-03-22 01:27:54 +00001185 if (!blockSane(a, b)) {
njn8a7b41b2007-09-23 00:51:24 +00001186 VG_(printf)("sanity_check_malloc_arena: sb %p, block %d "
1187 "(bszB %lu): BAD\n", sb, i, b_bszB );
sewardjde4a1d02002-03-22 01:27:54 +00001188 BOMB;
1189 }
njn472cc7c2005-07-17 17:20:30 +00001190 thisFree = !is_inuse_block(b);
sewardjde4a1d02002-03-22 01:27:54 +00001191 if (thisFree && lastWasFree) {
njn8a7b41b2007-09-23 00:51:24 +00001192 VG_(printf)("sanity_check_malloc_arena: sb %p, block %d "
1193 "(bszB %lu): UNMERGED FREES\n", sb, i, b_bszB );
sewardjde4a1d02002-03-22 01:27:54 +00001194 BOMB;
1195 }
sewardjde4a1d02002-03-22 01:27:54 +00001196 if (thisFree) blockctr_sb_free++;
sewardj0b3fd2d2007-08-21 10:55:26 +00001197 if (!thisFree)
nethercote2d5b8162004-08-11 09:40:52 +00001198 arena_bytes_on_loan += bszB_to_pszB(a, b_bszB);
1199 lastWasFree = thisFree;
sewardjde4a1d02002-03-22 01:27:54 +00001200 }
nethercote2d5b8162004-08-11 09:40:52 +00001201 if (i > sb->n_payload_bytes) {
nethercote885dd912004-08-03 23:14:00 +00001202 VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
sewardjde4a1d02002-03-22 01:27:54 +00001203 "overshoots end\n", sb);
1204 BOMB;
1205 }
sewardjde4a1d02002-03-22 01:27:54 +00001206 }
1207
sewardj7d1064a2011-02-23 13:18:56 +00001208 if (arena_bytes_on_loan != a->stats__bytes_on_loan) {
nethercote2d5b8162004-08-11 09:40:52 +00001209# ifdef VERBOSE_MALLOC
sewardjd8b93462011-09-10 10:17:35 +00001210 VG_(printf)( "sanity_check_malloc_arena: a->bytes_on_loan %lu, "
1211 "arena_bytes_on_loan %lu: "
nethercote2d5b8162004-08-11 09:40:52 +00001212 "MISMATCH\n", a->stats__bytes_on_loan, arena_bytes_on_loan);
1213# endif
sewardjde4a1d02002-03-22 01:27:54 +00001214 ppSuperblocks(a);
1215 BOMB;
1216 }
1217
1218 /* Second, traverse each list, checking that the back pointers make
1219 sense, counting blocks encountered, and checking that each block
1220 is an appropriate size for this list. */
1221 blockctr_li = 0;
njn6e6588c2005-03-13 18:52:48 +00001222 for (listno = 0; listno < N_MALLOC_LISTS; listno++) {
nethercote2d5b8162004-08-11 09:40:52 +00001223 list_min_pszB = listNo_to_pszB_min(listno);
1224 list_max_pszB = listNo_to_pszB_max(listno);
sewardjde4a1d02002-03-22 01:27:54 +00001225 b = a->freelist[listno];
1226 if (b == NULL) continue;
1227 while (True) {
1228 b_prev = b;
nethercote2d5b8162004-08-11 09:40:52 +00001229 b = get_next_b(b);
1230 if (get_prev_b(b) != b_prev) {
nethercote885dd912004-08-03 23:14:00 +00001231 VG_(printf)( "sanity_check_malloc_arena: list %d at %p: "
sewardj0b3fd2d2007-08-21 10:55:26 +00001232 "BAD LINKAGE\n",
sewardjde4a1d02002-03-22 01:27:54 +00001233 listno, b );
1234 BOMB;
1235 }
njn089f51f2005-07-17 18:12:00 +00001236 b_pszB = get_pszB(a, b);
nethercote2d5b8162004-08-11 09:40:52 +00001237 if (b_pszB < list_min_pszB || b_pszB > list_max_pszB) {
sewardj0b3fd2d2007-08-21 10:55:26 +00001238 VG_(printf)(
nethercote885dd912004-08-03 23:14:00 +00001239 "sanity_check_malloc_arena: list %d at %p: "
njn8a7b41b2007-09-23 00:51:24 +00001240 "WRONG CHAIN SIZE %luB (%luB, %luB)\n",
nethercote2d5b8162004-08-11 09:40:52 +00001241 listno, b, b_pszB, list_min_pszB, list_max_pszB );
sewardjde4a1d02002-03-22 01:27:54 +00001242 BOMB;
1243 }
1244 blockctr_li++;
1245 if (b == a->freelist[listno]) break;
1246 }
1247 }
1248
1249 if (blockctr_sb_free != blockctr_li) {
nethercote2d5b8162004-08-11 09:40:52 +00001250# ifdef VERBOSE_MALLOC
1251 VG_(printf)( "sanity_check_malloc_arena: BLOCK COUNT MISMATCH "
1252 "(via sbs %d, via lists %d)\n",
1253 blockctr_sb_free, blockctr_li );
1254# endif
sewardjde4a1d02002-03-22 01:27:54 +00001255 ppSuperblocks(a);
1256 BOMB;
1257 }
1258
nethercote885dd912004-08-03 23:14:00 +00001259 if (VG_(clo_verbosity) > 2)
1260 VG_(message)(Vg_DebugMsg,
njn669ef072005-03-13 05:46:57 +00001261 "%8s: %2d sbs, %5d bs, %2d/%-2d free bs, "
sewardj738856f2009-07-15 14:48:32 +00001262 "%7ld mmap, %7ld loan\n",
nethercote885dd912004-08-03 23:14:00 +00001263 a->name,
1264 superblockctr,
1265 blockctr_sb, blockctr_sb_free, blockctr_li,
sewardj7d1064a2011-02-23 13:18:56 +00001266 a->stats__bytes_mmaped, a->stats__bytes_on_loan);
sewardjde4a1d02002-03-22 01:27:54 +00001267# undef BOMB
1268}
1269
1270
sewardj9c606bd2008-09-18 18:12:50 +00001271#define N_AN_CCS 1000
1272
florian54fe2022012-10-27 23:07:42 +00001273typedef struct {
1274 ULong nBytes;
1275 ULong nBlocks;
1276 const HChar* cc;
1277} AnCC;
sewardj9c606bd2008-09-18 18:12:50 +00001278
1279static AnCC anCCs[N_AN_CCS];
1280
florian6bd9dc12012-11-23 16:17:43 +00001281static Int cmp_AnCC_by_vol ( const void* v1, const void* v2 ) {
florian3e798632012-11-24 19:41:54 +00001282 const AnCC* ancc1 = v1;
1283 const AnCC* ancc2 = v2;
sewardj9c606bd2008-09-18 18:12:50 +00001284 if (ancc1->nBytes < ancc2->nBytes) return -1;
1285 if (ancc1->nBytes > ancc2->nBytes) return 1;
1286 return 0;
1287}
1288
1289static void cc_analyse_alloc_arena ( ArenaId aid )
1290{
1291 Word i, j, k;
1292 Arena* a;
1293 Block* b;
1294 Bool thisFree, lastWasFree;
1295 SizeT b_bszB;
1296
florian54fe2022012-10-27 23:07:42 +00001297 const HChar* cc;
sewardj9c606bd2008-09-18 18:12:50 +00001298 UInt n_ccs = 0;
1299 //return;
1300 a = arenaId_to_ArenaP(aid);
1301 if (a->name == NULL) {
1302 /* arena is not in use, is not initialised and will fail the
1303 sanity check that follows. */
1304 return;
1305 }
1306
1307 sanity_check_malloc_arena(aid);
1308
1309 VG_(printf)(
sewardjd8b93462011-09-10 10:17:35 +00001310 "-------- Arena \"%s\": %lu/%lu max/curr mmap'd, "
sewardjd043de92011-09-26 11:28:20 +00001311 "%llu/%llu unsplit/split sb unmmap'd, "
philipped99c26a2012-07-31 22:17:28 +00001312 "%lu/%lu max/curr on_loan %lu rzB --------\n",
sewardjd8b93462011-09-10 10:17:35 +00001313 a->name, a->stats__bytes_mmaped_max, a->stats__bytes_mmaped,
sewardjd043de92011-09-26 11:28:20 +00001314 a->stats__nreclaim_unsplit, a->stats__nreclaim_split,
philipped99c26a2012-07-31 22:17:28 +00001315 a->stats__bytes_on_loan_max, a->stats__bytes_on_loan,
1316 a->rz_szB
sewardj9c606bd2008-09-18 18:12:50 +00001317 );
1318
1319 for (j = 0; j < a->sblocks_used; ++j) {
1320 Superblock * sb = a->sblocks[j];
1321 lastWasFree = False;
1322 for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
1323 b = (Block*)&sb->payload_bytes[i];
1324 b_bszB = get_bszB_as_is(b);
1325 if (!blockSane(a, b)) {
1326 VG_(printf)("sanity_check_malloc_arena: sb %p, block %ld "
1327 "(bszB %lu): BAD\n", sb, i, b_bszB );
1328 tl_assert(0);
1329 }
1330 thisFree = !is_inuse_block(b);
1331 if (thisFree && lastWasFree) {
1332 VG_(printf)("sanity_check_malloc_arena: sb %p, block %ld "
1333 "(bszB %lu): UNMERGED FREES\n", sb, i, b_bszB );
1334 tl_assert(0);
1335 }
1336 lastWasFree = thisFree;
1337
1338 if (thisFree) continue;
1339
1340 if (0)
1341 VG_(printf)("block: inUse=%d pszB=%d cc=%s\n",
1342 (Int)(!thisFree),
1343 (Int)bszB_to_pszB(a, b_bszB),
1344 get_cc(b));
1345 cc = get_cc(b);
1346 tl_assert(cc);
1347 for (k = 0; k < n_ccs; k++) {
1348 tl_assert(anCCs[k].cc);
1349 if (0 == VG_(strcmp)(cc, anCCs[k].cc))
1350 break;
1351 }
1352 tl_assert(k >= 0 && k <= n_ccs);
1353
1354 if (k == n_ccs) {
1355 tl_assert(n_ccs < N_AN_CCS-1);
1356 n_ccs++;
1357 anCCs[k].nBytes = 0;
1358 anCCs[k].nBlocks = 0;
1359 anCCs[k].cc = cc;
1360 }
1361
1362 tl_assert(k >= 0 && k < n_ccs && k < N_AN_CCS);
1363 anCCs[k].nBytes += (ULong)bszB_to_pszB(a, b_bszB);
1364 anCCs[k].nBlocks++;
1365 }
1366 if (i > sb->n_payload_bytes) {
1367 VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
1368 "overshoots end\n", sb);
1369 tl_assert(0);
1370 }
1371 }
1372
1373 VG_(ssort)( &anCCs[0], n_ccs, sizeof(anCCs[0]), cmp_AnCC_by_vol );
1374
1375 for (k = 0; k < n_ccs; k++) {
1376 VG_(printf)("%'13llu in %'9llu: %s\n",
1377 anCCs[k].nBytes, anCCs[k].nBlocks, anCCs[k].cc );
1378 }
1379
1380 VG_(printf)("\n");
1381}
1382
1383
nethercote885dd912004-08-03 23:14:00 +00001384void VG_(sanity_check_malloc_all) ( void )
sewardjde4a1d02002-03-22 01:27:54 +00001385{
nethercote7ac7f7b2004-11-02 12:36:02 +00001386 UInt i;
sewardj0b3fd2d2007-08-21 10:55:26 +00001387 for (i = 0; i < VG_N_ARENAS; i++) {
1388 if (i == VG_AR_CLIENT && !client_inited)
1389 continue;
nethercote885dd912004-08-03 23:14:00 +00001390 sanity_check_malloc_arena ( i );
sewardj0b3fd2d2007-08-21 10:55:26 +00001391 }
sewardjde4a1d02002-03-22 01:27:54 +00001392}
1393
sewardjde4a1d02002-03-22 01:27:54 +00001394
nethercote2d5b8162004-08-11 09:40:52 +00001395/*------------------------------------------------------------*/
1396/*--- Creating and deleting blocks. ---*/
1397/*------------------------------------------------------------*/
1398
1399// Mark the bytes at b .. b+bszB-1 as not in use, and add them to the
1400// relevant free list.
1401
1402static
nethercote7ac7f7b2004-11-02 12:36:02 +00001403void mkFreeBlock ( Arena* a, Block* b, SizeT bszB, UInt b_lno )
jsewardb1a26ae2004-03-14 03:06:37 +00001404{
nethercote7ac7f7b2004-11-02 12:36:02 +00001405 SizeT pszB = bszB_to_pszB(a, bszB);
nethercote2d5b8162004-08-11 09:40:52 +00001406 vg_assert(b_lno == pszB_to_listNo(pszB));
philippe72faf102012-03-11 22:24:03 +00001407 INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(b, bszB));
nethercote2d5b8162004-08-11 09:40:52 +00001408 // Set the size fields and indicate not-in-use.
njn8d3f8452005-07-20 04:12:41 +00001409 set_bszB(b, mk_free_bszB(bszB));
nethercote2d5b8162004-08-11 09:40:52 +00001410
1411 // Add to the relevant list.
1412 if (a->freelist[b_lno] == NULL) {
1413 set_prev_b(b, b);
1414 set_next_b(b, b);
1415 a->freelist[b_lno] = b;
1416 } else {
1417 Block* b_prev = get_prev_b(a->freelist[b_lno]);
1418 Block* b_next = a->freelist[b_lno];
1419 set_next_b(b_prev, b);
1420 set_prev_b(b_next, b);
1421 set_next_b(b, b_next);
1422 set_prev_b(b, b_prev);
1423 }
1424# ifdef DEBUG_MALLOC
1425 (void)blockSane(a,b);
1426# endif
1427}
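
// The freelists built here are circular and doubly linked, with
// a->freelist[b_lno] pointing at an arbitrary element. A compiled-out
// sketch of the traversal idiom used by the search loops elsewhere in
// this file:
#if 0
   Block* b = a->freelist[lno];
   if (b != NULL) {
      while (True) {
         /* ... inspect b ... */
         b = get_next_b(b);
         if (b == a->freelist[lno]) break;  // wrapped around: done
      }
   }
#endif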
1428
1429// Mark the bytes at b .. b+bszB-1 as in use, and set up the block
1430// appropriately.
1431static
nethercote7ac7f7b2004-11-02 12:36:02 +00001432void mkInuseBlock ( Arena* a, Block* b, SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +00001433{
nethercote7ac7f7b2004-11-02 12:36:02 +00001434 UInt i;
nethercote2d5b8162004-08-11 09:40:52 +00001435 vg_assert(bszB >= min_useful_bszB(a));
philippe72faf102012-03-11 22:24:03 +00001436 INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(b, bszB));
njn8d3f8452005-07-20 04:12:41 +00001437 set_bszB(b, mk_inuse_bszB(bszB));
nethercote2d5b8162004-08-11 09:40:52 +00001438 set_prev_b(b, NULL); // Take off freelist
1439 set_next_b(b, NULL); // ditto
1440 if (!a->clientmem) {
1441 for (i = 0; i < a->rz_szB; i++) {
njn1dcee092009-02-24 03:07:37 +00001442 set_rz_lo_byte(b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK));
1443 set_rz_hi_byte(b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK));
nethercote2d5b8162004-08-11 09:40:52 +00001444 }
1445 }
1446# ifdef DEBUG_MALLOC
1447 (void)blockSane(a,b);
1448# endif
1449}
1450
1451// Remove a block from a given list. Does no sanity checking.
1452static
nethercote7ac7f7b2004-11-02 12:36:02 +00001453void unlinkBlock ( Arena* a, Block* b, UInt listno )
nethercote2d5b8162004-08-11 09:40:52 +00001454{
njn6e6588c2005-03-13 18:52:48 +00001455 vg_assert(listno < N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +00001456 if (get_prev_b(b) == b) {
1457 // Only one element in the list; treat it specially.
1458 vg_assert(get_next_b(b) == b);
1459 a->freelist[listno] = NULL;
1460 } else {
1461 Block* b_prev = get_prev_b(b);
1462 Block* b_next = get_next_b(b);
1463 a->freelist[listno] = b_prev;
1464 set_next_b(b_prev, b_next);
1465 set_prev_b(b_next, b_prev);
1466 swizzle ( a, listno );
1467 }
1468 set_prev_b(b, NULL);
1469 set_next_b(b, NULL);
jsewardb1a26ae2004-03-14 03:06:37 +00001470}
1471
1472
sewardjde4a1d02002-03-22 01:27:54 +00001473/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001474/*--- Core-visible functions. ---*/
sewardjde4a1d02002-03-22 01:27:54 +00001475/*------------------------------------------------------------*/
1476
nethercote2d5b8162004-08-11 09:40:52 +00001477// Align the request size.
1478static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +00001479SizeT align_req_pszB ( SizeT req_pszB )
nethercote2d5b8162004-08-11 09:40:52 +00001480{
nethercote7ac7f7b2004-11-02 12:36:02 +00001481 SizeT n = VG_MIN_MALLOC_SZB-1;
nethercote2d5b8162004-08-11 09:40:52 +00001482 return ((req_pszB + n) & (~n));
1483}
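
// Example of the rounding above (VG_MIN_MALLOC_SZB is platform-dependent;
// assume 8 here): n == 7, so a request of 13 bytes becomes
// (13 + 7) & ~7 == 16, and a request of 16 stays 16.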
1484
florian54fe2022012-10-27 23:07:42 +00001485void* VG_(arena_malloc) ( ArenaId aid, const HChar* cc, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001486{
nethercote7ac7f7b2004-11-02 12:36:02 +00001487 SizeT req_bszB, frag_bszB, b_bszB;
sewardj0b3fd2d2007-08-21 10:55:26 +00001488 UInt lno, i;
sewardjd8b93462011-09-10 10:17:35 +00001489 Superblock* new_sb = NULL;
nethercote2d5b8162004-08-11 09:40:52 +00001490 Block* b = NULL;
sewardjde4a1d02002-03-22 01:27:54 +00001491 Arena* a;
jsewardb1a26ae2004-03-14 03:06:37 +00001492 void* v;
sewardj7d1064a2011-02-23 13:18:56 +00001493 UWord stats__nsearches = 0;
sewardjde4a1d02002-03-22 01:27:54 +00001494
sewardj45f4e7c2005-09-27 19:20:21 +00001495 ensure_mm_init(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001496 a = arenaId_to_ArenaP(aid);
1497
nethercote7ac7f7b2004-11-02 12:36:02 +00001498 vg_assert(req_pszB < MAX_PSZB);
nethercote2d5b8162004-08-11 09:40:52 +00001499 req_pszB = align_req_pszB(req_pszB);
1500 req_bszB = pszB_to_bszB(a, req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001501
sewardj9c606bd2008-09-18 18:12:50 +00001502 // You must provide a cost-center name against which to charge
1503 // this allocation; it isn't optional.
1504 vg_assert(cc);
1505
nethercote2d5b8162004-08-11 09:40:52 +00001506 // Scan through all the big-enough freelists for a block.
njn4ab6d532007-10-16 23:18:06 +00001507 //
1508 // Nb: this scanning might be expensive in some cases. Eg. if you
1509 // allocate lots of small objects without freeing them, but no
1510 // medium-sized objects, it will repeatedly scan through the whole
1511 // list, each time not finding any free block until the last element.
1512 //
1513 // If this becomes a noticeable problem... the loop answers the question
1514 // "where is the first nonempty list above me?" And most of the time,
1515 // you ask the same question and get the same answer. So it would be
1516 // good to somehow cache the results of previous searches.
1517 // One possibility is an array (with N_MALLOC_LISTS elements) of
1518 // shortcuts. shortcut[i] would give the index number of the nearest
1519 // larger list above list i which is non-empty. Then this loop isn't
1520 // necessary. However, we'd have to modify some section [ .. i-1] of the
1521 // shortcut array every time a list [i] changes from empty to nonempty or
1522 // back. This would require care to avoid pathological worst-case
1523 // behaviour. (A sketch of this shortcut-array idea appears just after this function.)
1524 //
njn6e6588c2005-03-13 18:52:48 +00001525 for (lno = pszB_to_listNo(req_pszB); lno < N_MALLOC_LISTS; lno++) {
sewardj7d1064a2011-02-23 13:18:56 +00001526 UWord nsearches_this_level = 0;
sewardjde4a1d02002-03-22 01:27:54 +00001527 b = a->freelist[lno];
nethercote2d5b8162004-08-11 09:40:52 +00001528 if (NULL == b) continue; // If this list is empty, try the next one.
sewardjde4a1d02002-03-22 01:27:54 +00001529 while (True) {
sewardj7d1064a2011-02-23 13:18:56 +00001530 stats__nsearches++;
1531 nsearches_this_level++;
1532 if (UNLIKELY(nsearches_this_level >= 100)
1533 && lno < N_MALLOC_LISTS-1) {
1534 /* Avoid excessive scanning on this freelist, and instead
1535 try the next one up. But first, move this freelist's
1536 start pointer one element along, so as to ensure that
1537 subsequent searches of this list don't endlessly
1538 revisit only these 100 elements, but in fact slowly
1539 progress through the entire list. */
1540 b = a->freelist[lno];
1541 vg_assert(b); // this list must be nonempty!
1542 a->freelist[lno] = get_next_b(b); // step one along
1543 break;
1544 }
njnd0e685c2005-07-17 17:55:42 +00001545 b_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +00001546 if (b_bszB >= req_bszB) goto obtained_block; // success!
1547 b = get_next_b(b);
1548 if (b == a->freelist[lno]) break; // traversed entire freelist
sewardjde4a1d02002-03-22 01:27:54 +00001549 }
sewardjde4a1d02002-03-22 01:27:54 +00001550 }
1551
nethercote2d5b8162004-08-11 09:40:52 +00001552 // If we reach here, no suitable block found, allocate a new superblock
njn6e6588c2005-03-13 18:52:48 +00001553 vg_assert(lno == N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +00001554 new_sb = newSuperblock(a, req_bszB);
1555 if (NULL == new_sb) {
1556 // Should only fail for the client arena; otherwise we should have
1557 // aborted already.
1558 vg_assert(VG_AR_CLIENT == aid);
1559 return NULL;
sewardjde4a1d02002-03-22 01:27:54 +00001560 }
sewardj0b3fd2d2007-08-21 10:55:26 +00001561
1562 vg_assert(a->sblocks_used <= a->sblocks_size);
1563 if (a->sblocks_used == a->sblocks_size) {
1564 Superblock ** array;
1565 SysRes sres = VG_(am_sbrk_anon_float_valgrind)(sizeof(Superblock *) *
1566 a->sblocks_size * 2);
njncda2f0f2009-05-18 02:12:08 +00001567 if (sr_isError(sres)) {
sewardj0b3fd2d2007-08-21 10:55:26 +00001568 VG_(out_of_memory_NORETURN)("arena_init", sizeof(Superblock *) *
1569 a->sblocks_size * 2);
1570 /* NOTREACHED */
1571 }
njncda2f0f2009-05-18 02:12:08 +00001572 array = (Superblock**)(AddrH)sr_Res(sres);
sewardj0b3fd2d2007-08-21 10:55:26 +00001573 for (i = 0; i < a->sblocks_used; ++i) array[i] = a->sblocks[i];
1574
1575 a->sblocks_size *= 2;
1576 a->sblocks = array;
1577 VG_(debugLog)(1, "mallocfree",
1578 "sblock array for arena `%s' resized to %ld\n",
1579 a->name, a->sblocks_size);
1580 }
1581
1582 vg_assert(a->sblocks_used < a->sblocks_size);
1583
1584 i = a->sblocks_used;
1585 while (i > 0) {
1586 if (a->sblocks[i-1] > new_sb) {
1587 a->sblocks[i] = a->sblocks[i-1];
1588 } else {
1589 break;
1590 }
1591 --i;
1592 }
1593 a->sblocks[i] = new_sb;
1594 a->sblocks_used++;
1595
nethercote2d5b8162004-08-11 09:40:52 +00001596 b = (Block*)&new_sb->payload_bytes[0];
1597 lno = pszB_to_listNo(bszB_to_pszB(a, new_sb->n_payload_bytes));
1598 mkFreeBlock ( a, b, new_sb->n_payload_bytes, lno);
sewardj94c8eb42008-09-19 20:13:39 +00001599 if (VG_(clo_profile_heap))
1600 set_cc(b, "admin.free-new-sb-1");
nethercote2d5b8162004-08-11 09:40:52 +00001601 // fall through
sewardjde4a1d02002-03-22 01:27:54 +00001602
nethercote2d5b8162004-08-11 09:40:52 +00001603 obtained_block:
1604 // Ok, we can allocate from b, which lives in list lno.
sewardjde4a1d02002-03-22 01:27:54 +00001605 vg_assert(b != NULL);
njn6e6588c2005-03-13 18:52:48 +00001606 vg_assert(lno < N_MALLOC_LISTS);
sewardjde4a1d02002-03-22 01:27:54 +00001607 vg_assert(a->freelist[lno] != NULL);
njnd0e685c2005-07-17 17:55:42 +00001608 b_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +00001609 // req_bszB is the size of the block we are after. b_bszB is the
1610 // size of what we've actually got.
1611 vg_assert(b_bszB >= req_bszB);
sewardjde4a1d02002-03-22 01:27:54 +00001612
nethercote2d5b8162004-08-11 09:40:52 +00001613 // Could we split this block and still get a useful fragment?
sewardjd043de92011-09-26 11:28:20 +00001614 // A block in an unsplittable superblock can never be split.
nethercote2d5b8162004-08-11 09:40:52 +00001615 frag_bszB = b_bszB - req_bszB;
sewardjd8b93462011-09-10 10:17:35 +00001616 if (frag_bszB >= min_useful_bszB(a)
sewardjd043de92011-09-26 11:28:20 +00001617 && (NULL == new_sb || ! new_sb->unsplittable)) {
nethercote2d5b8162004-08-11 09:40:52 +00001618 // Yes, split block in two, put the fragment on the appropriate free
1619 // list, and update b_bszB accordingly.
1620 // printf( "split %dB into %dB and %dB\n", b_bszB, req_bszB, frag_bszB );
sewardjde4a1d02002-03-22 01:27:54 +00001621 unlinkBlock(a, b, lno);
nethercote2d5b8162004-08-11 09:40:52 +00001622 mkInuseBlock(a, b, req_bszB);
sewardj94c8eb42008-09-19 20:13:39 +00001623 if (VG_(clo_profile_heap))
1624 set_cc(b, cc);
nethercote2d5b8162004-08-11 09:40:52 +00001625 mkFreeBlock(a, &b[req_bszB], frag_bszB,
1626 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
sewardj94c8eb42008-09-19 20:13:39 +00001627 if (VG_(clo_profile_heap))
1628 set_cc(&b[req_bszB], "admin.fragmentation-1");
njnd0e685c2005-07-17 17:55:42 +00001629 b_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +00001630 } else {
1631 // No, mark as in use and use as-is.
1632 unlinkBlock(a, b, lno);
1633 mkInuseBlock(a, b, b_bszB);
sewardj94c8eb42008-09-19 20:13:39 +00001634 if (VG_(clo_profile_heap))
1635 set_cc(b, cc);
sewardjde4a1d02002-03-22 01:27:54 +00001636 }
sewardjde4a1d02002-03-22 01:27:54 +00001637
nethercote2d5b8162004-08-11 09:40:52 +00001638 // Update stats
sewardj7d1064a2011-02-23 13:18:56 +00001639 SizeT loaned = bszB_to_pszB(a, b_bszB);
1640 a->stats__bytes_on_loan += loaned;
1641 if (a->stats__bytes_on_loan > a->stats__bytes_on_loan_max) {
1642 a->stats__bytes_on_loan_max = a->stats__bytes_on_loan;
1643 if (a->stats__bytes_on_loan_max >= a->next_profile_at) {
sewardj9c606bd2008-09-18 18:12:50 +00001644 /* next profile after 10% more growth */
1645 a->next_profile_at
1646 = (SizeT)(
sewardj75f107f2011-09-26 20:17:41 +00001647 (((ULong)a->stats__bytes_on_loan_max) * 105ULL) / 100ULL );
sewardj9c606bd2008-09-18 18:12:50 +00001648 if (VG_(clo_profile_heap))
1649 cc_analyse_alloc_arena(aid);
1650 }
1651 }
sewardj7d1064a2011-02-23 13:18:56 +00001652 a->stats__tot_blocks += (ULong)1;
1653 a->stats__tot_bytes += (ULong)loaned;
1654 a->stats__nsearches += (ULong)stats__nsearches;
sewardjde4a1d02002-03-22 01:27:54 +00001655
1656# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00001657 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001658# endif
1659
nethercote2d5b8162004-08-11 09:40:52 +00001660 v = get_block_payload(a, b);
1661 vg_assert( (((Addr)v) & (VG_MIN_MALLOC_SZB-1)) == 0 );
sewardjb5f6f512005-03-10 23:59:00 +00001662
philippe72faf102012-03-11 22:24:03 +00001663 // Which size should we pass to VALGRIND_MALLOCLIKE_BLOCK ?
1664 // We have 2 possible options:
1665 // 1. The final resulting usable size.
1666 // 2. The initial (non-aligned) req_pszB.
1667 // Memcheck implements option 2 easily, as the initial requested size
1668 // is maintained in the mc_chunk data structure.
1669 // This is not as easy in the core, as there is no such structure.
1670 // (note: using the aligned req_pszB is not simpler than 2, as
1671 // requesting an aligned req_pszB might still be satisfied by returning
1672 // a (slightly) bigger block than requested if the remaining part of
1673 // of a free block is not big enough to make a free block by itself).
1674 // Implement Sol 2 can be done the following way:
1675 // After having called VALGRIND_MALLOCLIKE_BLOCK, the non accessible
1676 // redzone just after the block can be used to determine the
1677 // initial requested size.
1678 // Currently, not implemented => we use Option 1.
1679 INNER_REQUEST
1680 (VALGRIND_MALLOCLIKE_BLOCK(v,
1681 VG_(arena_malloc_usable_size)(aid, v),
1682 a->rz_szB, False));
sewardja53462a2007-11-24 23:37:07 +00001683
1684 /* For debugging/testing purposes, fill the newly allocated area
1685 with a definite value in an attempt to shake out any
1686 uninitialised uses of the data (by V core / V tools, not by the
1687 client). Testing on 25 Nov 07 with the values 0x00, 0xFF, 0x55,
1688 0xAA showed no differences in the regression tests on
1689 amd64-linux. Note, is disabled by default. */
1690 if (0 && aid != VG_AR_CLIENT)
1691 VG_(memset)(v, 0xAA, (SizeT)req_pszB);
1692
jsewardb1a26ae2004-03-14 03:06:37 +00001693 return v;
sewardjde4a1d02002-03-22 01:27:54 +00001694}
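
/* A compiled-out sketch of the shortcut-array idea mentioned in the
   comment inside VG_(arena_malloc) above. Names are hypothetical and
   nothing below is implemented; for simplicity this version recomputes
   the whole array, whereas the comment envisages incremental updates of
   the affected prefix. */
#if 0
static UInt shortcut[N_MALLOC_LISTS]; // nearest nonempty list at or above i

static void recompute_shortcuts ( Arena* a )
{
   Int  i;
   UInt nearest = N_MALLOC_LISTS;     // means "no nonempty list above"
   for (i = N_MALLOC_LISTS-1; i >= 0; i--) {
      if (a->freelist[i] != NULL)
         nearest = (UInt)i;
      shortcut[i] = nearest;
   }
}
#endif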
1695
sewardjd043de92011-09-26 11:28:20 +00001696 // If the arena already has a deferred reclaimed superblock and that
1697 // superblock is still reclaimable, then it is reclaimed first.
1698 // sb then becomes the arena's new deferred superblock.
1699 // Passing NULL as sb reclaims any deferred sb without setting up a new
1700 // deferred reclaim.
1702static
1703void deferred_reclaimSuperblock ( Arena* a, Superblock* sb)
1704{
1705
1706 if (sb == NULL) {
1707 if (!a->deferred_reclaimed_sb)
1708 // no deferred sb to reclaim now, nothing to do in the future =>
1709 // return directly.
1710 return;
1711
1712 VG_(debugLog)(1, "mallocfree",
1713 "deferred_reclaimSuperblock NULL "
1714 "(prev %p) owner %s/%s\n",
1715 a->deferred_reclaimed_sb,
1716 a->clientmem ? "CLIENT" : "VALGRIND", a->name );
1717 } else
1718 VG_(debugLog)(1, "mallocfree",
1719 "deferred_reclaimSuperblock at %p (pszB %7ld) %s "
1720 "(prev %p) owner %s/%s\n",
1721 sb, sb->n_payload_bytes,
1722 (sb->unsplittable ? "unsplittable" : ""),
1723 a->deferred_reclaimed_sb,
1724 a->clientmem ? "CLIENT" : "VALGRIND", a->name );
1725
1726 if (a->deferred_reclaimed_sb && a->deferred_reclaimed_sb != sb) {
1727 // If a different superblock is already deferred, and that superblock
1728 // can still be reclaimed, reclaim it now.
1729 // Note that we might have a re-deferred reclaim of the same block
1730 // with a sequence: free (causing a deferred reclaim of sb)
1731 // alloc (using a piece of memory of the deferred sb)
1732 // free of the just alloc-ed block (causing a re-defer).
1733 UByte* def_sb_start;
1734 UByte* def_sb_end;
1735 Superblock* def_sb;
1736 Block* b;
1737
1738 def_sb = a->deferred_reclaimed_sb;
1739 def_sb_start = &def_sb->payload_bytes[0];
1740 def_sb_end = &def_sb->payload_bytes[def_sb->n_payload_bytes - 1];
1741 b = (Block *)def_sb_start;
1742 vg_assert (blockSane(a, b));
1743
1744 // Check if the deferred_reclaimed_sb is still reclaimable.
1745 // If yes, we will execute the reclaim.
1746 if (!is_inuse_block(b)) {
1747 // b (at the beginning of def_sb) is not in use.
1748 UInt b_listno;
1749 SizeT b_bszB, b_pszB;
1750 b_bszB = get_bszB(b);
1751 b_pszB = bszB_to_pszB(a, b_bszB);
1752 if (b + b_bszB-1 == (Block*)def_sb_end) {
1753 // b (not in use) covers the full superblock.
1754 // => def_sb is still reclaimable
1755 // => execute now the reclaim of this def_sb.
1756 b_listno = pszB_to_listNo(b_pszB);
1757 unlinkBlock( a, b, b_listno );
1758 reclaimSuperblock (a, def_sb);
1759 a->deferred_reclaimed_sb = NULL;
1760 }
1761 }
1762 }
1763
1764 // sb (possibly NULL) becomes the new deferred reclaimed superblock.
1765 a->deferred_reclaimed_sb = sb;
1766}
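
// Illustrative call pattern for the re-defer case described above
// ('p' is a hypothetical client pointer whose block is the only block
// of superblock sb):
#if 0
   VG_(arena_free)(aid, p);              // sb empty: reclaim deferred
   p = VG_(arena_malloc)(aid, cc, szB);  // may reuse part of sb
   VG_(arena_free)(aid, p);              // sb empty again: re-deferred;
                                         // the next deferral of another
                                         // sb finally reclaims it.
#endif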
1767
sewardjde4a1d02002-03-22 01:27:54 +00001768
njn25e49d8e72002-09-23 09:36:25 +00001769void VG_(arena_free) ( ArenaId aid, void* ptr )
sewardjde4a1d02002-03-22 01:27:54 +00001770{
1771 Superblock* sb;
nethercote2d5b8162004-08-11 09:40:52 +00001772 UByte* sb_start;
1773 UByte* sb_end;
njna2578652005-07-17 17:12:24 +00001774 Block* other_b;
nethercote2d5b8162004-08-11 09:40:52 +00001775 Block* b;
nethercote7ac7f7b2004-11-02 12:36:02 +00001776 SizeT b_bszB, b_pszB, other_bszB;
1777 UInt b_listno;
sewardjde4a1d02002-03-22 01:27:54 +00001778 Arena* a;
1779
sewardj45f4e7c2005-09-27 19:20:21 +00001780 ensure_mm_init(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001781 a = arenaId_to_ArenaP(aid);
1782
njn25e49d8e72002-09-23 09:36:25 +00001783 if (ptr == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00001784 return;
1785 }
1786
nethercote2d5b8162004-08-11 09:40:52 +00001787 b = get_payload_block(a, ptr);
sewardjde4a1d02002-03-22 01:27:54 +00001788
sewardj3187a4e2005-12-04 23:27:14 +00001789 /* If this is one of V's areas, check carefully the block we're
1790 getting back. This picks up simple block-end overruns. */
1791 if (aid != VG_AR_CLIENT)
1792 vg_assert(blockSane(a, b));
sewardjde4a1d02002-03-22 01:27:54 +00001793
njne6f9e3b2005-07-17 18:00:57 +00001794 b_bszB = get_bszB(b);
1795 b_pszB = bszB_to_pszB(a, b_bszB);
nethercote2d5b8162004-08-11 09:40:52 +00001796 sb = findSb( a, b );
1797 sb_start = &sb->payload_bytes[0];
1798 sb_end = &sb->payload_bytes[sb->n_payload_bytes - 1];
sewardjde4a1d02002-03-22 01:27:54 +00001799
sewardj7d1064a2011-02-23 13:18:56 +00001800 a->stats__bytes_on_loan -= b_pszB;
njne6f9e3b2005-07-17 18:00:57 +00001801
sewardj3187a4e2005-12-04 23:27:14 +00001802 /* If this is one of V's areas, fill it up with junk to enhance the
1803 chances of catching any later reads of it. Note, 0xDD is
1804 carefully chosen junk :-), in that: (1) 0xDDDDDDDD is an invalid
1805 and non-word-aligned address on most systems, and (2) 0xDD is a
1806 value which is unlikely to be generated by the new compressed
1807 Vbits representation for memcheck. */
1808 if (aid != VG_AR_CLIENT)
1809 VG_(memset)(ptr, 0xDD, (SizeT)b_pszB);
1810
sewardjd043de92011-09-26 11:28:20 +00001811 if (! sb->unsplittable) {
sewardjd8b93462011-09-10 10:17:35 +00001812 // Put this chunk back on a list somewhere.
1813 b_listno = pszB_to_listNo(b_pszB);
1814 mkFreeBlock( a, b, b_bszB, b_listno );
1815 if (VG_(clo_profile_heap))
1816 set_cc(b, "admin.free-1");
sewardjde4a1d02002-03-22 01:27:54 +00001817
sewardjd8b93462011-09-10 10:17:35 +00001818 // See if this block can be merged with its successor.
1819 // First test if we're far enough before the superblock's end to possibly
1820 // have a successor.
1821 other_b = b + b_bszB;
1822 if (other_b+min_useful_bszB(a)-1 <= (Block*)sb_end) {
1823 // Ok, we have a successor, merge if it's not in use.
1824 other_bszB = get_bszB(other_b);
1825 if (!is_inuse_block(other_b)) {
1826 // VG_(printf)( "merge-successor\n");
1827# ifdef DEBUG_MALLOC
1828 vg_assert(blockSane(a, other_b));
1829# endif
1830 unlinkBlock( a, b, b_listno );
1831 unlinkBlock( a, other_b,
1832 pszB_to_listNo(bszB_to_pszB(a,other_bszB)) );
1833 b_bszB += other_bszB;
1834 b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
1835 mkFreeBlock( a, b, b_bszB, b_listno );
1836 if (VG_(clo_profile_heap))
1837 set_cc(b, "admin.free-2");
1838 }
1839 } else {
1840 // Not enough space for successor: check that b is the last block
1841 // ie. there are no unused bytes at the end of the Superblock.
1842 vg_assert(other_b-1 == (Block*)sb_end);
1843 }
1844
1845 // Then see if this block can be merged with its predecessor.
1846 // First test if we're far enough after the superblock's start to possibly
1847 // have a predecessor.
1848 if (b >= (Block*)sb_start + min_useful_bszB(a)) {
1849 // Ok, we have a predecessor, merge if it's not in use.
1850 other_b = get_predecessor_block( b );
1851 other_bszB = get_bszB(other_b);
1852 if (!is_inuse_block(other_b)) {
1853 // VG_(printf)( "merge-predecessor\n");
1854 unlinkBlock( a, b, b_listno );
1855 unlinkBlock( a, other_b,
1856 pszB_to_listNo(bszB_to_pszB(a, other_bszB)) );
1857 b = other_b;
1858 b_bszB += other_bszB;
1859 b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
1860 mkFreeBlock( a, b, b_bszB, b_listno );
1861 if (VG_(clo_profile_heap))
1862 set_cc(b, "admin.free-3");
1863 }
1864 } else {
1865 // Not enough space for predecessor: check that b is the first block,
1866 // ie. there are no unused bytes at the start of the Superblock.
1867 vg_assert((Block*)sb_start == b);
sewardjde4a1d02002-03-22 01:27:54 +00001868 }
sewardjd043de92011-09-26 11:28:20 +00001869
1870 /* If the block b just merged is the only block of the superblock sb,
1871 then we defer reclaim sb. */
1872 if ( ((Block*)sb_start == b) && (b + b_bszB-1 == (Block*)sb_end) ) {
1873 deferred_reclaimSuperblock (a, sb);
1874 }
1875
philippe72faf102012-03-11 22:24:03 +00001876 // Inform that ptr has been released. We give redzone size
1877 // 0 instead of a->rz_szB as proper accessibility is done just after.
1878 INNER_REQUEST(VALGRIND_FREELIKE_BLOCK(ptr, 0));
1879
1880 // We need to (re-)establish the minimum accessibility needed
1881 // for free list management. E.g. if block ptr has been put in a free
1882 // list and a neighbour block is released afterwards, the
1883 // "lo" and "hi" portions of the block ptr will be accessed to
1884 // glue the 2 blocks together.
1885 // We could mark the whole block as not accessible, and each time
1886 // transiently mark accessible the needed lo/hi parts. Not done as this
1887 // is quite complex, for very little expected additional bug detection.
1888 // Note that the below marks the (possibly) merged block, not the
1889 // block corresponding to the ptr argument.
1890
1891 // First mark the whole block unaccessible.
1892 INNER_REQUEST(VALGRIND_MAKE_MEM_NOACCESS(b, b_bszB));
1893 // Then mark the relevant administrative headers as defined.
1894 // No need to mark the heap profile portion as defined, this is not
1895 // used for free blocks.
1896 INNER_REQUEST(VALGRIND_MAKE_MEM_DEFINED(b + hp_overhead_szB(),
1897 sizeof(SizeT) + sizeof(void*)));
1898 INNER_REQUEST(VALGRIND_MAKE_MEM_DEFINED(b + b_bszB
1899 - sizeof(SizeT) - sizeof(void*),
1900 sizeof(SizeT) + sizeof(void*)));
nethercote2d5b8162004-08-11 09:40:52 +00001901 } else {
sewardjd8b93462011-09-10 10:17:35 +00001902 // b must be first block (i.e. no unused bytes at the beginning)
sewardj4c89b2f2011-08-17 22:13:14 +00001903 vg_assert((Block*)sb_start == b);
sewardjd8b93462011-09-10 10:17:35 +00001904
1905 // b must be last block (i.e. no unused bytes at the end)
1906 other_b = b + b_bszB;
1907 vg_assert(other_b-1 == (Block*)sb_end);
1908
philippe72faf102012-03-11 22:24:03 +00001909 // Inform that ptr has been released. Redzone size value
1910 // is not relevant (so we give 0 instead of a->rz_szB)
1911 // as it is expected that the aspacemgr munmap will be used by
1912 // outer to mark the whole superblock as unaccessible.
1913 INNER_REQUEST(VALGRIND_FREELIKE_BLOCK(ptr, 0));
1914
sewardjd043de92011-09-26 11:28:20 +00001915 // Reclaim immediately the unsplittable superblock sb.
sewardjd8b93462011-09-10 10:17:35 +00001916 reclaimSuperblock (a, sb);
sewardjde4a1d02002-03-22 01:27:54 +00001917 }
1918
1919# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00001920 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001921# endif
1922
sewardjde4a1d02002-03-22 01:27:54 +00001923}
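
// Note: the successor/predecessor merging above is what maintains the
// "no two adjacent free blocks" invariant that
// sanity_check_malloc_arena() reports as UNMERGED FREES when violated.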
1924
1925
1926/*
1927 The idea for malloc_aligned() is to allocate a big block, base, and
1928 then split it into two parts: frag, which is returned to the
1929 free pool, and align, which is the bit we're really after. Here's
1930 a picture. L and H denote the block lower and upper overheads, in
nethercote2d5b8162004-08-11 09:40:52 +00001931 bytes. The details are gruesome. Note it is slightly complicated
sewardjde4a1d02002-03-22 01:27:54 +00001932 because the initial request to generate base may return a bigger
1933 block than we asked for, so it is important to distinguish the base
1934 request size and the base actual size.
1935
1936 frag_b align_b
1937 | |
1938 | frag_p | align_p
1939 | | | |
1940 v v v v
1941
1942 +---+ +---+---+ +---+
1943 | L |----------------| H | L |---------------| H |
1944 +---+ +---+---+ +---+
1945
1946 ^ ^ ^
1947 | | :
1948 | base_p this addr must be aligned
1949 |
1950 base_b
1951
1952 . . . . . . .
nethercote2d5b8162004-08-11 09:40:52 +00001953 <------ frag_bszB -------> . . .
1954 . <------------- base_pszB_act -----------> .
sewardjde4a1d02002-03-22 01:27:54 +00001955 . . . . . . .
1956
1957*/
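
/* Worked example with hypothetical numbers: suppose VG_MIN_MALLOC_SZB
   == 8, req_alignB == 64 and req_pszB aligns up to 104. Then
   base_pszB_req == 104 + min_useful_bszB(a) + 64, which guarantees
   that wherever base_p lands there is room to carve off a leading
   fragment and still place align_p on a 64-byte boundary with at
   least 104 payload bytes behind it. */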
florian54fe2022012-10-27 23:07:42 +00001958void* VG_(arena_memalign) ( ArenaId aid, const HChar* cc,
sewardj9c606bd2008-09-18 18:12:50 +00001959 SizeT req_alignB, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001960{
nethercote7ac7f7b2004-11-02 12:36:02 +00001961 SizeT base_pszB_req, base_pszB_act, frag_bszB;
nethercote2d5b8162004-08-11 09:40:52 +00001962 Block *base_b, *align_b;
1963 UByte *base_p, *align_p;
nethercote7ac7f7b2004-11-02 12:36:02 +00001964 SizeT saved_bytes_on_loan;
sewardjde4a1d02002-03-22 01:27:54 +00001965 Arena* a;
1966
sewardj45f4e7c2005-09-27 19:20:21 +00001967 ensure_mm_init(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001968 a = arenaId_to_ArenaP(aid);
1969
nethercote7ac7f7b2004-11-02 12:36:02 +00001970 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00001971
sewardj9c606bd2008-09-18 18:12:50 +00001972 // You must provide a cost-center name against which to charge
1973 // this allocation; it isn't optional.
1974 vg_assert(cc);
1975
philippef5f6ed12012-06-15 22:19:59 +00001976 // Check that the requested alignment is plausible: neither too
1977 // small nor too big, and a power of 2.
1979 if (req_alignB < VG_MIN_MALLOC_SZB
philippef5f6ed12012-06-15 22:19:59 +00001980 || req_alignB > 16 * 1024 * 1024
njn717cde52005-05-10 02:47:21 +00001981 || VG_(log2)( req_alignB ) == -1 /* not a power of 2 */) {
njn36b65172009-04-14 23:43:15 +00001982 VG_(printf)("VG_(arena_memalign)(%p, %lu, %lu)\n"
1983 "bad alignment value %lu\n"
1984 "(it is too small, too big, or not a power of two)",
1985 a, req_alignB, req_pszB, req_alignB );
njn717cde52005-05-10 02:47:21 +00001986 VG_(core_panic)("VG_(arena_memalign)");
nethercote2d5b8162004-08-11 09:40:52 +00001987 /*NOTREACHED*/
sewardjde4a1d02002-03-22 01:27:54 +00001988 }
nethercote2d5b8162004-08-11 09:40:52 +00001989 // Paranoid
1990 vg_assert(req_alignB % VG_MIN_MALLOC_SZB == 0);
sewardjde4a1d02002-03-22 01:27:54 +00001991
1992 /* Required payload size for the aligned chunk. */
nethercote2d5b8162004-08-11 09:40:52 +00001993 req_pszB = align_req_pszB(req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001994
nethercote2d5b8162004-08-11 09:40:52 +00001995 /* Payload size to request for the big block that we will split up. */
1996 base_pszB_req = req_pszB + min_useful_bszB(a) + req_alignB;
sewardjde4a1d02002-03-22 01:27:54 +00001997
1998 /* Payload ptr for the block we are going to split. Note this
1999 changes a->bytes_on_loan; we save and restore it ourselves. */
sewardj7d1064a2011-02-23 13:18:56 +00002000 saved_bytes_on_loan = a->stats__bytes_on_loan;
sewardjd8b93462011-09-10 10:17:35 +00002001 {
2002 /* As we will split the block given back by VG_(arena_malloc),
sewardjd043de92011-09-26 11:28:20 +00002003 we have to (temporarily) disable unsplittable superblocks for
2004 this arena, as such superblocks cannot be split. */
2005 const SizeT save_min_unsplittable_sblock_szB
2006 = a->min_unsplittable_sblock_szB;
2007 a->min_unsplittable_sblock_szB = MAX_PSZB;
sewardjd8b93462011-09-10 10:17:35 +00002008 base_p = VG_(arena_malloc) ( aid, cc, base_pszB_req );
sewardjd043de92011-09-26 11:28:20 +00002009 a->min_unsplittable_sblock_szB = save_min_unsplittable_sblock_szB;
sewardjd8b93462011-09-10 10:17:35 +00002010 }
sewardj7d1064a2011-02-23 13:18:56 +00002011 a->stats__bytes_on_loan = saved_bytes_on_loan;
sewardjde4a1d02002-03-22 01:27:54 +00002012
tom8af1a172005-10-06 12:04:26 +00002013 /* Give up if we couldn't allocate enough space */
2014 if (base_p == 0)
2015 return 0;
philippe72faf102012-03-11 22:24:03 +00002016 /* base_p was marked as allocated by VALGRIND_MALLOCLIKE_BLOCK
2017 inside VG_(arena_malloc). We need to indicate it is free, then
2018 we need to mark it undefined to allow the below code to access it. */
2019 INNER_REQUEST(VALGRIND_FREELIKE_BLOCK(base_p, a->rz_szB));
2020 INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(base_p, base_pszB_req));
tom8af1a172005-10-06 12:04:26 +00002021
sewardjde4a1d02002-03-22 01:27:54 +00002022 /* Block ptr for the block we are going to split. */
nethercote2d5b8162004-08-11 09:40:52 +00002023 base_b = get_payload_block ( a, base_p );
sewardjde4a1d02002-03-22 01:27:54 +00002024
2025 /* Pointer to the payload of the aligned block we are going to
2026 return. This has to be suitably aligned. */
nethercote2d5b8162004-08-11 09:40:52 +00002027 align_p = align_upwards ( base_b + 2 * overhead_szB_lo(a)
2028 + overhead_szB_hi(a),
sewardjde4a1d02002-03-22 01:27:54 +00002029 req_alignB );
nethercote2d5b8162004-08-11 09:40:52 +00002030 align_b = get_payload_block(a, align_p);
sewardjde4a1d02002-03-22 01:27:54 +00002031
2032 /* The block size of the fragment we will create. This must be big
2033 enough to actually create a fragment. */
nethercote2d5b8162004-08-11 09:40:52 +00002034 frag_bszB = align_b - base_b;
2035
2036 vg_assert(frag_bszB >= min_useful_bszB(a));
sewardjde4a1d02002-03-22 01:27:54 +00002037
2038 /* The actual payload size of the block we are going to split. */
njn089f51f2005-07-17 18:12:00 +00002039 base_pszB_act = get_pszB(a, base_b);
sewardjde4a1d02002-03-22 01:27:54 +00002040
nethercote2d5b8162004-08-11 09:40:52 +00002041 /* Create the fragment block, and put it back on the relevant free list. */
2042 mkFreeBlock ( a, base_b, frag_bszB,
2043 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)) );
sewardj94c8eb42008-09-19 20:13:39 +00002044 if (VG_(clo_profile_heap))
2045 set_cc(base_b, "admin.frag-memalign-1");
sewardjde4a1d02002-03-22 01:27:54 +00002046
2047 /* Create the aligned block. */
nethercote2d5b8162004-08-11 09:40:52 +00002048 mkInuseBlock ( a, align_b,
2049 base_p + base_pszB_act
2050 + overhead_szB_hi(a) - (UByte*)align_b );
sewardj94c8eb42008-09-19 20:13:39 +00002051 if (VG_(clo_profile_heap))
2052 set_cc(align_b, cc);
sewardjde4a1d02002-03-22 01:27:54 +00002053
2054 /* Final sanity checks. */
njn472cc7c2005-07-17 17:20:30 +00002055 vg_assert( is_inuse_block(get_payload_block(a, align_p)) );
sewardjde4a1d02002-03-22 01:27:54 +00002056
njn089f51f2005-07-17 18:12:00 +00002057 vg_assert(req_pszB <= get_pszB(a, get_payload_block(a, align_p)));
sewardjde4a1d02002-03-22 01:27:54 +00002058
sewardj7d1064a2011-02-23 13:18:56 +00002059 a->stats__bytes_on_loan += get_pszB(a, get_payload_block(a, align_p));
2060 if (a->stats__bytes_on_loan > a->stats__bytes_on_loan_max) {
2061 a->stats__bytes_on_loan_max = a->stats__bytes_on_loan;
2062 }
2063 /* a->stats__tot_blocks, a->stats__tot_bytes, a->stats__nsearches
2064 are updated by the call to VG_(arena_malloc) just a few lines
2065 above. So we don't need to update them here. */
sewardjde4a1d02002-03-22 01:27:54 +00002066
2067# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00002068 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00002069# endif
2070
nethercote2d5b8162004-08-11 09:40:52 +00002071 vg_assert( (((Addr)align_p) % req_alignB) == 0 );
sewardjb5f6f512005-03-10 23:59:00 +00002072
philippe72faf102012-03-11 22:24:03 +00002073 INNER_REQUEST(VALGRIND_MALLOCLIKE_BLOCK(align_p,
2074 req_pszB, a->rz_szB, False));
sewardjb5f6f512005-03-10 23:59:00 +00002075
nethercote2d5b8162004-08-11 09:40:52 +00002076 return align_p;
2077}
2078
2079
njn8b140de2009-02-17 04:31:18 +00002080SizeT VG_(arena_malloc_usable_size) ( ArenaId aid, void* ptr )
nethercote2d5b8162004-08-11 09:40:52 +00002081{
2082 Arena* a = arenaId_to_ArenaP(aid);
2083 Block* b = get_payload_block(a, ptr);
njn089f51f2005-07-17 18:12:00 +00002084 return get_pszB(a, b);
sewardjde4a1d02002-03-22 01:27:54 +00002085}
2086
bart545380e2008-04-21 17:28:50 +00002087
2088// Implementation of mallinfo(). There is no recent standard that defines
2089// the behavior of mallinfo(). The meaning of the fields in struct mallinfo
2090// is as follows:
2091//
2092// struct mallinfo {
2093// int arena; /* total space in arena */
2094// int ordblks; /* number of ordinary blocks */
2095// int smblks; /* number of small blocks */
2096// int hblks; /* number of holding blocks */
2097// int hblkhd; /* space in holding block headers */
2098// int usmblks; /* space in small blocks in use */
2099// int fsmblks; /* space in free small blocks */
2100// int uordblks; /* space in ordinary blocks in use */
2101// int fordblks; /* space in free ordinary blocks */
2102// int keepcost; /* space penalty if keep option */
2103// /* is used */
2104// };
2105//
2106// The glibc documentation about mallinfo (which is somewhat outdated) can
2107// be found here:
2108// http://www.gnu.org/software/libtool/manual/libc/Statistics-of-Malloc.html
2109//
2110// See also http://bugs.kde.org/show_bug.cgi?id=160956.
2111//
2112// Regarding the implementation of VG_(mallinfo)(): we cannot return the
2113// whole struct as the library function does, because this is called by a
2114// client request. So instead we use a pointer to do call by reference.
njn088bfb42005-08-17 05:01:37 +00002115void VG_(mallinfo) ( ThreadId tid, struct vg_mallinfo* mi )
2116{
sewardj76dda8f2008-05-29 13:45:49 +00002117 UWord i, free_blocks, free_blocks_size;
bartc3c98392008-04-19 14:43:30 +00002118 Arena* a = arenaId_to_ArenaP(VG_AR_CLIENT);
2119
2120 // Traverse free list and calculate free blocks statistics.
2121 // This may seem slow but glibc works the same way.
2122 free_blocks_size = free_blocks = 0;
2123 for (i = 0; i < N_MALLOC_LISTS; i++) {
2124 Block* b = a->freelist[i];
2125 if (b == NULL) continue;
2126 for (;;) {
2127 free_blocks++;
sewardj76dda8f2008-05-29 13:45:49 +00002128 free_blocks_size += (UWord)get_pszB(a, b);
bartc3c98392008-04-19 14:43:30 +00002129 b = get_next_b(b);
2130 if (b == a->freelist[i]) break;
2131 }
2132 }
2133
2134 // We don't have fastbins so smblks & fsmblks are always 0. Also we don't
bart545380e2008-04-21 17:28:50 +00002135 // have a separate mmap allocator so set hblks & hblkhd to 0.
sewardj7d1064a2011-02-23 13:18:56 +00002136 mi->arena = a->stats__bytes_mmaped;
bart545380e2008-04-21 17:28:50 +00002137 mi->ordblks = free_blocks + VG_(free_queue_length);
bartc3c98392008-04-19 14:43:30 +00002138 mi->smblks = 0;
2139 mi->hblks = 0;
2140 mi->hblkhd = 0;
2141 mi->usmblks = 0;
2142 mi->fsmblks = 0;
sewardj7d1064a2011-02-23 13:18:56 +00002143 mi->uordblks = a->stats__bytes_on_loan - VG_(free_queue_volume);
bart545380e2008-04-21 17:28:50 +00002144 mi->fordblks = free_blocks_size + VG_(free_queue_volume);
bartc3c98392008-04-19 14:43:30 +00002145 mi->keepcost = 0; // may want some value in here
njn088bfb42005-08-17 05:01:37 +00002146}
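
// A compiled-out sketch of how a caller might read the fields filled in
// above (given a ThreadId tid; field meanings as documented before this
// function):
#if 0
   struct vg_mallinfo mi;
   VG_(mallinfo)(tid, &mi);
   /* client heap bytes currently allocated / free */
   Int in_use = mi.uordblks;
   Int free_b = mi.fordblks;
   /* total mmap'd for the client arena */
   Int mapped = mi.arena;
#endif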
sewardjde4a1d02002-03-22 01:27:54 +00002147
sewardj45f4e7c2005-09-27 19:20:21 +00002148
sewardjde4a1d02002-03-22 01:27:54 +00002149/*------------------------------------------------------------*/
2150/*--- Services layered on top of malloc/free. ---*/
2151/*------------------------------------------------------------*/
2152
florian54fe2022012-10-27 23:07:42 +00002153void* VG_(arena_calloc) ( ArenaId aid, const HChar* cc,
sewardj9c606bd2008-09-18 18:12:50 +00002154 SizeT nmemb, SizeT bytes_per_memb )
sewardjde4a1d02002-03-22 01:27:54 +00002155{
nethercote7ac7f7b2004-11-02 12:36:02 +00002156 SizeT size;
florian54fe2022012-10-27 23:07:42 +00002157 void* p;
njn25e49d8e72002-09-23 09:36:25 +00002158
njn926ed472005-03-11 04:44:10 +00002159 size = nmemb * bytes_per_memb;
2160 vg_assert(size >= nmemb && size >= bytes_per_memb);// check against overflow
njn3e884182003-04-15 13:03:23 +00002161
sewardj9c606bd2008-09-18 18:12:50 +00002162 p = VG_(arena_malloc) ( aid, cc, size );
njn3e884182003-04-15 13:03:23 +00002163
njn926ed472005-03-11 04:44:10 +00002164 VG_(memset)(p, 0, size);
sewardjb5f6f512005-03-10 23:59:00 +00002165
sewardjde4a1d02002-03-22 01:27:54 +00002166 return p;
2167}
2168
2169
florian54fe2022012-10-27 23:07:42 +00002170void* VG_(arena_realloc) ( ArenaId aid, const HChar* cc,
sewardj9c606bd2008-09-18 18:12:50 +00002171 void* ptr, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00002172{
2173 Arena* a;
njn089f51f2005-07-17 18:12:00 +00002174 SizeT old_pszB;
florian54fe2022012-10-27 23:07:42 +00002175 void* p_new;
nethercote2d5b8162004-08-11 09:40:52 +00002176 Block* b;
sewardjde4a1d02002-03-22 01:27:54 +00002177
sewardj45f4e7c2005-09-27 19:20:21 +00002178 ensure_mm_init(aid);
sewardjde4a1d02002-03-22 01:27:54 +00002179 a = arenaId_to_ArenaP(aid);
2180
nethercote7ac7f7b2004-11-02 12:36:02 +00002181 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00002182
njn180f6982008-10-12 19:51:41 +00002183 if (NULL == ptr) {
2184 return VG_(arena_malloc)(aid, cc, req_pszB);
2185 }
2186
2187 if (req_pszB == 0) {
2188 VG_(arena_free)(aid, ptr);
2189 return NULL;
2190 }
2191
nethercote2d5b8162004-08-11 09:40:52 +00002192 b = get_payload_block(a, ptr);
2193 vg_assert(blockSane(a, b));
sewardjde4a1d02002-03-22 01:27:54 +00002194
njn472cc7c2005-07-17 17:20:30 +00002195 vg_assert(is_inuse_block(b));
njn089f51f2005-07-17 18:12:00 +00002196 old_pszB = get_pszB(a, b);
sewardjde4a1d02002-03-22 01:27:54 +00002197
njn25e49d8e72002-09-23 09:36:25 +00002198 if (req_pszB <= old_pszB) {
njn25e49d8e72002-09-23 09:36:25 +00002199 return ptr;
2200 }
sewardjde4a1d02002-03-22 01:27:54 +00002201
sewardj9c606bd2008-09-18 18:12:50 +00002202 p_new = VG_(arena_malloc) ( aid, cc, req_pszB );
njn828022a2005-03-13 14:56:31 +00002203
sewardjb5f6f512005-03-10 23:59:00 +00002204 VG_(memcpy)(p_new, ptr, old_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00002205
sewardjb5f6f512005-03-10 23:59:00 +00002206 VG_(arena_free)(aid, ptr);
njn25e49d8e72002-09-23 09:36:25 +00002207
sewardjde4a1d02002-03-22 01:27:54 +00002208 return p_new;
2209}
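
// Note: a shrinking realloc (req_pszB <= old_pszB) returns the same
// pointer and keeps the original block, so the usable size reported by
// VG_(arena_malloc_usable_size) can exceed the most recent request.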
2210
2211
njn6ba622c2005-06-11 01:12:08 +00002212/* Inline just for the wrapper VG_(strdup) below */
florian19f91bb2012-11-10 22:29:54 +00002213__inline__ HChar* VG_(arena_strdup) ( ArenaId aid, const HChar* cc,
2214 const HChar* s )
njn6ba622c2005-06-11 01:12:08 +00002215{
2216 Int i;
2217 Int len;
florian19f91bb2012-11-10 22:29:54 +00002218 HChar* res;
njn6ba622c2005-06-11 01:12:08 +00002219
2220 if (s == NULL)
2221 return NULL;
2222
2223 len = VG_(strlen)(s) + 1;
sewardj9c606bd2008-09-18 18:12:50 +00002224 res = VG_(arena_malloc) (aid, cc, len);
njn6ba622c2005-06-11 01:12:08 +00002225
2226 for (i = 0; i < len; i++)
2227 res[i] = s[i];
2228 return res;
2229}
2230
2231
sewardjde4a1d02002-03-22 01:27:54 +00002232/*------------------------------------------------------------*/
nethercote996901a2004-08-03 13:29:09 +00002233/*--- Tool-visible functions. ---*/
njn25e49d8e72002-09-23 09:36:25 +00002234/*------------------------------------------------------------*/
2235
nethercote2d5b8162004-08-11 09:40:52 +00002236// All just wrappers to avoid exposing arenas to tools.
njn25e49d8e72002-09-23 09:36:25 +00002237
florian54fe2022012-10-27 23:07:42 +00002238void* VG_(malloc) ( const HChar* cc, SizeT nbytes )
njn25e49d8e72002-09-23 09:36:25 +00002239{
sewardj9c606bd2008-09-18 18:12:50 +00002240 return VG_(arena_malloc) ( VG_AR_TOOL, cc, nbytes );
njn25e49d8e72002-09-23 09:36:25 +00002241}
2242
2243void VG_(free) ( void* ptr )
2244{
nethercote60f5b822004-01-26 17:24:42 +00002245 VG_(arena_free) ( VG_AR_TOOL, ptr );
njn25e49d8e72002-09-23 09:36:25 +00002246}
2247
florian54fe2022012-10-27 23:07:42 +00002248void* VG_(calloc) ( const HChar* cc, SizeT nmemb, SizeT bytes_per_memb )
njn25e49d8e72002-09-23 09:36:25 +00002249{
sewardj9c606bd2008-09-18 18:12:50 +00002250 return VG_(arena_calloc) ( VG_AR_TOOL, cc, nmemb, bytes_per_memb );
njn25e49d8e72002-09-23 09:36:25 +00002251}
2252
florian54fe2022012-10-27 23:07:42 +00002253void* VG_(realloc) ( const HChar* cc, void* ptr, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00002254{
sewardj9c606bd2008-09-18 18:12:50 +00002255 return VG_(arena_realloc) ( VG_AR_TOOL, cc, ptr, size );
njn25e49d8e72002-09-23 09:36:25 +00002256}
2257
florian19f91bb2012-11-10 22:29:54 +00002258HChar* VG_(strdup) ( const HChar* cc, const HChar* s )
njn6ba622c2005-06-11 01:12:08 +00002259{
sewardj9c606bd2008-09-18 18:12:50 +00002260 return VG_(arena_strdup) ( VG_AR_TOOL, cc, s );
njn6ba622c2005-06-11 01:12:08 +00002261}
2262
njn32397c02007-11-10 04:08:08 +00002263// Useful for querying user blocks.
2264SizeT VG_(malloc_usable_size) ( void* p )
2265{
njn00556da2009-03-17 04:51:19 +00002266 return VG_(arena_malloc_usable_size)(VG_AR_CLIENT, p);
njn32397c02007-11-10 04:08:08 +00002267}
2268
2269
sewardjde4a1d02002-03-22 01:27:54 +00002270/*--------------------------------------------------------------------*/
njn717cde52005-05-10 02:47:21 +00002271/*--- end ---*/
sewardjde4a1d02002-03-22 01:27:54 +00002272/*--------------------------------------------------------------------*/