
/*--------------------------------------------------------------------*/
/*--- An implementation of malloc/free which doesn't use sbrk.     ---*/
/*---                                               m_mallocfree.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2017 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcbase.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_threadstate.h" // For VG_INVALID_THREADID
#include "pub_core_gdbserver.h"
#include "pub_core_transtab.h"
#include "pub_core_tooliface.h"

#include "pub_core_inner.h"
#if defined(ENABLE_INNER_CLIENT_REQUEST)
#include "memcheck/memcheck.h"
#endif

// #define DEBUG_MALLOC      // turn on heavyweight debugging machinery
// #define VERBOSE_MALLOC    // make verbose, esp. in debugging machinery

/* Number and total size of blocks in free queue. Used by mallinfo(). */
Long VG_(free_queue_volume) = 0;
Long VG_(free_queue_length) = 0;

static void cc_analyse_alloc_arena ( ArenaId aid ); /* fwds */

/*------------------------------------------------------------*/
/*--- Main types                                           ---*/
/*------------------------------------------------------------*/

#define N_MALLOC_LISTS     112    // do not change this

// The amount you can ask for is limited only by sizeof(SizeT)...
#define MAX_PSZB              (~((SizeT)0x0))

// Each arena has a sorted array of superblocks, which expands
// dynamically.  This is its initial size.
#define SBLOCKS_SIZE_INITIAL 50

typedef UChar UByte;

/* Layout of an in-use block:

      cost center (OPTIONAL)   (VG_MIN_MALLOC_SZB bytes, only when h-p enabled)
      this block total szB     (sizeof(SizeT) bytes)
      red zone bytes           (depends on Arena.rz_szB, but >= sizeof(void*))
      (payload bytes)
      red zone bytes           (depends on Arena.rz_szB, but >= sizeof(void*))
      this block total szB     (sizeof(SizeT) bytes)

   Layout of a block on the free list:

      cost center (OPTIONAL)   (VG_MIN_MALLOC_SZB bytes, only when h-p enabled)
      this block total szB     (sizeof(SizeT) bytes)
      freelist previous ptr    (sizeof(void*) bytes)
      excess red zone bytes    (if Arena.rz_szB > sizeof(void*))
      (payload bytes)
      excess red zone bytes    (if Arena.rz_szB > sizeof(void*))
      freelist next ptr        (sizeof(void*) bytes)
      this block total szB     (sizeof(SizeT) bytes)

   Total size in bytes (bszB) and payload size in bytes (pszB)
   are related by:

      bszB == pszB + 2*sizeof(SizeT) + 2*a->rz_szB

   when heap profiling is not enabled, and

      bszB == pszB + 2*sizeof(SizeT) + 2*a->rz_szB + VG_MIN_MALLOC_SZB

   when it is enabled.  It follows that the minimum overhead per heap
   block for arenas used by the core is:

      32-bit platforms:  2*4 + 2*4 == 16 bytes
      64-bit platforms:  2*8 + 2*8 == 32 bytes

   when heap profiling is not enabled, and

      32-bit platforms:  2*4 + 2*4 + 8  == 24 bytes
      64-bit platforms:  2*8 + 2*8 + 16 == 48 bytes

   when it is enabled.  In all cases, extra overhead may be incurred
   when rounding the payload size up to VG_MIN_MALLOC_SZB.

   Furthermore, both size fields in the block have their least-significant
   bit set if the block is not in use, and unset if it is in use.
   (The bottom 3 or so bits are always free for this because of alignment.)
   A block size of zero is not possible, because a block always has at
   least two SizeTs and two pointers of overhead.

   Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned.  This is
   achieved by ensuring that Superblocks are VG_MIN_MALLOC_SZB-aligned
   (see newSuperblock() for how), and that the lengths of the following
   things are a multiple of VG_MIN_MALLOC_SZB:
   - Superblock admin section lengths (due to elastic padding)
   - Block admin section (low and high) lengths (due to elastic redzones)
   - Block payload lengths (due to req_pszB rounding up)

   The heap-profile cost-center field is 8 bytes even on 32 bit
   platforms.  This is so as to keep the payload field 8-aligned.  On
   a 64-bit platform, this cc-field contains a pointer to a const
   HChar*, which is the cost center name.  On 32-bit platforms, the
   pointer lives in the lower-addressed half of the field, regardless
   of the endianness of the host.
*/
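
/* A worked example of the size arithmetic above (illustrative only,
   assuming a 64-bit platform with sizeof(SizeT) == 8 and a->rz_szB == 16,
   heap profiling disabled): a request for pszB == 100 payload bytes needs

      bszB == 100 + 2*8 + 2*16 == 148

   bytes in the superblock, before the payload itself is rounded up to a
   multiple of VG_MIN_MALLOC_SZB. */
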
typedef
   struct {
      // No fields are actually used in this struct, because a Block has
      // many variable sized fields and so can't be accessed
      // meaningfully with normal fields.  So we use access functions all
      // the time.  This struct gives us a type to use, though.  Also, we
      // make sizeof(Block) 1 byte so that we can do arithmetic with the
      // Block* type in increments of 1!
      UByte dummy;
   }
   Block;

/* Ensure that Block payloads can be safely cast to various pointers below. */
STATIC_ASSERT(VG_MIN_MALLOC_SZB % sizeof(void *) == 0);

// A superblock.  'padding' is never used, it just ensures that if the
// entire Superblock is aligned to VG_MIN_MALLOC_SZB, then payload_bytes[]
// will be too.  It can add small amounts of padding unnecessarily -- eg.
// 8-bytes on 32-bit machines with an 8-byte VG_MIN_MALLOC_SZB -- because
// it's too hard to make a constant expression that works perfectly in all
// cases.
// 'unsplittable' is set to NULL if the superblock can be split, otherwise
// it is set to the address of the superblock.  An unsplittable superblock
// will contain only one allocated block.  An unsplittable superblock will
// be unmapped when its (only) allocated block is freed.
// The free space at the end of an unsplittable superblock is not used to
// make a free block.  Note that this means that an unsplittable superblock
// can have up to slightly less than 1 page of unused bytes at the end of
// the superblock.
// 'unsplittable' is used to avoid quadratic memory usage for linear
// reallocation of big structures
// (see http://bugs.kde.org/show_bug.cgi?id=250101).
// ??? unsplittable replaces 'void *padding2'.  Chose this
// ??? to avoid changing the alignment logic.  Maybe something cleaner
// ??? can be done.
// A splittable superblock can be reclaimed when all its blocks are freed:
// the reclaim of such a superblock is deferred till either another
// superblock of the same arena can be reclaimed or till a new superblock
// is needed in any arena.
// payload_bytes[] is made a single big Block when the Superblock is
// created, and then can be split and the splittings remerged, but Blocks
// always cover its entire length -- there's never any unused bytes at the
// end, for example.
typedef
   struct _Superblock {
      SizeT n_payload_bytes;
      struct _Superblock* unsplittable;
      UByte padding[ VG_MIN_MALLOC_SZB -
                        ((sizeof(struct _Superblock*) + sizeof(SizeT)) %
                         VG_MIN_MALLOC_SZB) ];
      UByte payload_bytes[0];
   }
   Superblock;
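
/* A worked instance of the elastic padding above (illustrative only,
   assuming a 64-bit platform with VG_MIN_MALLOC_SZB == 16):
   sizeof(struct _Superblock*) + sizeof(SizeT) == 16, so the modulus is 0
   and padding[] gets 16 - 0 == 16 bytes -- an example of the small
   unnecessary padding mentioned in the comment above. */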

// An arena.  'freelist' is a circular, doubly-linked list.  'rz_szB' is
// elastic, in that it can be bigger than asked-for to ensure alignment.
typedef
   struct {
      const HChar* name;
      Bool         clientmem;        // Allocates in the client address space?
      SizeT        rz_szB;           // Red zone size in bytes
      SizeT        min_sblock_szB;   // Minimum superblock size in bytes
      SizeT        min_unsplittable_sblock_szB;
      // Minimum unsplittable superblock size in bytes.  To be marked as
      // unsplittable, a superblock must have a
      // size >= min_unsplittable_sblock_szB and cannot be split.
      // So, to avoid big overhead, superblocks used to provide aligned
      // blocks on big alignments are splittable.
      // Unsplittable superblocks will be reclaimed when their (only)
      // allocated block is freed.
      // Smaller size superblocks are splittable and can be reclaimed when
      // all their blocks are freed.
      Block*       freelist[N_MALLOC_LISTS];
      // A dynamically expanding, ordered array of (pointers to)
      // superblocks in the arena.  If this array is expanded, which
      // is rare, the previous space it occupies is simply abandoned.
      // To avoid having to get yet another block from m_aspacemgr for
      // the first incarnation of this array, the first allocation of
      // it is within this struct.  If it has to be expanded then the
      // new space is acquired from m_aspacemgr as you would expect.
      Superblock** sblocks;
      SizeT        sblocks_size;
      SizeT        sblocks_used;
      Superblock*  sblocks_initial[SBLOCKS_SIZE_INITIAL];
      Superblock*  deferred_reclaimed_sb;

      // VG_(arena_perm_malloc) returns memory from superblocks
      // only used for permanent blocks.  No overhead.  These superblocks
      // are not stored in the sblocks array above.
      Addr         perm_malloc_current; // first byte free in perm_malloc sb.
      Addr         perm_malloc_limit; // maximum usable byte in perm_malloc sb.

      // Stats only
      SizeT        stats__perm_bytes_on_loan;
      SizeT        stats__perm_blocks;

      ULong        stats__nreclaim_unsplit;
      ULong        stats__nreclaim_split;
      /* total # of reclaims executed for unsplittable/splittable
         superblocks */
      SizeT        stats__bytes_on_loan;
      SizeT        stats__bytes_mmaped;
      SizeT        stats__bytes_on_loan_max;
      ULong        stats__tot_blocks; /* total # blocks alloc'd */
      ULong        stats__tot_bytes; /* total # bytes alloc'd */
      ULong        stats__nsearches; /* total # freelist checks */
      // If profiling, when should the next profile happen at
      // (in terms of stats__bytes_on_loan_max) ?
      SizeT        next_profile_at;
      SizeT        stats__bytes_mmaped_max;
   }
   Arena;


/*------------------------------------------------------------*/
/*--- Low-level functions for working with Blocks.         ---*/
/*------------------------------------------------------------*/

#define SIZE_T_0x1      ((SizeT)0x1)

static const char* probably_your_fault =
   "This is probably caused by your program erroneously writing past the\n"
   "end of a heap block and corrupting heap metadata.  If you fix any\n"
   "invalid writes reported by Memcheck, this assertion failure will\n"
   "probably go away.  Please try that before reporting this as a bug.\n";

// Mark a bszB as in-use or free, and remove the in-use/free attribute.
static __inline__
SizeT mk_inuse_bszB ( SizeT bszB )
{
   vg_assert2(bszB != 0, probably_your_fault);
   return bszB & (~SIZE_T_0x1);
}
static __inline__
SizeT mk_free_bszB ( SizeT bszB )
{
   vg_assert2(bszB != 0, probably_your_fault);
   return bszB | SIZE_T_0x1;
}
static __inline__
SizeT mk_plain_bszB ( SizeT bszB )
{
   vg_assert2(bszB != 0, probably_your_fault);
   return bszB & (~SIZE_T_0x1);
}
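
/* An illustrative example of the tagging done by the three functions
   above (values assumed, not from the original source): a block whose
   total size is 0x90 carries bszB == 0x90 while in use and bszB == 0x91
   while on a free list; mk_plain_bszB() recovers 0x90 from either form.
   This works because the bottom bit of a plain (untagged) size is always
   0, thanks to the alignment guarantees described at the top of the
   file. */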

// Forward declaration.
static
void ensure_mm_init ( ArenaId aid );

// Return either 0 or VG_MIN_MALLOC_SZB depending on whether or not
// heap profiling is engaged.
#define hp_overhead_szB() set_at_init_hp_overhead_szB
static SizeT set_at_init_hp_overhead_szB = -1000000;
// startup value chosen to very likely cause a problem if used before
// a proper value is given by ensure_mm_init.

//---------------------------------------------------------------------------

// Get a block's size as stored, ie. with the in-use/free attribute.
static __inline__
SizeT get_bszB_as_is ( Block* b )
{
   UByte* b2 = (UByte*)b;
   SizeT bszB_lo = *ASSUME_ALIGNED(SizeT*, &b2[0 + hp_overhead_szB()]);
   SizeT bszB_hi = *ASSUME_ALIGNED(SizeT*,
                                   &b2[mk_plain_bszB(bszB_lo) - sizeof(SizeT)]);
   vg_assert2(bszB_lo == bszB_hi,
      "Heap block lo/hi size mismatch: lo = %llu, hi = %llu.\n%s",
      (ULong)bszB_lo, (ULong)bszB_hi, probably_your_fault);
   return bszB_lo;
}

// Get a block's plain size, ie. remove the in-use/free attribute.
static __inline__
SizeT get_bszB ( Block* b )
{
   return mk_plain_bszB(get_bszB_as_is(b));
}

// Set the size fields of a block.  bszB may have the in-use/free attribute.
static __inline__
void set_bszB ( Block* b, SizeT bszB )
{
   UByte* b2 = (UByte*)b;
   *ASSUME_ALIGNED(SizeT*, &b2[0 + hp_overhead_szB()]) = bszB;
   *ASSUME_ALIGNED(SizeT*, &b2[mk_plain_bszB(bszB) - sizeof(SizeT)]) = bszB;
}

//---------------------------------------------------------------------------

// Does this block have the in-use attribute?
static __inline__
Bool is_inuse_block ( Block* b )
{
   SizeT bszB = get_bszB_as_is(b);
   vg_assert2(bszB != 0, probably_your_fault);
   return (0 != (bszB & SIZE_T_0x1)) ? False : True;
}

//---------------------------------------------------------------------------

// Return the lower, upper and total overhead in bytes for a block.
// These are determined purely by which arena the block lives in.
static __inline__
SizeT overhead_szB_lo ( Arena* a )
{
   return hp_overhead_szB() + sizeof(SizeT) + a->rz_szB;
}
static __inline__
SizeT overhead_szB_hi ( Arena* a )
{
   return a->rz_szB + sizeof(SizeT);
}
static __inline__
SizeT overhead_szB ( Arena* a )
{
   return overhead_szB_lo(a) + overhead_szB_hi(a);
}

//---------------------------------------------------------------------------

// Return the minimum bszB for a block in this arena.  Can have zero-length
// payloads, so it's the size of the admin bytes.
static __inline__
SizeT min_useful_bszB ( Arena* a )
{
   return overhead_szB(a);
}

//---------------------------------------------------------------------------

// Convert payload size <--> block size (both in bytes).
static __inline__
SizeT pszB_to_bszB ( Arena* a, SizeT pszB )
{
   return pszB + overhead_szB(a);
}
static __inline__
SizeT bszB_to_pszB ( Arena* a, SizeT bszB )
{
   vg_assert2(bszB >= overhead_szB(a), probably_your_fault);
   return bszB - overhead_szB(a);
}

//---------------------------------------------------------------------------

// Get a block's payload size.
static __inline__
SizeT get_pszB ( Arena* a, Block* b )
{
   return bszB_to_pszB(a, get_bszB(b));
}

//---------------------------------------------------------------------------

// Given the addr of a block, return the addr of its payload, and vice versa.
static __inline__
UByte* get_block_payload ( Arena* a, Block* b )
{
   UByte* b2 = (UByte*)b;
   return & b2[ overhead_szB_lo(a) ];
}
// Given the addr of a block's payload, return the addr of the block itself.
static __inline__
Block* get_payload_block ( Arena* a, UByte* payload )
{
   return (Block*)&payload[ -overhead_szB_lo(a) ];
}
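
/* The two functions above are exact inverses: for any block b,
   get_payload_block(a, get_block_payload(a, b)) == b, since both
   just offset by overhead_szB_lo(a), in opposite directions. */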

//---------------------------------------------------------------------------

// Set and get the next and previous link fields of a block.
static __inline__
void set_prev_b ( Block* b, Block* prev_p )
{
   UByte* b2 = (UByte*)b;
   *ASSUME_ALIGNED(Block**, &b2[hp_overhead_szB() + sizeof(SizeT)]) = prev_p;
}
static __inline__
void set_next_b ( Block* b, Block* next_p )
{
   UByte* b2 = (UByte*)b;
   *ASSUME_ALIGNED(Block**,
                   &b2[get_bszB(b) - sizeof(SizeT) - sizeof(void*)]) = next_p;
}
static __inline__
Block* get_prev_b ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return *ASSUME_ALIGNED(Block**, &b2[hp_overhead_szB() + sizeof(SizeT)]);
}
static __inline__
Block* get_next_b ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return *ASSUME_ALIGNED(Block**,
                          &b2[get_bszB(b) - sizeof(SizeT) - sizeof(void*)]);
}

//---------------------------------------------------------------------------

// Set and get the cost-center field of a block.
static __inline__
void set_cc ( Block* b, const HChar* cc )
{
   UByte* b2 = (UByte*)b;
   vg_assert( VG_(clo_profile_heap) );
   *ASSUME_ALIGNED(const HChar**, &b2[0]) = cc;
}
static __inline__
const HChar* get_cc ( Block* b )
{
   UByte* b2 = (UByte*)b;
   vg_assert( VG_(clo_profile_heap) );
   return *ASSUME_ALIGNED(const HChar**, &b2[0]);
}

//---------------------------------------------------------------------------

// Get the block immediately preceding this one in the Superblock.
static __inline__
Block* get_predecessor_block ( Block* b )
{
   UByte* b2 = (UByte*)b;
   SizeT  bszB = mk_plain_bszB(*ASSUME_ALIGNED(SizeT*, &b2[-sizeof(SizeT)]));
   return (Block*)&b2[-bszB];
}

//---------------------------------------------------------------------------

// Read and write the lower and upper red-zone bytes of a block.
static __inline__
void set_rz_lo_byte ( Block* b, UInt rz_byteno, UByte v )
{
   UByte* b2 = (UByte*)b;
   b2[hp_overhead_szB() + sizeof(SizeT) + rz_byteno] = v;
}
static __inline__
void set_rz_hi_byte ( Block* b, UInt rz_byteno, UByte v )
{
   UByte* b2 = (UByte*)b;
   b2[get_bszB(b) - sizeof(SizeT) - rz_byteno - 1] = v;
}
static __inline__
UByte get_rz_lo_byte ( Block* b, UInt rz_byteno )
{
   UByte* b2 = (UByte*)b;
   return b2[hp_overhead_szB() + sizeof(SizeT) + rz_byteno];
}
static __inline__
UByte get_rz_hi_byte ( Block* b, UInt rz_byteno )
{
   UByte* b2 = (UByte*)b;
   return b2[get_bszB(b) - sizeof(SizeT) - rz_byteno - 1];
}

#if defined(ENABLE_INNER_CLIENT_REQUEST)
/* When running as an inner, the block headers before and after
   (see 'Layout of an in-use block:' above) are made non accessible
   by VALGRIND_MALLOCLIKE_BLOCK/VALGRIND_FREELIKE_BLOCK
   to allow the outer to detect block overrun.
   The below two functions are used when these headers must be
   temporarily accessed. */
static void mkBhdrAccess( Arena* a, Block* b )
{
   VALGRIND_MAKE_MEM_DEFINED (b,
                              hp_overhead_szB() + sizeof(SizeT) + a->rz_szB);
   VALGRIND_MAKE_MEM_DEFINED (b + get_bszB(b) - a->rz_szB - sizeof(SizeT),
                              a->rz_szB + sizeof(SizeT));
}

/* Mark block hdr as not accessible.
   !!! Currently, we do not mark the cost center and szB fields inaccessible
   as these are accessed at too many places. */
static void mkBhdrNoAccess( Arena* a, Block* b )
{
   VALGRIND_MAKE_MEM_NOACCESS (b + hp_overhead_szB() + sizeof(SizeT),
                               a->rz_szB);
   VALGRIND_MAKE_MEM_NOACCESS (b + get_bszB(b) - sizeof(SizeT) - a->rz_szB,
                               a->rz_szB);
}

/* Make the cc+szB fields accessible. */
static void mkBhdrSzAccess( Arena* a, Block* b )
{
   VALGRIND_MAKE_MEM_DEFINED (b,
                              hp_overhead_szB() + sizeof(SizeT));
   /* We cannot use get_bszB(b), as this reads the 'hi' szB we want
      to mark accessible.  So, we only access the 'lo' szB. */
   SizeT bszB_lo = mk_plain_bszB(*(SizeT*)&b[0 + hp_overhead_szB()]);
   VALGRIND_MAKE_MEM_DEFINED (b + bszB_lo - sizeof(SizeT),
                              sizeof(SizeT));
}
#endif

/*------------------------------------------------------------*/
/*--- Arena management                                     ---*/
/*------------------------------------------------------------*/

#define CORE_ARENA_MIN_SZB    1048576

// The arena structures themselves.
static Arena vg_arena[VG_N_ARENAS];

// Functions external to this module identify arenas using ArenaIds,
// not Arena*s.  This fn converts the former to the latter.
static Arena* arenaId_to_ArenaP ( ArenaId arena )
{
   vg_assert(arena >= 0 && arena < VG_N_ARENAS);
   return & vg_arena[arena];
}

static ArenaId arenaP_to_ArenaId ( Arena *a )
{
   ArenaId arena = a - vg_arena;
   vg_assert(arena >= 0 && arena < VG_N_ARENAS);
   return arena;
}

// Initialise an arena.  rz_szB is the (default) minimum redzone size;
// it might be overridden by VG_(clo_redzone_size) or
// VG_(clo_core_redzone_size), and it might be made bigger to ensure that
// VG_MIN_MALLOC_SZB is observed.
static
void arena_init ( ArenaId aid, const HChar* name, SizeT rz_szB,
                  SizeT min_sblock_szB, SizeT min_unsplittable_sblock_szB )
{
   SizeT  i;
   Arena* a = arenaId_to_ArenaP(aid);

   // Ensure default redzones are a reasonable size.
   vg_assert(rz_szB <= MAX_REDZONE_SZB);

   /* Override the default redzone size if a clo value was given.
      Note that the clo value can be significantly bigger than MAX_REDZONE_SZB
      to allow the user to chase horrible bugs using up to 1 page
      of protection. */
   if (VG_AR_CLIENT == aid) {
      if (VG_(clo_redzone_size) != -1)
         rz_szB = VG_(clo_redzone_size);
   } else {
      if (VG_(clo_core_redzone_size) != rz_szB)
         rz_szB = VG_(clo_core_redzone_size);
   }

   // Redzones must always be at least the size of a pointer, for holding the
   // prev/next pointer (see the layout details at the top of this file).
   if (rz_szB < sizeof(void*)) rz_szB = sizeof(void*);

   // The size of the low and high admin sections in a block must be a
   // multiple of VG_MIN_MALLOC_SZB.  So we round up the asked-for
   // redzone size if necessary to achieve this.
   a->rz_szB = rz_szB;
   while (0 != overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++;
   vg_assert(overhead_szB_lo(a) - hp_overhead_szB() == overhead_szB_hi(a));

   // Here we have established the effective redzone size.


   vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
   a->name      = name;
   a->clientmem = ( VG_AR_CLIENT == aid ? True : False );

   a->min_sblock_szB = min_sblock_szB;
   a->min_unsplittable_sblock_szB = min_unsplittable_sblock_szB;
   for (i = 0; i < N_MALLOC_LISTS; i++) a->freelist[i] = NULL;

   a->sblocks                  = & a->sblocks_initial[0];
   a->sblocks_size             = SBLOCKS_SIZE_INITIAL;
   a->sblocks_used             = 0;
   a->deferred_reclaimed_sb    = 0;
   a->perm_malloc_current      = 0;
   a->perm_malloc_limit        = 0;
   a->stats__perm_bytes_on_loan= 0;
   a->stats__perm_blocks       = 0;
   a->stats__nreclaim_unsplit  = 0;
   a->stats__nreclaim_split    = 0;
   a->stats__bytes_on_loan     = 0;
   a->stats__bytes_mmaped      = 0;
   a->stats__bytes_on_loan_max = 0;
   a->stats__bytes_mmaped_max  = 0;
   a->stats__tot_blocks        = 0;
   a->stats__tot_bytes         = 0;
   a->stats__nsearches         = 0;
   a->next_profile_at          = 25 * 1000 * 1000;
   vg_assert(sizeof(a->sblocks_initial)
             == SBLOCKS_SIZE_INITIAL * sizeof(Superblock*));
}
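
/* A worked example of the redzone rounding loop above (illustrative only,
   assuming a 64-bit platform with VG_MIN_MALLOC_SZB == 16,
   sizeof(SizeT) == 8 and heap profiling disabled, so
   hp_overhead_szB() == 0): a requested rz_szB of 10 gives
   overhead_szB_lo == 0 + 8 + 10 == 18, not a multiple of 16, so rz_szB
   is bumped one byte at a time until it reaches 24, at which point
   overhead_szB_lo == 32 and the loop stops. */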

/* Print vital stats for an arena. */
void VG_(print_all_arena_stats) ( void )
{
   UInt i;
   for (i = 0; i < VG_N_ARENAS; i++) {
      Arena* a = arenaId_to_ArenaP(i);
      VG_(message)(Vg_DebugMsg,
                   "%-8s: %'13lu/%'13lu max/curr mmap'd, "
                   "%llu/%llu unsplit/split sb unmmap'd, "
                   "%'13lu/%'13lu max/curr, "
                   "%10llu/%10llu totalloc-blocks/bytes,"
                   " %10llu searches %lu rzB\n",
                   a->name,
                   a->stats__bytes_mmaped_max, a->stats__bytes_mmaped,
                   a->stats__nreclaim_unsplit, a->stats__nreclaim_split,
                   a->stats__bytes_on_loan_max,
                   a->stats__bytes_on_loan,
                   a->stats__tot_blocks, a->stats__tot_bytes,
                   a->stats__nsearches,
                   a->rz_szB
      );
   }
}

void VG_(print_arena_cc_analysis) ( void )
{
   UInt i;
   vg_assert( VG_(clo_profile_heap) );
   for (i = 0; i < VG_N_ARENAS; i++) {
      cc_analyse_alloc_arena(i);
   }
}


/* This library is self-initialising, as it makes this more self-contained,
   less coupled with the outside world.  Hence VG_(arena_malloc)() and
   VG_(arena_free)() below always call ensure_mm_init() to ensure things are
   correctly initialised.

   We initialise the client arena separately (and later) because the core
   must do non-client allocation before the tool has a chance to set the
   client arena's redzone size.
*/
static Bool     client_inited = False;
static Bool  nonclient_inited = False;

static
void ensure_mm_init ( ArenaId aid )
{
   static SizeT client_rz_szB = 8;     // default: be paranoid

   /* We use checked red zones (of various sizes) for our internal stuff,
      and an unchecked zone of arbitrary size for the client.  Of
      course the client's red zone can be checked by the tool, eg.
      by using addressability maps, but not by the mechanism implemented
      here, which merely checks at the time of freeing that the red
      zone bytes are unchanged.

      Nb: redzone sizes are *minimums*; they could be made bigger to ensure
      alignment.  Eg. with 8 byte alignment, on 32-bit machines 4 stays as
      4, but 16 becomes 20; but on 64-bit machines 4 becomes 8, and 16
      stays as 16 --- the extra 4 bytes in both are accounted for by the
      larger prev/next ptr.
   */
   if (VG_AR_CLIENT == aid) {
      Int ar_client_sbszB;
      if (client_inited) {
         // This assertion ensures that a tool cannot try to change the client
         // redzone size with VG_(needs_malloc_replacement)() after this module
         // has done its first allocation from the client arena.
         if (VG_(needs).malloc_replacement)
            vg_assert(client_rz_szB == VG_(tdict).tool_client_redzone_szB);
         return;
      }

      // Check and set the client arena redzone size
      if (VG_(needs).malloc_replacement) {
         client_rz_szB = VG_(tdict).tool_client_redzone_szB;
         if (client_rz_szB > MAX_REDZONE_SZB) {
            VG_(printf)( "\nTool error:\n"
                         "  specified redzone size is too big (%llu)\n",
                         (ULong)client_rz_szB);
            VG_(exit)(1);
         }
      }
      // Initialise the client arena.  On all platforms,
      // increasing the superblock size reduces the number of superblocks
      // in the client arena, which makes findSb cheaper.
      ar_client_sbszB = 4194304;
      // superblocks with a size > ar_client_sbszB will be unsplittable
      // (unless used for providing memalign-ed blocks).
      arena_init ( VG_AR_CLIENT,    "client",   client_rz_szB,
                   ar_client_sbszB, ar_client_sbszB+1);
      client_inited = True;

   } else {
      if (nonclient_inited) {
         return;
      }
      set_at_init_hp_overhead_szB =
         VG_(clo_profile_heap)  ? VG_MIN_MALLOC_SZB  : 0;
      // Initialise the non-client arenas
      // Similarly to the client arena, big allocations will be unsplittable.
      arena_init ( VG_AR_CORE,      "core",     CORE_REDZONE_DEFAULT_SZB,
                   4194304, 4194304+1 );
      arena_init ( VG_AR_DINFO,     "dinfo",    CORE_REDZONE_DEFAULT_SZB,
                   1048576, 1048576+1 );
      arena_init ( VG_AR_DEMANGLE,  "demangle", CORE_REDZONE_DEFAULT_SZB,
                   65536,   65536+1 );
      arena_init ( VG_AR_TTAUX,     "ttaux",    CORE_REDZONE_DEFAULT_SZB,
                   65536,   65536+1 );
      nonclient_inited = True;
   }

#  ifdef DEBUG_MALLOC
   VG_(printf)("ZZZ1\n");
   VG_(sanity_check_malloc_all)();
   VG_(printf)("ZZZ2\n");
#  endif
}


/*------------------------------------------------------------*/
/*--- Superblock management                                ---*/
/*------------------------------------------------------------*/

__attribute__((noreturn))
void VG_(out_of_memory_NORETURN) ( const HChar* who, SizeT szB )
{
   static Int outputTrial = 0;
   // We try once to output the full memory state followed by the below
   // message.  If that fails (due to out of memory during the first trial),
   // we try to just output the below message.
   // And then we abandon.

   ULong tot_alloc = VG_(am_get_anonsize_total)();
   const HChar* s1 =
      "\n"
      "    Valgrind's memory management: out of memory:\n"
      "       %s's request for %llu bytes failed.\n"
      "       %'13llu bytes have already been mmap-ed ANONYMOUS.\n"
      "    Valgrind cannot continue.  Sorry.\n\n"
      "    There are several possible reasons for this.\n"
      "    - You have some kind of memory limit in place.  Look at the\n"
      "      output of 'ulimit -a'.  Is there a limit on the size of\n"
      "      virtual memory or address space?\n"
      "    - You have run out of swap space.\n"
      "    - Valgrind has a bug.  If you think this is the case or you are\n"
      "      not sure, please let us know and we'll try to fix it.\n"
      "    Please note that programs can take substantially more memory than\n"
      "    normal when running under Valgrind tools, eg. up to twice or\n"
      "    more, depending on the tool.  On a 64-bit machine, Valgrind\n"
      "    should be able to make use of up to 32GB memory.  On a 32-bit\n"
      "    machine, Valgrind should be able to use all the memory available\n"
      "    to a single process, up to 4GB if that's how you have your\n"
      "    kernel configured.  Most 32-bit Linux setups allow a maximum of\n"
      "    3GB per process.\n\n"
      "    Whatever the reason, Valgrind cannot continue.  Sorry.\n";

   if (outputTrial <= 1) {
      if (outputTrial == 0) {
         outputTrial++;
         // First print the memory stats with the aspacemgr data.
         VG_(am_show_nsegments) (0, "out_of_memory");
         VG_(print_all_arena_stats) ();
         if (VG_(clo_profile_heap))
            VG_(print_arena_cc_analysis) ();
         // And then print some other information that might help.
         VG_(print_all_stats) (False, /* Memory stats */
                               True /* Tool stats */);
         VG_(show_sched_status) (True,  // host_stacktrace
                                 True,  // valgrind_stack_usage
                                 True); // exited_threads
         /* In case we are an inner valgrind, ask the outer to report
            its memory state in its log output. */
         INNER_REQUEST(VALGRIND_MONITOR_COMMAND("v.set log_output"));
         INNER_REQUEST(VALGRIND_MONITOR_COMMAND("v.info memory aspacemgr"));
      }
      outputTrial++;
      VG_(message)(Vg_UserMsg, s1, who, (ULong)szB, tot_alloc);
   } else {
      VG_(debugLog)(0,"mallocfree", s1, who, (ULong)szB, tot_alloc);
   }

   VG_(exit)(1);
}


// Align ptr p upwards to an align-sized boundary.
static
void* align_upwards ( void* p, SizeT align )
{
   Addr a = (Addr)p;
   if ((a % align) == 0) return (void*)a;
   return (void*)(a - (a % align) + align);
}
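
/* Example (illustrative): align_upwards((void*)0x1003, 8) returns
   (void*)0x1008, while an already-aligned address is returned
   unchanged. */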

// Forward declaration.
static
void deferred_reclaimSuperblock ( Arena* a, Superblock* sb);

// If not enough memory available, either aborts (for non-client memory)
// or returns 0 (for client memory).
static
Superblock* newSuperblock ( Arena* a, SizeT cszB )
{
   Superblock* sb;
   SysRes      sres;
   Bool        unsplittable;
   ArenaId     aid;

   // A new superblock is needed for arena a.  We will execute the deferred
   // reclaim in all arenas in order to minimise fragmentation and
   // peak memory usage.
   for (aid = 0; aid < VG_N_ARENAS; aid++) {
      Arena* arena = arenaId_to_ArenaP(aid);
      if (arena->deferred_reclaimed_sb != NULL)
         deferred_reclaimSuperblock (arena, NULL);
   }

   // Take into account admin bytes in the Superblock.
   cszB += sizeof(Superblock);

   if (cszB < a->min_sblock_szB) cszB = a->min_sblock_szB;
   cszB = VG_PGROUNDUP(cszB);

   if (cszB >= a->min_unsplittable_sblock_szB)
      unsplittable = True;
   else
      unsplittable = False;


   if (a->clientmem) {
      // client allocation -- return 0 to client if it fails
      sres = VG_(am_mmap_client_heap)
         ( cszB, VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC );
      if (sr_isError(sres))
         return 0;
      sb = (Superblock*)(Addr)sr_Res(sres);
   } else {
      // non-client allocation -- abort if it fails
      sres = VG_(am_mmap_anon_float_valgrind)( cszB );
      if (sr_isError(sres)) {
         VG_(out_of_memory_NORETURN)("newSuperblock", cszB);
         /* NOTREACHED */
         sb = NULL; /* keep gcc happy */
      } else {
         sb = (Superblock*)(Addr)sr_Res(sres);
      }
   }
   vg_assert(NULL != sb);
   INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(sb, cszB));
   vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
   sb->n_payload_bytes = cszB - sizeof(Superblock);
   sb->unsplittable = (unsplittable ? sb : NULL);
   a->stats__bytes_mmaped += cszB;
   if (a->stats__bytes_mmaped > a->stats__bytes_mmaped_max)
      a->stats__bytes_mmaped_max = a->stats__bytes_mmaped;
   VG_(debugLog)(1, "mallocfree",
                    "newSuperblock at %p (pszB %7lu) %s owner %s/%s\n",
                    sb, sb->n_payload_bytes,
                    (unsplittable ? "unsplittable" : ""),
                    a->clientmem ? "CLIENT" : "VALGRIND", a->name );
   return sb;
}

// Reclaims the given superblock:
//  * removes sb from the arena sblocks list.
//  * munmaps the superblock segment.
static
void reclaimSuperblock ( Arena* a, Superblock* sb)
{
   SysRes sres;
   SizeT  cszB;
   UInt   i, j;

   VG_(debugLog)(1, "mallocfree",
                    "reclaimSuperblock at %p (pszB %7lu) %s owner %s/%s\n",
                    sb, sb->n_payload_bytes,
                    (sb->unsplittable ? "unsplittable" : ""),
                    a->clientmem ? "CLIENT" : "VALGRIND", a->name );

   // Take into account admin bytes in the Superblock.
   cszB = sizeof(Superblock) + sb->n_payload_bytes;

   // removes sb from superblock list.
   for (i = 0; i < a->sblocks_used; i++) {
      if (a->sblocks[i] == sb)
         break;
   }
   vg_assert(i >= 0 && i < a->sblocks_used);
   for (j = i; j < a->sblocks_used; j++)
      a->sblocks[j] = a->sblocks[j+1];
   a->sblocks_used--;
   a->sblocks[a->sblocks_used] = NULL;
   // paranoia: NULLify ptr to reclaimed sb or NULLify copy of ptr to last sb.

   a->stats__bytes_mmaped -= cszB;
   if (sb->unsplittable)
      a->stats__nreclaim_unsplit++;
   else
      a->stats__nreclaim_split++;

   // Now that the sb is removed from the list, munmap its space.
   if (a->clientmem) {
      // reclaimable client allocation
      Bool need_discard = False;
      sres = VG_(am_munmap_client)(&need_discard, (Addr) sb, cszB);
      vg_assert2(! sr_isError(sres), "superblock client munmap failure\n");
      /* We somewhat help the client by discarding the range.
         Note however that if the client has JITted some code in
         a small block that was freed, we do not provide this
         'discard support' */
      /* JRS 2011-Sept-26: it would be nice to move the discard
         outwards somewhat (in terms of calls) so as to make it easier
         to verify that there will be no nonterminating recursive set
         of calls as a result of calling VG_(discard_translations).
         Another day, perhaps. */
      if (need_discard)
         VG_(discard_translations) ((Addr) sb, cszB, "reclaimSuperblock");
   } else {
      // reclaimable non-client allocation
      sres = VG_(am_munmap_valgrind)((Addr) sb, cszB);
      vg_assert2(! sr_isError(sres), "superblock valgrind munmap failure\n");
   }

}

// Find the superblock containing the given chunk.
static
Superblock* findSb ( Arena* a, Block* b )
{
   SizeT min = 0;
   SizeT max = a->sblocks_used;

   while (min <= max) {
      Superblock * sb;
      SizeT pos = min + (max - min)/2;

      vg_assert(pos >= 0 && pos < a->sblocks_used);
      sb = a->sblocks[pos];
      if ((Block*)&sb->payload_bytes[0] <= b
          && b < (Block*)&sb->payload_bytes[sb->n_payload_bytes])
      {
         return sb;
      } else if ((Block*)&sb->payload_bytes[0] <= b) {
         min = pos + 1;
      } else {
         max = pos - 1;
      }
   }
   VG_(printf)("findSb: can't find pointer %p in arena '%s'\n",
                b, a->name );
   VG_(core_panic)("findSb: VG_(arena_free)() in wrong arena?");
   return NULL; /*NOTREACHED*/
}


// Find the superblock containing the given address.
// If the superblock is not found, return NULL.
static
Superblock* maybe_findSb ( Arena* a, Addr ad )
{
   SizeT min = 0;
   SizeT max = a->sblocks_used;

   while (min <= max) {
      Superblock * sb;
      SizeT pos = min + (max - min)/2;
      if (pos < 0 || pos >= a->sblocks_used)
         return NULL;
      sb = a->sblocks[pos];
      if ((Addr)&sb->payload_bytes[0] <= ad
          && ad < (Addr)&sb->payload_bytes[sb->n_payload_bytes]) {
         return sb;
      } else if ((Addr)&sb->payload_bytes[0] <= ad) {
         min = pos + 1;
      } else {
         max = pos - 1;
      }
   }
   return NULL;
}


/*------------------------------------------------------------*/
/*--- Functions for working with freelists.                ---*/
/*------------------------------------------------------------*/

// Nb: Determination of which freelist a block lives on is based on the
// payload size, not block size.

// Convert a payload size in bytes to a freelist number.
static __attribute__((noinline))
UInt pszB_to_listNo_SLOW ( SizeT pszB__divided_by__VG_MIN_MALLOC_SZB )
{
   SizeT n = pszB__divided_by__VG_MIN_MALLOC_SZB;

   if (n < 299) {
      if (n < 114) {
         if (n < 85) {
            if (n < 74) {
               /* -- Exponential slope up, factor 1.05 -- */
               if (n < 67) return 64;
               if (n < 70) return 65;
               /* else */  return 66;
            } else {
               if (n < 77) return 67;
               if (n < 81) return 68;
               /* else */  return 69;
            }
         } else {
            if (n < 99) {
               if (n < 90) return 70;
               if (n < 94) return 71;
               /* else */  return 72;
            } else {
               if (n < 104) return 73;
               if (n < 109) return 74;
               /* else */   return 75;
            }
         }
      } else {
         if (n < 169) {
            if (n < 133) {
               if (n < 120) return 76;
               if (n < 126) return 77;
               /* else */   return 78;
            } else {
               if (n < 139) return 79;
               /* -- Exponential slope up, factor 1.10 -- */
               if (n < 153) return 80;
               /* else */   return 81;
            }
         } else {
            if (n < 224) {
               if (n < 185) return 82;
               if (n < 204) return 83;
               /* else */   return 84;
            } else {
               if (n < 247) return 85;
               if (n < 272) return 86;
               /* else */   return 87;
            }
         }
      }
   } else {
      if (n < 1331) {
         if (n < 530) {
            if (n < 398) {
               if (n < 329) return 88;
               if (n < 362) return 89;
               /* else */   return 90;
            } else {
               if (n < 438) return 91;
               if (n < 482) return 92;
               /* else */   return 93;
            }
         } else {
            if (n < 770) {
               if (n < 583) return 94;
               if (n < 641) return 95;
               /* -- Exponential slope up, factor 1.20 -- */
               /* else */   return 96;
            } else {
               if (n < 924)  return 97;
               if (n < 1109) return 98;
               /* else */    return 99;
            }
         }
      } else {
         if (n < 3974) {
            if (n < 2300) {
               if (n < 1597) return 100;
               if (n < 1916) return 101;
               /* else */    return 102;
            } else {
               if (n < 2760) return 103;
               if (n < 3312) return 104;
               /* else */    return 105;
            }
         } else {
            if (n < 6868) {
               if (n < 4769) return 106;
               if (n < 5723) return 107;
               /* else */    return 108;
            } else {
               if (n < 8241) return 109;
               if (n < 9890) return 110;
               /* else */    return 111;
            }
         }
      }
   }
   /*NOTREACHED*/
   vg_assert(0);
}

static inline
UInt pszB_to_listNo ( SizeT pszB )
{
   SizeT n = pszB / VG_MIN_MALLOC_SZB;
   vg_assert(0 == (pszB % VG_MIN_MALLOC_SZB));

   // The first 64 lists hold blocks of size VG_MIN_MALLOC_SZB * list_num.
   // The final 48 hold bigger blocks and are dealt with by the _SLOW
   // case.
   if (LIKELY(n < 64)) {
      return (UInt)n;
   } else {
      return pszB_to_listNo_SLOW(n);
   }
}
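
/* Worked examples for pszB_to_listNo (illustrative, assuming
   VG_MIN_MALLOC_SZB == 8; note pszB must already be a multiple of
   VG_MIN_MALLOC_SZB here): pszB 0 -> list 0, pszB 8 -> list 1, ...,
   pszB 504 -> list 63.  pszB 512 gives n == 64 and falls into the
   _SLOW path, which returns list 64 for 64 <= n < 67. */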
1140
nethercote2d5b8162004-08-11 09:40:52 +00001141// What is the minimum payload size for a given list?
sewardjde4a1d02002-03-22 01:27:54 +00001142static
nethercote7ac7f7b2004-11-02 12:36:02 +00001143SizeT listNo_to_pszB_min ( UInt listNo )
sewardjde4a1d02002-03-22 01:27:54 +00001144{
sewardj1d2e2e62007-08-23 10:22:44 +00001145 /* Repeatedly computing this function at every request is
1146 expensive. Hence at the first call just cache the result for
1147 every possible argument. */
1148 static SizeT cache[N_MALLOC_LISTS];
1149 static Bool cache_valid = False;
1150 if (!cache_valid) {
1151 UInt i;
1152 for (i = 0; i < N_MALLOC_LISTS; i++) {
1153 SizeT pszB = 0;
1154 while (pszB_to_listNo(pszB) < i)
1155 pszB += VG_MIN_MALLOC_SZB;
1156 cache[i] = pszB;
1157 }
1158 cache_valid = True;
1159 }
1160 /* Returned cached answer. */
njn6e6588c2005-03-13 18:52:48 +00001161 vg_assert(listNo <= N_MALLOC_LISTS);
sewardj1d2e2e62007-08-23 10:22:44 +00001162 return cache[listNo];
sewardjde4a1d02002-03-22 01:27:54 +00001163}
1164
nethercote2d5b8162004-08-11 09:40:52 +00001165// What is the maximum payload size for a given list?
sewardjde4a1d02002-03-22 01:27:54 +00001166static
nethercote7ac7f7b2004-11-02 12:36:02 +00001167SizeT listNo_to_pszB_max ( UInt listNo )
sewardjde4a1d02002-03-22 01:27:54 +00001168{
njn6e6588c2005-03-13 18:52:48 +00001169   vg_assert(listNo < N_MALLOC_LISTS);
1170 if (listNo == N_MALLOC_LISTS-1) {
nethercote2d5b8162004-08-11 09:40:52 +00001171 return MAX_PSZB;
sewardjde4a1d02002-03-22 01:27:54 +00001172 } else {
nethercote2d5b8162004-08-11 09:40:52 +00001173 return listNo_to_pszB_min(listNo+1) - 1;
sewardjde4a1d02002-03-22 01:27:54 +00001174 }
1175}
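/* A sketch of the resulting invariants, for any list i below the last:

      listNo_to_pszB_min(i) == smallest pszB mapping to list i
      listNo_to_pszB_max(i) == listNo_to_pszB_min(i+1) - 1

   with listNo_to_pszB_max(N_MALLOC_LISTS-1) == MAX_PSZB, so the
   [min..max] payload ranges of the lists tile 0..MAX_PSZB exactly,
   with no gaps or overlaps. */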
1176
1177
1178/* A nasty hack to try and reduce fragmentation. Try and replace
1179 a->freelist[lno] with another block on the same list but with a
1180 lower address, with the idea of attempting to recycle the same
1181 blocks rather than cruise through the address space. */
sewardjde4a1d02002-03-22 01:27:54 +00001182static
nethercote7ac7f7b2004-11-02 12:36:02 +00001183void swizzle ( Arena* a, UInt lno )
sewardjde4a1d02002-03-22 01:27:54 +00001184{
nethercote2d5b8162004-08-11 09:40:52 +00001185 Block* p_best;
1186 Block* pp;
1187 Block* pn;
nethercote7ac7f7b2004-11-02 12:36:02 +00001188 UInt i;
sewardjde4a1d02002-03-22 01:27:54 +00001189
1190 p_best = a->freelist[lno];
1191 if (p_best == NULL) return;
1192
1193 pn = pp = p_best;
njn2bf9ba62005-12-25 02:47:12 +00001194
1195 // This loop bound was 20 for a long time, but experiments showed that
1196 // reducing it to 10 gave the same result in all the tests, and 5 got the
1197 // same result in 85--100% of cases. And it's called often enough to be
1198 // noticeable in programs that allocated a lot.
1199 for (i = 0; i < 5; i++) {
nethercote2d5b8162004-08-11 09:40:52 +00001200 pn = get_next_b(pn);
1201 pp = get_prev_b(pp);
sewardjde4a1d02002-03-22 01:27:54 +00001202 if (pn < p_best) p_best = pn;
1203 if (pp < p_best) p_best = pp;
1204 }
1205 if (p_best < a->freelist[lno]) {
nethercote2d5b8162004-08-11 09:40:52 +00001206# ifdef VERBOSE_MALLOC
sewardj9c606bd2008-09-18 18:12:50 +00001207 VG_(printf)("retreat by %ld\n", (Word)(a->freelist[lno] - p_best));
sewardjde4a1d02002-03-22 01:27:54 +00001208# endif
1209 a->freelist[lno] = p_best;
1210 }
1211}
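/* Worked example (hypothetical addresses): if freelist[lno] points at a
   block at 0x5000 on a circular list whose neighbours within 5 links
   include blocks at 0x3000 and 0x8000, the walk above finds 0x3000 and
   the list head is moved there.  Allocations that take the head block
   then tend to reuse the low end of the address space first. */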
1212
1213
1214/*------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +00001215/*--- Sanity-check/debugging machinery. ---*/
1216/*------------------------------------------------------------*/
1217
njn6e6588c2005-03-13 18:52:48 +00001218#define REDZONE_LO_MASK 0x31
1219#define REDZONE_HI_MASK 0x7c
nethercote2d5b8162004-08-11 09:40:52 +00001220
nethercote7ac7f7b2004-11-02 12:36:02 +00001221// Do some crude sanity checks on a Block.
sewardjde4a1d02002-03-22 01:27:54 +00001222static
nethercote2d5b8162004-08-11 09:40:52 +00001223Bool blockSane ( Arena* a, Block* b )
sewardjde4a1d02002-03-22 01:27:54 +00001224{
1225# define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
nethercote7ac7f7b2004-11-02 12:36:02 +00001226 UInt i;
njn402c8612005-08-23 22:11:20 +00001227 // The lo and hi size fields will be checked (indirectly) by the call
1228 // to get_rz_hi_byte().
njn472cc7c2005-07-17 17:20:30 +00001229 if (!a->clientmem && is_inuse_block(b)) {
philippe72faf102012-03-11 22:24:03 +00001230      // In the inner, for memcheck's sake, temporarily mark the redzone accessible.
philippe0b9d0642014-06-30 19:47:24 +00001231 INNER_REQUEST(mkBhdrAccess(a,b));
nethercote2d5b8162004-08-11 09:40:52 +00001232 for (i = 0; i < a->rz_szB; i++) {
njn1dcee092009-02-24 03:07:37 +00001233 if (get_rz_lo_byte(b, i) !=
njn6e6588c2005-03-13 18:52:48 +00001234 (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK))
nethercote2d5b8162004-08-11 09:40:52 +00001235 {BLEAT("redzone-lo");return False;}
njn1dcee092009-02-24 03:07:37 +00001236 if (get_rz_hi_byte(b, i) !=
njn6e6588c2005-03-13 18:52:48 +00001237 (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK))
nethercote2d5b8162004-08-11 09:40:52 +00001238 {BLEAT("redzone-hi");return False;}
philippe0b9d0642014-06-30 19:47:24 +00001239 }
1240 INNER_REQUEST(mkBhdrNoAccess(a,b));
sewardjde4a1d02002-03-22 01:27:54 +00001241 }
1242 return True;
1243# undef BLEAT
1244}
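/* Worked example of the redzone check (hypothetical address): for a
   block header at an address ending in 0xa0, each lo redzone byte must
   hold 0xa0 ^ REDZONE_LO_MASK == 0xa0 ^ 0x31 == 0x91, and each hi
   redzone byte 0xa0 ^ 0x7c == 0xdc.  An overwrite of the redzones is
   therefore overwhelmingly likely to change at least one such byte. */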
1245
philippe0b9d0642014-06-30 19:47:24 +00001246// Sanity checks on a Block inside an unsplittable superblock
1247static
1248Bool unsplittableBlockSane ( Arena* a, Superblock *sb, Block* b )
1249{
1250# define BLEAT(str) VG_(printf)("unsplittableBlockSane: fail -- %s\n",str)
1251 Block* other_b;
1252 UByte* sb_start;
1253 UByte* sb_end;
1254
1255 if (!blockSane (a, b))
1256 {BLEAT("blockSane");return False;}
1257
1258 if (sb->unsplittable != sb)
1259 {BLEAT("unsplittable");return False;}
1260
1261 sb_start = &sb->payload_bytes[0];
1262 sb_end = &sb->payload_bytes[sb->n_payload_bytes - 1];
1263
1264 // b must be first block (i.e. no unused bytes at the beginning)
1265 if ((Block*)sb_start != b)
1266 {BLEAT("sb_start");return False;}
1267
1268 // b must be last block (i.e. no unused bytes at the end)
1269 other_b = b + get_bszB(b);
1270 if (other_b-1 != (Block*)sb_end)
1271 {BLEAT("sb_end");return False;}
1272
1273 return True;
1274# undef BLEAT
1275}
1276
nethercote2d5b8162004-08-11 09:40:52 +00001277// Print superblocks (only for debugging).
sewardjde4a1d02002-03-22 01:27:54 +00001278static
1279void ppSuperblocks ( Arena* a )
1280{
sewardj0b3fd2d2007-08-21 10:55:26 +00001281 UInt i, j, blockno = 1;
njnd0e685c2005-07-17 17:55:42 +00001282 SizeT b_bszB;
sewardjde4a1d02002-03-22 01:27:54 +00001283
sewardj0b3fd2d2007-08-21 10:55:26 +00001284 for (j = 0; j < a->sblocks_used; ++j) {
1285 Superblock * sb = a->sblocks[j];
1286
sewardjde4a1d02002-03-22 01:27:54 +00001287 VG_(printf)( "\n" );
floriana5e06c32015-08-05 21:16:09 +00001288 VG_(printf)( "superblock %u at %p %s, sb->n_pl_bs = %lu\n",
sewardjd043de92011-09-26 11:28:20 +00001289 blockno++, sb, (sb->unsplittable ? "unsplittable" : ""),
sewardjd8b93462011-09-10 10:17:35 +00001290 sb->n_payload_bytes);
njnd0e685c2005-07-17 17:55:42 +00001291 for (i = 0; i < sb->n_payload_bytes; i += b_bszB) {
1292 Block* b = (Block*)&sb->payload_bytes[i];
1293 b_bszB = get_bszB(b);
floriana5e06c32015-08-05 21:16:09 +00001294 VG_(printf)( " block at %u, bszB %lu: ", i, b_bszB );
njn472cc7c2005-07-17 17:20:30 +00001295 VG_(printf)( "%s, ", is_inuse_block(b) ? "inuse" : "free");
nethercote2d5b8162004-08-11 09:40:52 +00001296 VG_(printf)( "%s\n", blockSane(a, b) ? "ok" : "BAD" );
sewardjde4a1d02002-03-22 01:27:54 +00001297 }
nethercote2d5b8162004-08-11 09:40:52 +00001298 vg_assert(i == sb->n_payload_bytes); // no overshoot at end of Sb
sewardjde4a1d02002-03-22 01:27:54 +00001299 }
1300 VG_(printf)( "end of superblocks\n\n" );
1301}
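/* Typical (hypothetical) output:

      superblock 1 at 0x4e000000 , sb->n_pl_bs = 4194304
        block at 0, bszB 64: inuse, ok
        block at 64, bszB 4194240: free, ok
      end of superblocks

   Addresses and sizes here are made up; only the layout follows the
   format strings above. */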
1302
nethercote2d5b8162004-08-11 09:40:52 +00001303// Sanity check both the superblocks and the chains.
nethercote885dd912004-08-03 23:14:00 +00001304static void sanity_check_malloc_arena ( ArenaId aid )
sewardjde4a1d02002-03-22 01:27:54 +00001305{
sewardj0b3fd2d2007-08-21 10:55:26 +00001306 UInt i, j, superblockctr, blockctr_sb, blockctr_li;
nethercote7ac7f7b2004-11-02 12:36:02 +00001307 UInt blockctr_sb_free, listno;
1308 SizeT b_bszB, b_pszB, list_min_pszB, list_max_pszB;
sewardj0b3fd2d2007-08-21 10:55:26 +00001309 Bool thisFree, lastWasFree, sblockarrOK;
nethercote2d5b8162004-08-11 09:40:52 +00001310 Block* b;
1311 Block* b_prev;
nethercote7ac7f7b2004-11-02 12:36:02 +00001312 SizeT arena_bytes_on_loan;
sewardjde4a1d02002-03-22 01:27:54 +00001313 Arena* a;
1314
nethercote885dd912004-08-03 23:14:00 +00001315# define BOMB VG_(core_panic)("sanity_check_malloc_arena")
sewardjde4a1d02002-03-22 01:27:54 +00001316
1317 a = arenaId_to_ArenaP(aid);
sewardj0b3fd2d2007-08-21 10:55:26 +00001318
1319 // Check the superblock array.
1320 sblockarrOK
1321 = a->sblocks != NULL
1322 && a->sblocks_size >= SBLOCKS_SIZE_INITIAL
1323 && a->sblocks_used <= a->sblocks_size
1324 && (a->sblocks_size == SBLOCKS_SIZE_INITIAL
1325 ? (a->sblocks == &a->sblocks_initial[0])
1326 : (a->sblocks != &a->sblocks_initial[0]));
1327 if (!sblockarrOK) {
1328 VG_(printf)("sanity_check_malloc_arena: sblock array BAD\n");
1329 BOMB;
1330 }
1331
nethercote2d5b8162004-08-11 09:40:52 +00001332 // First, traverse all the superblocks, inspecting the Blocks in each.
sewardjde4a1d02002-03-22 01:27:54 +00001333 superblockctr = blockctr_sb = blockctr_sb_free = 0;
1334 arena_bytes_on_loan = 0;
sewardj0b3fd2d2007-08-21 10:55:26 +00001335 for (j = 0; j < a->sblocks_used; ++j) {
1336 Superblock * sb = a->sblocks[j];
sewardjde4a1d02002-03-22 01:27:54 +00001337 lastWasFree = False;
1338 superblockctr++;
nethercote2d5b8162004-08-11 09:40:52 +00001339 for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
sewardjde4a1d02002-03-22 01:27:54 +00001340 blockctr_sb++;
nethercote2d5b8162004-08-11 09:40:52 +00001341 b = (Block*)&sb->payload_bytes[i];
njnd0e685c2005-07-17 17:55:42 +00001342 b_bszB = get_bszB_as_is(b);
sewardjde4a1d02002-03-22 01:27:54 +00001343 if (!blockSane(a, b)) {
floriana5e06c32015-08-05 21:16:09 +00001344 VG_(printf)("sanity_check_malloc_arena: sb %p, block %u "
njn8a7b41b2007-09-23 00:51:24 +00001345 "(bszB %lu): BAD\n", sb, i, b_bszB );
sewardjde4a1d02002-03-22 01:27:54 +00001346 BOMB;
1347 }
njn472cc7c2005-07-17 17:20:30 +00001348 thisFree = !is_inuse_block(b);
sewardjde4a1d02002-03-22 01:27:54 +00001349 if (thisFree && lastWasFree) {
floriana5e06c32015-08-05 21:16:09 +00001350 VG_(printf)("sanity_check_malloc_arena: sb %p, block %u "
njn8a7b41b2007-09-23 00:51:24 +00001351 "(bszB %lu): UNMERGED FREES\n", sb, i, b_bszB );
sewardjde4a1d02002-03-22 01:27:54 +00001352 BOMB;
1353 }
sewardjde4a1d02002-03-22 01:27:54 +00001354 if (thisFree) blockctr_sb_free++;
sewardj0b3fd2d2007-08-21 10:55:26 +00001355 if (!thisFree)
nethercote2d5b8162004-08-11 09:40:52 +00001356 arena_bytes_on_loan += bszB_to_pszB(a, b_bszB);
1357 lastWasFree = thisFree;
sewardjde4a1d02002-03-22 01:27:54 +00001358 }
nethercote2d5b8162004-08-11 09:40:52 +00001359 if (i > sb->n_payload_bytes) {
nethercote885dd912004-08-03 23:14:00 +00001360 VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
sewardjde4a1d02002-03-22 01:27:54 +00001361 "overshoots end\n", sb);
1362 BOMB;
1363 }
sewardjde4a1d02002-03-22 01:27:54 +00001364 }
1365
philippe6e4b7132013-01-18 06:19:49 +00001366 arena_bytes_on_loan += a->stats__perm_bytes_on_loan;
1367
sewardj7d1064a2011-02-23 13:18:56 +00001368 if (arena_bytes_on_loan != a->stats__bytes_on_loan) {
nethercote2d5b8162004-08-11 09:40:52 +00001369# ifdef VERBOSE_MALLOC
sewardjd8b93462011-09-10 10:17:35 +00001370 VG_(printf)( "sanity_check_malloc_arena: a->bytes_on_loan %lu, "
1371 "arena_bytes_on_loan %lu: "
philippe0b9d0642014-06-30 19:47:24 +00001372 "MISMATCH\n", a->stats__bytes_on_loan, arena_bytes_on_loan);
nethercote2d5b8162004-08-11 09:40:52 +00001373# endif
sewardjde4a1d02002-03-22 01:27:54 +00001374 ppSuperblocks(a);
1375 BOMB;
1376 }
1377
1378 /* Second, traverse each list, checking that the back pointers make
1379 sense, counting blocks encountered, and checking that each block
1380 is an appropriate size for this list. */
1381 blockctr_li = 0;
njn6e6588c2005-03-13 18:52:48 +00001382 for (listno = 0; listno < N_MALLOC_LISTS; listno++) {
nethercote2d5b8162004-08-11 09:40:52 +00001383 list_min_pszB = listNo_to_pszB_min(listno);
1384 list_max_pszB = listNo_to_pszB_max(listno);
sewardjde4a1d02002-03-22 01:27:54 +00001385 b = a->freelist[listno];
1386 if (b == NULL) continue;
1387 while (True) {
1388 b_prev = b;
nethercote2d5b8162004-08-11 09:40:52 +00001389 b = get_next_b(b);
1390 if (get_prev_b(b) != b_prev) {
floriana5e06c32015-08-05 21:16:09 +00001391 VG_(printf)( "sanity_check_malloc_arena: list %u at %p: "
sewardj0b3fd2d2007-08-21 10:55:26 +00001392 "BAD LINKAGE\n",
sewardjde4a1d02002-03-22 01:27:54 +00001393 listno, b );
1394 BOMB;
1395 }
njn089f51f2005-07-17 18:12:00 +00001396 b_pszB = get_pszB(a, b);
nethercote2d5b8162004-08-11 09:40:52 +00001397 if (b_pszB < list_min_pszB || b_pszB > list_max_pszB) {
sewardj0b3fd2d2007-08-21 10:55:26 +00001398 VG_(printf)(
floriana5e06c32015-08-05 21:16:09 +00001399 "sanity_check_malloc_arena: list %u at %p: "
njn8a7b41b2007-09-23 00:51:24 +00001400 "WRONG CHAIN SIZE %luB (%luB, %luB)\n",
nethercote2d5b8162004-08-11 09:40:52 +00001401 listno, b, b_pszB, list_min_pszB, list_max_pszB );
sewardjde4a1d02002-03-22 01:27:54 +00001402 BOMB;
1403 }
1404 blockctr_li++;
1405 if (b == a->freelist[listno]) break;
1406 }
1407 }
1408
1409 if (blockctr_sb_free != blockctr_li) {
nethercote2d5b8162004-08-11 09:40:52 +00001410# ifdef VERBOSE_MALLOC
1411 VG_(printf)( "sanity_check_malloc_arena: BLOCK COUNT MISMATCH "
1412                   "(via sbs %u, via lists %u)\n",
1413 blockctr_sb_free, blockctr_li );
1414# endif
sewardjde4a1d02002-03-22 01:27:54 +00001415 ppSuperblocks(a);
1416 BOMB;
1417 }
1418
nethercote885dd912004-08-03 23:14:00 +00001419 if (VG_(clo_verbosity) > 2)
1420 VG_(message)(Vg_DebugMsg,
floriana5e06c32015-08-05 21:16:09 +00001421 "%-8s: %2u sbs, %5u bs, %2u/%-2u free bs, "
1422 "%7lu mmap, %7lu loan\n",
nethercote885dd912004-08-03 23:14:00 +00001423 a->name,
1424 superblockctr,
1425 blockctr_sb, blockctr_sb_free, blockctr_li,
sewardj7d1064a2011-02-23 13:18:56 +00001426 a->stats__bytes_mmaped, a->stats__bytes_on_loan);
sewardjde4a1d02002-03-22 01:27:54 +00001427# undef BOMB
1428}
1429
1430
sewardj9c606bd2008-09-18 18:12:50 +00001431#define N_AN_CCS 1000
1432
florian54fe2022012-10-27 23:07:42 +00001433typedef struct {
1434 ULong nBytes;
1435 ULong nBlocks;
1436 const HChar* cc;
1437} AnCC;
sewardj9c606bd2008-09-18 18:12:50 +00001438
1439static AnCC anCCs[N_AN_CCS];
1440
philippe4f6f3362014-04-19 00:25:54 +00001441/* Sort by decreasing cost-centre nBytes, so that the biggest
1442   cost centres come out at the top. */
florian6bd9dc12012-11-23 16:17:43 +00001443static Int cmp_AnCC_by_vol ( const void* v1, const void* v2 ) {
florian3e798632012-11-24 19:41:54 +00001444 const AnCC* ancc1 = v1;
1445 const AnCC* ancc2 = v2;
philippe4f6f3362014-04-19 00:25:54 +00001446 if (ancc1->nBytes < ancc2->nBytes) return 1;
1447 if (ancc1->nBytes > ancc2->nBytes) return -1;
sewardj9c606bd2008-09-18 18:12:50 +00001448 return 0;
1449}
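/* For example (hypothetical data): entries with nBytes 300, 1000 and 42
   are ordered 1000, 300, 42 by VG_(ssort) below -- a negative return
   means "v1 sorts first", and here "first" means the larger nBytes. */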
1450
1451static void cc_analyse_alloc_arena ( ArenaId aid )
1452{
1453 Word i, j, k;
1454 Arena* a;
1455 Block* b;
1456 Bool thisFree, lastWasFree;
1457 SizeT b_bszB;
1458
florian54fe2022012-10-27 23:07:42 +00001459 const HChar* cc;
sewardj9c606bd2008-09-18 18:12:50 +00001460 UInt n_ccs = 0;
1461 //return;
1462 a = arenaId_to_ArenaP(aid);
1463 if (a->name == NULL) {
1464 /* arena is not in use, is not initialised and will fail the
1465 sanity check that follows. */
1466 return;
1467 }
1468
1469 sanity_check_malloc_arena(aid);
1470
1471 VG_(printf)(
philippe3743c912015-05-20 14:33:30 +00001472 "-------- Arena \"%s\": %'lu/%'lu max/curr mmap'd, "
sewardjd043de92011-09-26 11:28:20 +00001473 "%llu/%llu unsplit/split sb unmmap'd, "
philippe3743c912015-05-20 14:33:30 +00001474 "%'lu/%'lu max/curr on_loan %lu rzB --------\n",
sewardjd8b93462011-09-10 10:17:35 +00001475 a->name, a->stats__bytes_mmaped_max, a->stats__bytes_mmaped,
sewardjd043de92011-09-26 11:28:20 +00001476 a->stats__nreclaim_unsplit, a->stats__nreclaim_split,
philipped99c26a2012-07-31 22:17:28 +00001477 a->stats__bytes_on_loan_max, a->stats__bytes_on_loan,
1478 a->rz_szB
sewardj9c606bd2008-09-18 18:12:50 +00001479 );
1480
1481 for (j = 0; j < a->sblocks_used; ++j) {
1482 Superblock * sb = a->sblocks[j];
1483 lastWasFree = False;
1484 for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
1485 b = (Block*)&sb->payload_bytes[i];
1486 b_bszB = get_bszB_as_is(b);
1487 if (!blockSane(a, b)) {
1488 VG_(printf)("sanity_check_malloc_arena: sb %p, block %ld "
1489 "(bszB %lu): BAD\n", sb, i, b_bszB );
floriane2800c92014-09-15 20:57:45 +00001490 vg_assert(0);
sewardj9c606bd2008-09-18 18:12:50 +00001491 }
1492 thisFree = !is_inuse_block(b);
1493 if (thisFree && lastWasFree) {
1494            VG_(printf)("cc_analyse_alloc_arena: sb %p, block %ld "
1495 "(bszB %lu): UNMERGED FREES\n", sb, i, b_bszB );
floriane2800c92014-09-15 20:57:45 +00001496 vg_assert(0);
sewardj9c606bd2008-09-18 18:12:50 +00001497 }
1498 lastWasFree = thisFree;
1499
1500 if (thisFree) continue;
1501
philippe0b9d0642014-06-30 19:47:24 +00001502 if (VG_(clo_profile_heap))
1503 cc = get_cc(b);
1504 else
1505 cc = "(--profile-heap=yes for details)";
sewardj9c606bd2008-09-18 18:12:50 +00001506 if (0)
1507 VG_(printf)("block: inUse=%d pszB=%d cc=%s\n",
1508 (Int)(!thisFree),
1509 (Int)bszB_to_pszB(a, b_bszB),
1510 get_cc(b));
floriane2800c92014-09-15 20:57:45 +00001511 vg_assert(cc);
sewardj9c606bd2008-09-18 18:12:50 +00001512 for (k = 0; k < n_ccs; k++) {
floriane2800c92014-09-15 20:57:45 +00001513 vg_assert(anCCs[k].cc);
sewardj9c606bd2008-09-18 18:12:50 +00001514 if (0 == VG_(strcmp)(cc, anCCs[k].cc))
1515 break;
1516 }
floriane2800c92014-09-15 20:57:45 +00001517 vg_assert(k >= 0 && k <= n_ccs);
sewardj9c606bd2008-09-18 18:12:50 +00001518
1519 if (k == n_ccs) {
floriane2800c92014-09-15 20:57:45 +00001520 vg_assert(n_ccs < N_AN_CCS-1);
sewardj9c606bd2008-09-18 18:12:50 +00001521 n_ccs++;
1522 anCCs[k].nBytes = 0;
1523 anCCs[k].nBlocks = 0;
1524 anCCs[k].cc = cc;
1525 }
1526
floriane2800c92014-09-15 20:57:45 +00001527 vg_assert(k >= 0 && k < n_ccs && k < N_AN_CCS);
sewardj9c606bd2008-09-18 18:12:50 +00001528 anCCs[k].nBytes += (ULong)bszB_to_pszB(a, b_bszB);
1529 anCCs[k].nBlocks++;
1530 }
1531 if (i > sb->n_payload_bytes) {
1532         VG_(printf)( "cc_analyse_alloc_arena: sb %p: last block "
1533 "overshoots end\n", sb);
floriane2800c92014-09-15 20:57:45 +00001534 vg_assert(0);
sewardj9c606bd2008-09-18 18:12:50 +00001535 }
1536 }
1537
philippe6e4b7132013-01-18 06:19:49 +00001538 if (a->stats__perm_bytes_on_loan > 0) {
floriane2800c92014-09-15 20:57:45 +00001539 vg_assert(n_ccs < N_AN_CCS-1);
philippe6e4b7132013-01-18 06:19:49 +00001540 anCCs[n_ccs].nBytes = a->stats__perm_bytes_on_loan;
1541 anCCs[n_ccs].nBlocks = a->stats__perm_blocks;
1542 anCCs[n_ccs].cc = "perm_malloc";
1543 n_ccs++;
1544 }
1545
sewardj9c606bd2008-09-18 18:12:50 +00001546 VG_(ssort)( &anCCs[0], n_ccs, sizeof(anCCs[0]), cmp_AnCC_by_vol );
1547
1548 for (k = 0; k < n_ccs; k++) {
1549 VG_(printf)("%'13llu in %'9llu: %s\n",
1550 anCCs[k].nBytes, anCCs[k].nBlocks, anCCs[k].cc );
1551 }
1552
1553 VG_(printf)("\n");
1554}
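/* Typical (hypothetical) tail of the output, biggest cost centres
   first, following the "%'13llu in %'9llu: %s" format above:

          1,048,576 in       256: example.c:some_cc
             65,536 in        16: perm_malloc

   Numbers and the "example.c:some_cc" name are made up for
   illustration. */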
1555
1556
nethercote885dd912004-08-03 23:14:00 +00001557void VG_(sanity_check_malloc_all) ( void )
sewardjde4a1d02002-03-22 01:27:54 +00001558{
nethercote7ac7f7b2004-11-02 12:36:02 +00001559 UInt i;
sewardj0b3fd2d2007-08-21 10:55:26 +00001560 for (i = 0; i < VG_N_ARENAS; i++) {
1561 if (i == VG_AR_CLIENT && !client_inited)
1562 continue;
nethercote885dd912004-08-03 23:14:00 +00001563 sanity_check_malloc_arena ( i );
sewardj0b3fd2d2007-08-21 10:55:26 +00001564 }
sewardjde4a1d02002-03-22 01:27:54 +00001565}
1566
philippe07c08522014-05-14 20:39:27 +00001567void VG_(describe_arena_addr) ( Addr a, AddrArenaInfo* aai )
1568{
1569 UInt i;
1570 Superblock *sb;
1571 Arena *arena;
1572
1573 for (i = 0; i < VG_N_ARENAS; i++) {
1574 if (i == VG_AR_CLIENT && !client_inited)
1575 continue;
1576 arena = arenaId_to_ArenaP(i);
1577 sb = maybe_findSb( arena, a );
1578 if (sb != NULL) {
1579 Word j;
1580 SizeT b_bszB;
1581 Block *b = NULL;
1582
1583 aai->aid = i;
1584 aai->name = arena->name;
1585 for (j = 0; j < sb->n_payload_bytes; j += mk_plain_bszB(b_bszB)) {
1586 b = (Block*)&sb->payload_bytes[j];
1587 b_bszB = get_bszB_as_is(b);
1588 if (a < (Addr)b + mk_plain_bszB(b_bszB))
1589 break;
1590 }
1591 vg_assert (b);
1592 aai->block_szB = get_pszB(arena, b);
1593 aai->rwoffset = a - (Addr)get_block_payload(arena, b);
1594 aai->free = !is_inuse_block(b);
1595 return;
1596 }
1597 }
1598 aai->aid = 0;
1599 aai->name = NULL;
1600 aai->block_szB = 0;
1601 aai->rwoffset = 0;
1602 aai->free = False;
1603}
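/* Minimal usage sketch (hypothetical caller; 'addr' is a placeholder):

      AddrArenaInfo aai;
      VG_(describe_arena_addr)( addr, &aai );
      if (aai.name != NULL) {
         // addr lies in arena aai.aid ("aai.name"), inside a block of
         // payload size aai.block_szB, aai.rwoffset bytes past the
         // payload start; the block is free iff aai.free.
      } else {
         // addr is not inside any superblock of any arena:
         // the fall-through path above zeroed all the fields.
      }
*/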
sewardjde4a1d02002-03-22 01:27:54 +00001604
nethercote2d5b8162004-08-11 09:40:52 +00001605/*------------------------------------------------------------*/
1606/*--- Creating and deleting blocks. ---*/
1607/*------------------------------------------------------------*/
1608
1609// Mark the bytes at b .. b+bszB-1 as not in use, and add them to the
1610// relevant free list.
1611
1612static
nethercote7ac7f7b2004-11-02 12:36:02 +00001613void mkFreeBlock ( Arena* a, Block* b, SizeT bszB, UInt b_lno )
jsewardb1a26ae2004-03-14 03:06:37 +00001614{
nethercote7ac7f7b2004-11-02 12:36:02 +00001615 SizeT pszB = bszB_to_pszB(a, bszB);
nethercote2d5b8162004-08-11 09:40:52 +00001616 vg_assert(b_lno == pszB_to_listNo(pszB));
philippe72faf102012-03-11 22:24:03 +00001617 INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(b, bszB));
nethercote2d5b8162004-08-11 09:40:52 +00001618 // Set the size fields and indicate not-in-use.
njn8d3f8452005-07-20 04:12:41 +00001619 set_bszB(b, mk_free_bszB(bszB));
nethercote2d5b8162004-08-11 09:40:52 +00001620
1621 // Add to the relevant list.
1622 if (a->freelist[b_lno] == NULL) {
1623 set_prev_b(b, b);
1624 set_next_b(b, b);
1625 a->freelist[b_lno] = b;
1626 } else {
1627 Block* b_prev = get_prev_b(a->freelist[b_lno]);
1628 Block* b_next = a->freelist[b_lno];
1629 set_next_b(b_prev, b);
1630 set_prev_b(b_next, b);
1631 set_next_b(b, b_next);
1632 set_prev_b(b, b_prev);
1633 }
1634# ifdef DEBUG_MALLOC
1635 (void)blockSane(a,b);
1636# endif
1637}
1638
1639// Mark the bytes at b .. b+bszB-1 as in use, and set up the block
1640// appropriately.
1641static
nethercote7ac7f7b2004-11-02 12:36:02 +00001642void mkInuseBlock ( Arena* a, Block* b, SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +00001643{
nethercote7ac7f7b2004-11-02 12:36:02 +00001644 UInt i;
nethercote2d5b8162004-08-11 09:40:52 +00001645 vg_assert(bszB >= min_useful_bszB(a));
philippe72faf102012-03-11 22:24:03 +00001646 INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(b, bszB));
njn8d3f8452005-07-20 04:12:41 +00001647 set_bszB(b, mk_inuse_bszB(bszB));
nethercote2d5b8162004-08-11 09:40:52 +00001648 set_prev_b(b, NULL); // Take off freelist
1649 set_next_b(b, NULL); // ditto
1650 if (!a->clientmem) {
1651 for (i = 0; i < a->rz_szB; i++) {
njn1dcee092009-02-24 03:07:37 +00001652 set_rz_lo_byte(b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK));
1653 set_rz_hi_byte(b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK));
nethercote2d5b8162004-08-11 09:40:52 +00001654 }
1655 }
1656# ifdef DEBUG_MALLOC
1657 (void)blockSane(a,b);
1658# endif
1659}
1660
philippe0b9d0642014-06-30 19:47:24 +00001661// Mark the bytes at b .. b+bszB-1 as being part of a block that has been shrunk.
1662static
1663void shrinkInuseBlock ( Arena* a, Block* b, SizeT bszB )
1664{
1665 UInt i;
1666
1667 vg_assert(bszB >= min_useful_bszB(a));
1668 INNER_REQUEST(mkBhdrAccess(a,b));
1669 set_bszB(b, mk_inuse_bszB(bszB));
1670 if (!a->clientmem) {
1671 for (i = 0; i < a->rz_szB; i++) {
1672 set_rz_lo_byte(b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK));
1673 set_rz_hi_byte(b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK));
1674 }
1675 }
1676 INNER_REQUEST(mkBhdrNoAccess(a,b));
1677
1678# ifdef DEBUG_MALLOC
1679 (void)blockSane(a,b);
1680# endif
1681}
1682
nethercote2d5b8162004-08-11 09:40:52 +00001683// Remove a block from a given list. Does no sanity checking.
1684static
nethercote7ac7f7b2004-11-02 12:36:02 +00001685void unlinkBlock ( Arena* a, Block* b, UInt listno )
nethercote2d5b8162004-08-11 09:40:52 +00001686{
njn6e6588c2005-03-13 18:52:48 +00001687 vg_assert(listno < N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +00001688 if (get_prev_b(b) == b) {
1689 // Only one element in the list; treat it specially.
1690 vg_assert(get_next_b(b) == b);
1691 a->freelist[listno] = NULL;
1692 } else {
1693 Block* b_prev = get_prev_b(b);
1694 Block* b_next = get_next_b(b);
1695 a->freelist[listno] = b_prev;
1696 set_next_b(b_prev, b_next);
1697 set_prev_b(b_next, b_prev);
1698 swizzle ( a, listno );
1699 }
1700 set_prev_b(b, NULL);
1701 set_next_b(b, NULL);
jsewardb1a26ae2004-03-14 03:06:37 +00001702}
1703
1704
sewardjde4a1d02002-03-22 01:27:54 +00001705/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001706/*--- Core-visible functions. ---*/
sewardjde4a1d02002-03-22 01:27:54 +00001707/*------------------------------------------------------------*/
1708
nethercote2d5b8162004-08-11 09:40:52 +00001709// Align the request size.
1710static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +00001711SizeT align_req_pszB ( SizeT req_pszB )
nethercote2d5b8162004-08-11 09:40:52 +00001712{
nethercote7ac7f7b2004-11-02 12:36:02 +00001713 SizeT n = VG_MIN_MALLOC_SZB-1;
nethercote2d5b8162004-08-11 09:40:52 +00001714 return ((req_pszB + n) & (~n));
1715}
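/* Illustrative values (assuming VG_MIN_MALLOC_SZB == 8):

      align_req_pszB(0) == 0
      align_req_pszB(1) == 8
      align_req_pszB(8) == 8
      align_req_pszB(9) == 16

   i.e. (req + 7) & ~7, the usual round-up-to-multiple idiom. */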
1716
philippe6e4b7132013-01-18 06:19:49 +00001717static
1718void add_one_block_to_stats (Arena* a, SizeT loaned)
1719{
1720 a->stats__bytes_on_loan += loaned;
1721 if (a->stats__bytes_on_loan > a->stats__bytes_on_loan_max) {
1722 a->stats__bytes_on_loan_max = a->stats__bytes_on_loan;
1723 if (a->stats__bytes_on_loan_max >= a->next_profile_at) {
1724         /* next profile after 5% more growth */
1725 a->next_profile_at
1726 = (SizeT)(
1727 (((ULong)a->stats__bytes_on_loan_max) * 105ULL) / 100ULL );
1728 if (VG_(clo_profile_heap))
1729 cc_analyse_alloc_arena(arenaP_to_ArenaId (a));
1730 }
1731 }
1732 a->stats__tot_blocks += (ULong)1;
1733 a->stats__tot_bytes += (ULong)loaned;
1734}
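/* Worked example (hypothetical numbers): if the high-water mark grows
   to 1,000,000 bytes and crosses next_profile_at, a profile is emitted
   and the next one is armed at 1,000,000 * 105 / 100 == 1,050,000
   bytes, i.e. after a further 5% of growth in the peak. */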
1735
florian9b29b142014-09-02 09:49:53 +00001736/* Allocate a piece of memory of req_pszB bytes on the given arena.
1737 The function may return NULL if (and only if) aid == VG_AR_CLIENT.
1738 Otherwise, the function returns a non-NULL value. */
florian54fe2022012-10-27 23:07:42 +00001739void* VG_(arena_malloc) ( ArenaId aid, const HChar* cc, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001740{
nethercote7ac7f7b2004-11-02 12:36:02 +00001741 SizeT req_bszB, frag_bszB, b_bszB;
sewardj0b3fd2d2007-08-21 10:55:26 +00001742 UInt lno, i;
sewardjd8b93462011-09-10 10:17:35 +00001743 Superblock* new_sb = NULL;
nethercote2d5b8162004-08-11 09:40:52 +00001744 Block* b = NULL;
sewardjde4a1d02002-03-22 01:27:54 +00001745 Arena* a;
jsewardb1a26ae2004-03-14 03:06:37 +00001746 void* v;
sewardj7d1064a2011-02-23 13:18:56 +00001747 UWord stats__nsearches = 0;
sewardjde4a1d02002-03-22 01:27:54 +00001748
sewardj45f4e7c2005-09-27 19:20:21 +00001749 ensure_mm_init(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001750 a = arenaId_to_ArenaP(aid);
1751
nethercote7ac7f7b2004-11-02 12:36:02 +00001752 vg_assert(req_pszB < MAX_PSZB);
nethercote2d5b8162004-08-11 09:40:52 +00001753 req_pszB = align_req_pszB(req_pszB);
1754 req_bszB = pszB_to_bszB(a, req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001755
sewardj9c606bd2008-09-18 18:12:50 +00001756 // You must provide a cost-center name against which to charge
1757 // this allocation; it isn't optional.
1758 vg_assert(cc);
1759
nethercote2d5b8162004-08-11 09:40:52 +00001760 // Scan through all the big-enough freelists for a block.
njn4ab6d532007-10-16 23:18:06 +00001761 //
1762 // Nb: this scanning might be expensive in some cases. Eg. if you
1763 // allocate lots of small objects without freeing them, but no
1764   // medium-sized objects, it will repeatedly scan through the whole
1765   // list, each time finding no free block until the last element.
1766 //
1767 // If this becomes a noticeable problem... the loop answers the question
1768 // "where is the first nonempty list above me?" And most of the time,
1769 // you ask the same question and get the same answer. So it would be
1770 // good to somehow cache the results of previous searches.
1771 // One possibility is an array (with N_MALLOC_LISTS elements) of
1772 // shortcuts. shortcut[i] would give the index number of the nearest
1773 // larger list above list i which is non-empty. Then this loop isn't
1774 // necessary. However, we'd have to modify some section [ .. i-1] of the
1775 // shortcut array every time a list [i] changes from empty to nonempty or
1776 // back. This would require care to avoid pathological worst-case
1777 // behaviour.
1778 //
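   /* A sketch of that shortcut idea (hypothetical, not implemented):

         static UInt shortcut[N_MALLOC_LISTS];
         // Invariant: shortcut[i] == smallest nonempty list number >= i
         //            (or N_MALLOC_LISTS if there is none).
         // The scan below would then collapse to:
         //    lno = shortcut[pszB_to_listNo(req_pszB)];
         // at the cost of updating shortcut[0..i] whenever list i
         // flips between empty and nonempty -- the tricky part noted
         // above. */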
njn6e6588c2005-03-13 18:52:48 +00001779 for (lno = pszB_to_listNo(req_pszB); lno < N_MALLOC_LISTS; lno++) {
sewardj7d1064a2011-02-23 13:18:56 +00001780 UWord nsearches_this_level = 0;
sewardjde4a1d02002-03-22 01:27:54 +00001781 b = a->freelist[lno];
nethercote2d5b8162004-08-11 09:40:52 +00001782 if (NULL == b) continue; // If this list is empty, try the next one.
sewardjde4a1d02002-03-22 01:27:54 +00001783 while (True) {
sewardj7d1064a2011-02-23 13:18:56 +00001784 stats__nsearches++;
1785 nsearches_this_level++;
1786 if (UNLIKELY(nsearches_this_level >= 100)
1787 && lno < N_MALLOC_LISTS-1) {
1788 /* Avoid excessive scanning on this freelist, and instead
1789 try the next one up. But first, move this freelist's
1790 start pointer one element along, so as to ensure that
1791 subsequent searches of this list don't endlessly
1792 revisit only these 100 elements, but in fact slowly
1793 progress through the entire list. */
1794 b = a->freelist[lno];
1795 vg_assert(b); // this list must be nonempty!
1796 a->freelist[lno] = get_next_b(b); // step one along
1797 break;
1798 }
njnd0e685c2005-07-17 17:55:42 +00001799 b_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +00001800 if (b_bszB >= req_bszB) goto obtained_block; // success!
1801 b = get_next_b(b);
1802 if (b == a->freelist[lno]) break; // traversed entire freelist
sewardjde4a1d02002-03-22 01:27:54 +00001803 }
sewardjde4a1d02002-03-22 01:27:54 +00001804 }
1805
nethercote2d5b8162004-08-11 09:40:52 +00001806 // If we reach here, no suitable block found, allocate a new superblock
njn6e6588c2005-03-13 18:52:48 +00001807 vg_assert(lno == N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +00001808 new_sb = newSuperblock(a, req_bszB);
1809 if (NULL == new_sb) {
1810 // Should only fail if for client, otherwise, should have aborted
1811 // already.
1812 vg_assert(VG_AR_CLIENT == aid);
1813 return NULL;
sewardjde4a1d02002-03-22 01:27:54 +00001814 }
sewardj0b3fd2d2007-08-21 10:55:26 +00001815
1816 vg_assert(a->sblocks_used <= a->sblocks_size);
1817 if (a->sblocks_used == a->sblocks_size) {
1818 Superblock ** array;
philippe17e76ec2014-04-20 19:50:13 +00001819 SysRes sres = VG_(am_mmap_anon_float_valgrind)(sizeof(Superblock *) *
sewardj0b3fd2d2007-08-21 10:55:26 +00001820 a->sblocks_size * 2);
njncda2f0f2009-05-18 02:12:08 +00001821 if (sr_isError(sres)) {
sewardj0b3fd2d2007-08-21 10:55:26 +00001822         VG_(out_of_memory_NORETURN)("arena_malloc", sizeof(Superblock *) *
1823 a->sblocks_size * 2);
1824 /* NOTREACHED */
1825 }
florian44bd4462014-12-29 17:04:46 +00001826 array = (Superblock**)(Addr)sr_Res(sres);
sewardj0b3fd2d2007-08-21 10:55:26 +00001827 for (i = 0; i < a->sblocks_used; ++i) array[i] = a->sblocks[i];
1828
1829 a->sblocks_size *= 2;
1830 a->sblocks = array;
1831 VG_(debugLog)(1, "mallocfree",
floriana5e06c32015-08-05 21:16:09 +00001832 "sblock array for arena `%s' resized to %lu\n",
sewardj0b3fd2d2007-08-21 10:55:26 +00001833 a->name, a->sblocks_size);
1834 }
1835
1836 vg_assert(a->sblocks_used < a->sblocks_size);
1837
1838 i = a->sblocks_used;
1839 while (i > 0) {
1840 if (a->sblocks[i-1] > new_sb) {
1841 a->sblocks[i] = a->sblocks[i-1];
1842 } else {
1843 break;
1844 }
1845 --i;
1846 }
1847 a->sblocks[i] = new_sb;
1848 a->sblocks_used++;
1849
nethercote2d5b8162004-08-11 09:40:52 +00001850 b = (Block*)&new_sb->payload_bytes[0];
1851 lno = pszB_to_listNo(bszB_to_pszB(a, new_sb->n_payload_bytes));
1852 mkFreeBlock ( a, b, new_sb->n_payload_bytes, lno);
sewardj94c8eb42008-09-19 20:13:39 +00001853 if (VG_(clo_profile_heap))
1854 set_cc(b, "admin.free-new-sb-1");
nethercote2d5b8162004-08-11 09:40:52 +00001855 // fall through
sewardjde4a1d02002-03-22 01:27:54 +00001856
nethercote2d5b8162004-08-11 09:40:52 +00001857 obtained_block:
1858 // Ok, we can allocate from b, which lives in list lno.
sewardjde4a1d02002-03-22 01:27:54 +00001859 vg_assert(b != NULL);
njn6e6588c2005-03-13 18:52:48 +00001860 vg_assert(lno < N_MALLOC_LISTS);
sewardjde4a1d02002-03-22 01:27:54 +00001861 vg_assert(a->freelist[lno] != NULL);
njnd0e685c2005-07-17 17:55:42 +00001862 b_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +00001863 // req_bszB is the size of the block we are after. b_bszB is the
1864   // size of what we've actually got.
1865 vg_assert(b_bszB >= req_bszB);
sewardjde4a1d02002-03-22 01:27:54 +00001866
nethercote2d5b8162004-08-11 09:40:52 +00001867 // Could we split this block and still get a useful fragment?
Elliott Hughesed398002017-06-21 14:41:24 -07001868 // A block in an unsplittable superblock can never be split.
nethercote2d5b8162004-08-11 09:40:52 +00001869 frag_bszB = b_bszB - req_bszB;
sewardjd8b93462011-09-10 10:17:35 +00001870 if (frag_bszB >= min_useful_bszB(a)
sewardjd043de92011-09-26 11:28:20 +00001871 && (NULL == new_sb || ! new_sb->unsplittable)) {
nethercote2d5b8162004-08-11 09:40:52 +00001872 // Yes, split block in two, put the fragment on the appropriate free
1873 // list, and update b_bszB accordingly.
1874 // printf( "split %dB into %dB and %dB\n", b_bszB, req_bszB, frag_bszB );
sewardjde4a1d02002-03-22 01:27:54 +00001875 unlinkBlock(a, b, lno);
nethercote2d5b8162004-08-11 09:40:52 +00001876 mkInuseBlock(a, b, req_bszB);
sewardj94c8eb42008-09-19 20:13:39 +00001877 if (VG_(clo_profile_heap))
1878 set_cc(b, cc);
nethercote2d5b8162004-08-11 09:40:52 +00001879 mkFreeBlock(a, &b[req_bszB], frag_bszB,
1880 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
sewardj94c8eb42008-09-19 20:13:39 +00001881 if (VG_(clo_profile_heap))
1882 set_cc(&b[req_bszB], "admin.fragmentation-1");
njnd0e685c2005-07-17 17:55:42 +00001883 b_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +00001884 } else {
1885 // No, mark as in use and use as-is.
1886 unlinkBlock(a, b, lno);
1887 mkInuseBlock(a, b, b_bszB);
sewardj94c8eb42008-09-19 20:13:39 +00001888 if (VG_(clo_profile_heap))
1889 set_cc(b, cc);
sewardjde4a1d02002-03-22 01:27:54 +00001890 }
sewardjde4a1d02002-03-22 01:27:54 +00001891
nethercote2d5b8162004-08-11 09:40:52 +00001892 // Update stats
sewardj7d1064a2011-02-23 13:18:56 +00001893 SizeT loaned = bszB_to_pszB(a, b_bszB);
philippe6e4b7132013-01-18 06:19:49 +00001894 add_one_block_to_stats (a, loaned);
sewardj7d1064a2011-02-23 13:18:56 +00001895 a->stats__nsearches += (ULong)stats__nsearches;
sewardjde4a1d02002-03-22 01:27:54 +00001896
1897# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00001898 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001899# endif
1900
nethercote2d5b8162004-08-11 09:40:52 +00001901 v = get_block_payload(a, b);
1902 vg_assert( (((Addr)v) & (VG_MIN_MALLOC_SZB-1)) == 0 );
sewardjb5f6f512005-03-10 23:59:00 +00001903
philippe72faf102012-03-11 22:24:03 +00001904 // Which size should we pass to VALGRIND_MALLOCLIKE_BLOCK ?
1905 // We have 2 possible options:
1906 // 1. The final resulting usable size.
1907 // 2. The initial (non-aligned) req_pszB.
1908 // Memcheck implements option 2 easily, as the initial requested size
1909 // is maintained in the mc_chunk data structure.
1910 // This is not as easy in the core, as there is no such structure.
1911 // (note: using the aligned req_pszB is not simpler than 2, as
1912 // requesting an aligned req_pszB might still be satisfied by returning
1913 // a (slightly) bigger block than requested if the remaining part of
1914      // a free block is not big enough to make a free block by itself).
1915      // Implementing Sol 2 could be done the following way:
1916 // After having called VALGRIND_MALLOCLIKE_BLOCK, the non accessible
1917 // redzone just after the block can be used to determine the
1918 // initial requested size.
1919 // Currently, not implemented => we use Option 1.
1920 INNER_REQUEST
1921 (VALGRIND_MALLOCLIKE_BLOCK(v,
1922 VG_(arena_malloc_usable_size)(aid, v),
1923 a->rz_szB, False));
sewardja53462a2007-11-24 23:37:07 +00001924
1925 /* For debugging/testing purposes, fill the newly allocated area
1926 with a definite value in an attempt to shake out any
1927 uninitialised uses of the data (by V core / V tools, not by the
1928 client). Testing on 25 Nov 07 with the values 0x00, 0xFF, 0x55,
1929 0xAA showed no differences in the regression tests on
1930 amd64-linux. Note, is disabled by default. */
1931 if (0 && aid != VG_AR_CLIENT)
1932 VG_(memset)(v, 0xAA, (SizeT)req_pszB);
1933
jsewardb1a26ae2004-03-14 03:06:37 +00001934 return v;
sewardjde4a1d02002-03-22 01:27:54 +00001935}
1936
sewardjd043de92011-09-26 11:28:20 +00001937// If the arena already has a deferred reclaimed superblock and that
1938// superblock is still reclaimable, then that superblock is first
1939// reclaimed.
1940// sb then becomes the arena's new deferred superblock.
1941// Passing NULL as sb reclaims any deferred sb without setting up a new
1942// deferred reclaim.
1943static
1944void deferred_reclaimSuperblock ( Arena* a, Superblock* sb)
1945{
1946
1947 if (sb == NULL) {
1948 if (!a->deferred_reclaimed_sb)
1949 // no deferred sb to reclaim now, nothing to do in the future =>
1950 // return directly.
1951 return;
1952
1953 VG_(debugLog)(1, "mallocfree",
1954 "deferred_reclaimSuperblock NULL "
1955 "(prev %p) owner %s/%s\n",
1956 a->deferred_reclaimed_sb,
1957 a->clientmem ? "CLIENT" : "VALGRIND", a->name );
1958 } else
1959 VG_(debugLog)(1, "mallocfree",
floriana5e06c32015-08-05 21:16:09 +00001960 "deferred_reclaimSuperblock at %p (pszB %7lu) %s "
sewardjd043de92011-09-26 11:28:20 +00001961 "(prev %p) owner %s/%s\n",
1962 sb, sb->n_payload_bytes,
1963 (sb->unsplittable ? "unsplittable" : ""),
1964 a->deferred_reclaimed_sb,
1965 a->clientmem ? "CLIENT" : "VALGRIND", a->name );
1966
1967 if (a->deferred_reclaimed_sb && a->deferred_reclaimed_sb != sb) {
1968      // A different superblock is already deferred; if that one can
1969      // still be reclaimed, reclaim it now.
1970 // Note that we might have a re-deferred reclaim of the same block
1971 // with a sequence: free (causing a deferred reclaim of sb)
1972 // alloc (using a piece of memory of the deferred sb)
1973 // free of the just alloc-ed block (causing a re-defer).
1974 UByte* def_sb_start;
1975 UByte* def_sb_end;
1976 Superblock* def_sb;
1977 Block* b;
1978
1979 def_sb = a->deferred_reclaimed_sb;
1980 def_sb_start = &def_sb->payload_bytes[0];
1981 def_sb_end = &def_sb->payload_bytes[def_sb->n_payload_bytes - 1];
1982 b = (Block *)def_sb_start;
1983 vg_assert (blockSane(a, b));
1984
1985 // Check if the deferred_reclaimed_sb is still reclaimable.
1986 // If yes, we will execute the reclaim.
1987 if (!is_inuse_block(b)) {
1988 // b (at the beginning of def_sb) is not in use.
1989 UInt b_listno;
1990 SizeT b_bszB, b_pszB;
1991 b_bszB = get_bszB(b);
1992 b_pszB = bszB_to_pszB(a, b_bszB);
1993 if (b + b_bszB-1 == (Block*)def_sb_end) {
1994 // b (not in use) covers the full superblock.
1995 // => def_sb is still reclaimable
1996 // => execute now the reclaim of this def_sb.
1997 b_listno = pszB_to_listNo(b_pszB);
1998 unlinkBlock( a, b, b_listno );
1999 reclaimSuperblock (a, def_sb);
2000 a->deferred_reclaimed_sb = NULL;
2001 }
2002 }
2003 }
2004
2005 // sb (possibly NULL) becomes the new deferred reclaimed superblock.
2006 a->deferred_reclaimed_sb = sb;
2007}
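/* Lifecycle sketch: when freeing the last in-use block of a splittable
   superblock, VG_(arena_free) does not unmap it at once; the sb is
   parked in a->deferred_reclaimed_sb by the function above.  If a later
   allocation carves a piece out of the parked sb, a subsequent deferral
   simply replaces it; if instead the parked sb is still wholly free
   when the next deferral (or a NULL flush) arrives, it is unmapped at
   that point. */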
2008
philippe0b9d0642014-06-30 19:47:24 +00002009/* b must be a free block, of size b_bszB.
2010 If b is followed by another free block, merge them.
Elliott Hughesed398002017-06-21 14:41:24 -07002011 If b is preceded by another free block, merge them.
philippe0b9d0642014-06-30 19:47:24 +00002012 If the merge results in the superblock being fully free,
2013 deferred_reclaimSuperblock the superblock. */
2014static void mergeWithFreeNeighbours (Arena* a, Superblock* sb,
2015 Block* b, SizeT b_bszB)
2016{
2017 UByte* sb_start;
2018 UByte* sb_end;
2019 Block* other_b;
2020 SizeT other_bszB;
2021 UInt b_listno;
2022
2023 sb_start = &sb->payload_bytes[0];
2024 sb_end = &sb->payload_bytes[sb->n_payload_bytes - 1];
2025
2026 b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
2027
2028 // See if this block can be merged with its successor.
2029 // First test if we're far enough before the superblock's end to possibly
2030 // have a successor.
2031 other_b = b + b_bszB;
2032 if (other_b+min_useful_bszB(a)-1 <= (Block*)sb_end) {
2033 // Ok, we have a successor, merge if it's not in use.
2034 other_bszB = get_bszB(other_b);
2035 if (!is_inuse_block(other_b)) {
2036 // VG_(printf)( "merge-successor\n");
2037# ifdef DEBUG_MALLOC
2038 vg_assert(blockSane(a, other_b));
2039# endif
2040 unlinkBlock( a, b, b_listno );
2041 unlinkBlock( a, other_b,
2042 pszB_to_listNo(bszB_to_pszB(a,other_bszB)) );
2043 b_bszB += other_bszB;
2044 b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
2045 mkFreeBlock( a, b, b_bszB, b_listno );
2046 if (VG_(clo_profile_heap))
2047 set_cc(b, "admin.free-2");
2048 }
2049 } else {
2050 // Not enough space for successor: check that b is the last block
2051 // ie. there are no unused bytes at the end of the Superblock.
2052 vg_assert(other_b-1 == (Block*)sb_end);
2053 }
2054
2055 // Then see if this block can be merged with its predecessor.
2056 // First test if we're far enough after the superblock's start to possibly
2057 // have a predecessor.
2058 if (b >= (Block*)sb_start + min_useful_bszB(a)) {
2059 // Ok, we have a predecessor, merge if it's not in use.
2060 other_b = get_predecessor_block( b );
2061 other_bszB = get_bszB(other_b);
2062 if (!is_inuse_block(other_b)) {
2063 // VG_(printf)( "merge-predecessor\n");
2064 unlinkBlock( a, b, b_listno );
2065 unlinkBlock( a, other_b,
2066 pszB_to_listNo(bszB_to_pszB(a, other_bszB)) );
2067 b = other_b;
2068 b_bszB += other_bszB;
2069 b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
2070 mkFreeBlock( a, b, b_bszB, b_listno );
2071 if (VG_(clo_profile_heap))
2072 set_cc(b, "admin.free-3");
2073 }
2074 } else {
2075 // Not enough space for predecessor: check that b is the first block,
2076 // ie. there are no unused bytes at the start of the Superblock.
2077 vg_assert((Block*)sb_start == b);
2078 }
2079
2080 /* If the block b just merged is the only block of the superblock sb,
2081 then we defer reclaim sb. */
2082 if ( ((Block*)sb_start == b) && (b + b_bszB-1 == (Block*)sb_end) ) {
2083 deferred_reclaimSuperblock (a, sb);
2084 }
2085}
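/* Coalescing sketch: freeing B when both neighbours are already free

      before:   | A (free) | B (freed) | C (free) |   three blocks
      after:    |         A+B+C (free)            |   one block

   and if the merged block now spans the whole superblock, the sb is
   handed to deferred_reclaimSuperblock above for eventual unmapping. */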
sewardjde4a1d02002-03-22 01:27:54 +00002086
njn25e49d8e72002-09-23 09:36:25 +00002087void VG_(arena_free) ( ArenaId aid, void* ptr )
sewardjde4a1d02002-03-22 01:27:54 +00002088{
2089 Superblock* sb;
nethercote2d5b8162004-08-11 09:40:52 +00002090 Block* b;
philippe0b9d0642014-06-30 19:47:24 +00002091 SizeT b_bszB, b_pszB;
nethercote7ac7f7b2004-11-02 12:36:02 +00002092 UInt b_listno;
sewardjde4a1d02002-03-22 01:27:54 +00002093 Arena* a;
2094
sewardj45f4e7c2005-09-27 19:20:21 +00002095 ensure_mm_init(aid);
sewardjde4a1d02002-03-22 01:27:54 +00002096 a = arenaId_to_ArenaP(aid);
2097
njn25e49d8e72002-09-23 09:36:25 +00002098 if (ptr == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002099 return;
2100 }
2101
nethercote2d5b8162004-08-11 09:40:52 +00002102 b = get_payload_block(a, ptr);
sewardjde4a1d02002-03-22 01:27:54 +00002103
sewardj3187a4e2005-12-04 23:27:14 +00002104 /* If this is one of V's areas, check carefully the block we're
2105 getting back. This picks up simple block-end overruns. */
2106 if (aid != VG_AR_CLIENT)
Elliott Hughesa0664b92017-04-18 17:46:52 -07002107 vg_assert(is_inuse_block(b) && blockSane(a, b));
sewardjde4a1d02002-03-22 01:27:54 +00002108
njne6f9e3b2005-07-17 18:00:57 +00002109 b_bszB = get_bszB(b);
2110 b_pszB = bszB_to_pszB(a, b_bszB);
nethercote2d5b8162004-08-11 09:40:52 +00002111 sb = findSb( a, b );
sewardjde4a1d02002-03-22 01:27:54 +00002112
sewardj7d1064a2011-02-23 13:18:56 +00002113 a->stats__bytes_on_loan -= b_pszB;
njne6f9e3b2005-07-17 18:00:57 +00002114
sewardj3187a4e2005-12-04 23:27:14 +00002115 /* If this is one of V's areas, fill it up with junk to enhance the
2116 chances of catching any later reads of it. Note, 0xDD is
2117 carefully chosen junk :-), in that: (1) 0xDDDDDDDD is an invalid
2118 and non-word-aligned address on most systems, and (2) 0xDD is a
2119 value which is unlikely to be generated by the new compressed
2120 Vbits representation for memcheck. */
2121 if (aid != VG_AR_CLIENT)
2122 VG_(memset)(ptr, 0xDD, (SizeT)b_pszB);
2123
sewardjd043de92011-09-26 11:28:20 +00002124 if (! sb->unsplittable) {
sewardjd8b93462011-09-10 10:17:35 +00002125 // Put this chunk back on a list somewhere.
2126 b_listno = pszB_to_listNo(b_pszB);
2127 mkFreeBlock( a, b, b_bszB, b_listno );
2128 if (VG_(clo_profile_heap))
2129 set_cc(b, "admin.free-1");
sewardjde4a1d02002-03-22 01:27:54 +00002130
philippe0b9d0642014-06-30 19:47:24 +00002131 /* Possibly merge b with its predecessor or successor. */
2132 mergeWithFreeNeighbours (a, sb, b, b_bszB);
sewardjd043de92011-09-26 11:28:20 +00002133
philippe72faf102012-03-11 22:24:03 +00002134 // Inform that ptr has been released. We give redzone size
2135 // 0 instead of a->rz_szB as proper accessibility is done just after.
2136 INNER_REQUEST(VALGRIND_FREELIKE_BLOCK(ptr, 0));
2137
2138 // We need to (re-)establish the minimum accessibility needed
2139 // for free list management. E.g. if block ptr has been put in a free
2140 // list and a neighbour block is released afterwards, the
2141 // "lo" and "hi" portions of the block ptr will be accessed to
2142 // glue the 2 blocks together.
2143 // We could mark the whole block as not accessible, and each time
2144 // transiently mark accessible the needed lo/hi parts. Not done as this
2145 // is quite complex, for very little expected additional bug detection.
2146      // Note that the code below marks the (possibly) merged block, not
2147      // the block corresponding to the ptr argument.
2148
2149 // First mark the whole block unaccessible.
2150 INNER_REQUEST(VALGRIND_MAKE_MEM_NOACCESS(b, b_bszB));
2151 // Then mark the relevant administrative headers as defined.
2152 // No need to mark the heap profile portion as defined, this is not
2153 // used for free blocks.
2154 INNER_REQUEST(VALGRIND_MAKE_MEM_DEFINED(b + hp_overhead_szB(),
2155 sizeof(SizeT) + sizeof(void*)));
2156 INNER_REQUEST(VALGRIND_MAKE_MEM_DEFINED(b + b_bszB
2157 - sizeof(SizeT) - sizeof(void*),
2158 sizeof(SizeT) + sizeof(void*)));
nethercote2d5b8162004-08-11 09:40:52 +00002159 } else {
philippe0b9d0642014-06-30 19:47:24 +00002160 vg_assert(unsplittableBlockSane(a, sb, b));
sewardjd8b93462011-09-10 10:17:35 +00002161
philippe72faf102012-03-11 22:24:03 +00002162 // Inform that ptr has been released. Redzone size value
2163 // is not relevant (so we give 0 instead of a->rz_szB)
2164 // as it is expected that the aspacemgr munmap will be used by
2165 // outer to mark the whole superblock as unaccessible.
2166 INNER_REQUEST(VALGRIND_FREELIKE_BLOCK(ptr, 0));
2167
sewardjd043de92011-09-26 11:28:20 +00002168 // Reclaim immediately the unsplittable superblock sb.
sewardjd8b93462011-09-10 10:17:35 +00002169 reclaimSuperblock (a, sb);
sewardjde4a1d02002-03-22 01:27:54 +00002170 }
2171
2172# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00002173 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00002174# endif
2175
sewardjde4a1d02002-03-22 01:27:54 +00002176}
2177
2178
2179/*
2180 The idea for malloc_aligned() is to allocate a big block, base, and
florianad4e9792015-07-05 21:53:33 +00002181 then split it into two parts: frag, which is returned to the free
2182 pool, and align, which is the bit we're really after. Here's
sewardjde4a1d02002-03-22 01:27:54 +00002183 a picture. L and H denote the block lower and upper overheads, in
nethercote2d5b8162004-08-11 09:40:52 +00002184 bytes. The details are gruesome. Note it is slightly complicated
sewardjde4a1d02002-03-22 01:27:54 +00002185 because the initial request to generate base may return a bigger
2186 block than we asked for, so it is important to distinguish the base
2187 request size and the base actual size.
2188
2189 frag_b align_b
2190 | |
2191 | frag_p | align_p
2192 | | | |
2193 v v v v
2194
2195 +---+ +---+---+ +---+
2196 | L |----------------| H | L |---------------| H |
2197 +---+ +---+---+ +---+
2198
2199 ^ ^ ^
2200 | | :
2201 | base_p this addr must be aligned
2202 |
2203 base_b
2204
2205 . . . . . . .
nethercote2d5b8162004-08-11 09:40:52 +00002206 <------ frag_bszB -------> . . .
2207 . <------------- base_pszB_act -----------> .
sewardjde4a1d02002-03-22 01:27:54 +00002208 . . . . . . .
2209
2210*/
florian54fe2022012-10-27 23:07:42 +00002211void* VG_(arena_memalign) ( ArenaId aid, const HChar* cc,
sewardj9c606bd2008-09-18 18:12:50 +00002212 SizeT req_alignB, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00002213{
nethercote7ac7f7b2004-11-02 12:36:02 +00002214 SizeT base_pszB_req, base_pszB_act, frag_bszB;
nethercote2d5b8162004-08-11 09:40:52 +00002215 Block *base_b, *align_b;
2216 UByte *base_p, *align_p;
nethercote7ac7f7b2004-11-02 12:36:02 +00002217 SizeT saved_bytes_on_loan;
sewardjde4a1d02002-03-22 01:27:54 +00002218 Arena* a;
2219
sewardj45f4e7c2005-09-27 19:20:21 +00002220 ensure_mm_init(aid);
sewardjde4a1d02002-03-22 01:27:54 +00002221 a = arenaId_to_ArenaP(aid);
2222
nethercote7ac7f7b2004-11-02 12:36:02 +00002223 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00002224
sewardj9c606bd2008-09-18 18:12:50 +00002225 // You must provide a cost-center name against which to charge
2226 // this allocation; it isn't optional.
2227 vg_assert(cc);
2228
philippef5f6ed12012-06-15 22:19:59 +00002229   // Check that the requested alignment is plausible; that is, it is
nethercote2d5b8162004-08-11 09:40:52 +00002230   // neither too small nor too big (at most 16 MB), and is
2231   // a power of 2.
2232 if (req_alignB < VG_MIN_MALLOC_SZB
philippef5f6ed12012-06-15 22:19:59 +00002233 || req_alignB > 16 * 1024 * 1024
njn717cde52005-05-10 02:47:21 +00002234 || VG_(log2)( req_alignB ) == -1 /* not a power of 2 */) {
njn36b65172009-04-14 23:43:15 +00002235 VG_(printf)("VG_(arena_memalign)(%p, %lu, %lu)\n"
2236 "bad alignment value %lu\n"
2237 "(it is too small, too big, or not a power of two)",
2238 a, req_alignB, req_pszB, req_alignB );
njn717cde52005-05-10 02:47:21 +00002239 VG_(core_panic)("VG_(arena_memalign)");
nethercote2d5b8162004-08-11 09:40:52 +00002240 /*NOTREACHED*/
sewardjde4a1d02002-03-22 01:27:54 +00002241 }
nethercote2d5b8162004-08-11 09:40:52 +00002242 // Paranoid
2243 vg_assert(req_alignB % VG_MIN_MALLOC_SZB == 0);
sewardjde4a1d02002-03-22 01:27:54 +00002244
2245 /* Required payload size for the aligned chunk. */
nethercote2d5b8162004-08-11 09:40:52 +00002246 req_pszB = align_req_pszB(req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00002247
nethercote2d5b8162004-08-11 09:40:52 +00002248 /* Payload size to request for the big block that we will split up. */
2249 base_pszB_req = req_pszB + min_useful_bszB(a) + req_alignB;
sewardjde4a1d02002-03-22 01:27:54 +00002250
2251 /* Payload ptr for the block we are going to split. Note this
2252 changes a->bytes_on_loan; we save and restore it ourselves. */
sewardj7d1064a2011-02-23 13:18:56 +00002253 saved_bytes_on_loan = a->stats__bytes_on_loan;
sewardjd8b93462011-09-10 10:17:35 +00002254 {
2255 /* As we will split the block given back by VG_(arena_malloc),
sewardjd043de92011-09-26 11:28:20 +00002256 we have to (temporarily) disable unsplittable for this arena,
Elliott Hughesed398002017-06-21 14:41:24 -07002257 as unsplittable superblocks cannot be split. */
sewardjd043de92011-09-26 11:28:20 +00002258 const SizeT save_min_unsplittable_sblock_szB
2259 = a->min_unsplittable_sblock_szB;
2260 a->min_unsplittable_sblock_szB = MAX_PSZB;
sewardjd8b93462011-09-10 10:17:35 +00002261 base_p = VG_(arena_malloc) ( aid, cc, base_pszB_req );
sewardjd043de92011-09-26 11:28:20 +00002262 a->min_unsplittable_sblock_szB = save_min_unsplittable_sblock_szB;
sewardjd8b93462011-09-10 10:17:35 +00002263 }
sewardj7d1064a2011-02-23 13:18:56 +00002264 a->stats__bytes_on_loan = saved_bytes_on_loan;
sewardjde4a1d02002-03-22 01:27:54 +00002265
tom8af1a172005-10-06 12:04:26 +00002266 /* Give up if we couldn't allocate enough space */
2267 if (base_p == 0)
2268 return 0;
philippe72faf102012-03-11 22:24:03 +00002269 /* base_p was marked as allocated by VALGRIND_MALLOCLIKE_BLOCK
2270 inside VG_(arena_malloc). We need to indicate it is free, then
2271 we need to mark it undefined to allow the below code to access is. */
2272 INNER_REQUEST(VALGRIND_FREELIKE_BLOCK(base_p, a->rz_szB));
2273 INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(base_p, base_pszB_req));
tom8af1a172005-10-06 12:04:26 +00002274
sewardjde4a1d02002-03-22 01:27:54 +00002275 /* Block ptr for the block we are going to split. */
nethercote2d5b8162004-08-11 09:40:52 +00002276 base_b = get_payload_block ( a, base_p );
sewardjde4a1d02002-03-22 01:27:54 +00002277
2278 /* Pointer to the payload of the aligned block we are going to
2279 return. This has to be suitably aligned. */
nethercote2d5b8162004-08-11 09:40:52 +00002280 align_p = align_upwards ( base_b + 2 * overhead_szB_lo(a)
2281 + overhead_szB_hi(a),
sewardjde4a1d02002-03-22 01:27:54 +00002282 req_alignB );
nethercote2d5b8162004-08-11 09:40:52 +00002283 align_b = get_payload_block(a, align_p);
sewardjde4a1d02002-03-22 01:27:54 +00002284
2285 /* The block size of the fragment we will create. This must be big
2286 enough to actually create a fragment. */
nethercote2d5b8162004-08-11 09:40:52 +00002287 frag_bszB = align_b - base_b;
2288
2289 vg_assert(frag_bszB >= min_useful_bszB(a));
sewardjde4a1d02002-03-22 01:27:54 +00002290
2291 /* The actual payload size of the block we are going to split. */
njn089f51f2005-07-17 18:12:00 +00002292 base_pszB_act = get_pszB(a, base_b);
sewardjde4a1d02002-03-22 01:27:54 +00002293
nethercote2d5b8162004-08-11 09:40:52 +00002294 /* Create the fragment block, and put it back on the relevant free list. */
2295 mkFreeBlock ( a, base_b, frag_bszB,
2296 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)) );
sewardj94c8eb42008-09-19 20:13:39 +00002297 if (VG_(clo_profile_heap))
2298 set_cc(base_b, "admin.frag-memalign-1");
sewardjde4a1d02002-03-22 01:27:54 +00002299
2300 /* Create the aligned block. */
nethercote2d5b8162004-08-11 09:40:52 +00002301 mkInuseBlock ( a, align_b,
2302 base_p + base_pszB_act
2303 + overhead_szB_hi(a) - (UByte*)align_b );
sewardj94c8eb42008-09-19 20:13:39 +00002304 if (VG_(clo_profile_heap))
2305 set_cc(align_b, cc);
sewardjde4a1d02002-03-22 01:27:54 +00002306
2307 /* Final sanity checks. */
njn472cc7c2005-07-17 17:20:30 +00002308 vg_assert( is_inuse_block(get_payload_block(a, align_p)) );
sewardjde4a1d02002-03-22 01:27:54 +00002309
njn089f51f2005-07-17 18:12:00 +00002310 vg_assert(req_pszB <= get_pszB(a, get_payload_block(a, align_p)));
sewardjde4a1d02002-03-22 01:27:54 +00002311
sewardj7d1064a2011-02-23 13:18:56 +00002312 a->stats__bytes_on_loan += get_pszB(a, get_payload_block(a, align_p));
2313 if (a->stats__bytes_on_loan > a->stats__bytes_on_loan_max) {
2314 a->stats__bytes_on_loan_max = a->stats__bytes_on_loan;
2315 }
2316 /* a->stats__tot_blocks, a->stats__tot_bytes, a->stats__nsearches
2317 are updated by the call to VG_(arena_malloc) just a few lines
2318 above. So we don't need to update them here. */
sewardjde4a1d02002-03-22 01:27:54 +00002319
2320# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00002321 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00002322# endif
2323
nethercote2d5b8162004-08-11 09:40:52 +00002324 vg_assert( (((Addr)align_p) % req_alignB) == 0 );
sewardjb5f6f512005-03-10 23:59:00 +00002325
philippe72faf102012-03-11 22:24:03 +00002326 INNER_REQUEST(VALGRIND_MALLOCLIKE_BLOCK(align_p,
2327 req_pszB, a->rz_szB, False));
sewardjb5f6f512005-03-10 23:59:00 +00002328
nethercote2d5b8162004-08-11 09:40:52 +00002329 return align_p;
2330}
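/* Usage sketch (hypothetical request): VG_(arena_memalign)(aid, cc,
   4096, 100) asks for at least 100 payload bytes at a 4096-byte
   boundary.  Per the code above, it grabs a block of roughly
   100 + min_useful_bszB(a) + 4096 payload bytes, hands the leading
   fragment back to the free lists, and returns the first suitably
   aligned payload address inside the remainder, exactly as in the
   diagram above. */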
2331
2332
njn8b140de2009-02-17 04:31:18 +00002333SizeT VG_(arena_malloc_usable_size) ( ArenaId aid, void* ptr )
nethercote2d5b8162004-08-11 09:40:52 +00002334{
2335 Arena* a = arenaId_to_ArenaP(aid);
2336 Block* b = get_payload_block(a, ptr);
njn089f51f2005-07-17 18:12:00 +00002337 return get_pszB(a, b);
sewardjde4a1d02002-03-22 01:27:54 +00002338}


// Implementation of mallinfo().  There is no recent standard that defines
// the behaviour of mallinfo().  The meaning of the fields in struct
// mallinfo is as follows:
//
//     struct mallinfo  {
//                int arena;     /* total space in arena            */
//                int ordblks;   /* number of ordinary blocks       */
//                int smblks;    /* number of small blocks          */
//                int hblks;     /* number of holding blocks        */
//                int hblkhd;    /* space in holding block headers  */
//                int usmblks;   /* space in small blocks in use    */
//                int fsmblks;   /* space in free small blocks      */
//                int uordblks;  /* space in ordinary blocks in use */
//                int fordblks;  /* space in free ordinary blocks   */
//                int keepcost;  /* space penalty if keep option    */
//                               /* is used                         */
//        };
//
// The glibc documentation about mallinfo (which is somewhat outdated) can
// be found here:
// http://www.gnu.org/software/libtool/manual/libc/Statistics-of-Malloc.html
//
// See also http://bugs.kde.org/show_bug.cgi?id=160956.
//
// Regarding the implementation of VG_(mallinfo)(): we cannot return the
// whole struct as the library function does, because this is called via a
// client request.  So instead the caller passes a pointer to the struct
// and we fill it in (call by reference).
void VG_(mallinfo) ( ThreadId tid, struct vg_mallinfo* mi )
{
   UWord  i, free_blocks, free_blocks_size;
   Arena* a = arenaId_to_ArenaP(VG_AR_CLIENT);

   // Traverse the free lists and calculate free blocks statistics.
   // This may seem slow but glibc works the same way.
   free_blocks_size = free_blocks = 0;
   for (i = 0; i < N_MALLOC_LISTS; i++) {
      Block* b = a->freelist[i];
      if (b == NULL) continue;
      for (;;) {
         free_blocks++;
         free_blocks_size += (UWord)get_pszB(a, b);
         b = get_next_b(b);
         if (b == a->freelist[i]) break;
      }
   }

   // We don't have fastbins, so smblks & fsmblks are always 0.  We also
   // don't have a separate mmap allocator, so set hblks & hblkhd to 0.
   mi->arena    = a->stats__bytes_mmaped;
   mi->ordblks  = free_blocks + VG_(free_queue_length);
   mi->smblks   = 0;
   mi->hblks    = 0;
   mi->hblkhd   = 0;
   mi->usmblks  = 0;
   mi->fsmblks  = 0;
   mi->uordblks = a->stats__bytes_on_loan - VG_(free_queue_volume);
   mi->fordblks = free_blocks_size + VG_(free_queue_volume);
   mi->keepcost = 0; // may want some value in here
}
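
/* Illustrative caller's view of the call-by-reference interface above
   -- a sketch; 'tid' is assumed to be a valid ThreadId:

      struct vg_mallinfo mi;
      VG_(mallinfo)(tid, &mi);
      // mi.uordblks + mi.fordblks now cover the in-use plus free
      // payload bytes of the client arena.
*/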

SizeT VG_(arena_redzone_size) ( ArenaId aid )
{
   ensure_mm_init (VG_AR_CLIENT);
   /* ensure_mm_init will call arena_init if not yet done.
      This then ensures that the arena redzone size is properly
      initialised. */
   return arenaId_to_ArenaP(aid)->rz_szB;
}
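
/* Sketch of a typical query (illustrative only), e.g. a tool sizing
   its shadow state around client blocks:

      SizeT rz = VG_(arena_redzone_size)(VG_AR_CLIENT);
      // each client block carries rz bytes of redzone on both sides
*/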

/*------------------------------------------------------------*/
/*--- Services layered on top of malloc/free.              ---*/
/*------------------------------------------------------------*/

void* VG_(arena_calloc) ( ArenaId aid, const HChar* cc,
                          SizeT nmemb, SizeT bytes_per_memb )
{
   SizeT size;
   void* p;

   size = nmemb * bytes_per_memb;
   vg_assert(size >= nmemb && size >= bytes_per_memb); // check against overflow

   p = VG_(arena_malloc) ( aid, cc, size );

   if (p != NULL)
      VG_(memset)(p, 0, size);

   return p;
}
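
/* Worked example of the overflow guard above (illustrative): on a
   32-bit build, nmemb == 0x80000000 and bytes_per_memb == 2 wrap to
   size == 0, which fails 'size >= nmemb' and trips the assert.  Note
   the guard is a cheap heuristic rather than a complete overflow
   check: some wrapped products can still compare >= both operands
   (e.g. 0x10001 * 0x10001 wraps to 0x20001 in 32 bits). */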


void* VG_(arena_realloc) ( ArenaId aid, const HChar* cc,
                           void* ptr, SizeT req_pszB )
{
   Arena* a;
   SizeT  old_pszB;
   void*  p_new;
   Block* b;

   ensure_mm_init(aid);
   a = arenaId_to_ArenaP(aid);

   vg_assert(req_pszB < MAX_PSZB);

   if (NULL == ptr) {
      return VG_(arena_malloc)(aid, cc, req_pszB);
   }

   if (req_pszB == 0) {
      VG_(arena_free)(aid, ptr);
      return NULL;
   }

   b = get_payload_block(a, ptr);
   vg_assert(blockSane(a, b));

   vg_assert(is_inuse_block(b));
   old_pszB = get_pszB(a, b);

   if (req_pszB <= old_pszB) {
      return ptr;
   }

   p_new = VG_(arena_malloc) ( aid, cc, req_pszB );

   VG_(memcpy)(p_new, ptr, old_pszB);

   VG_(arena_free)(aid, ptr);

   return p_new;
}
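
/* Sketch of the semantics above ('q' is assumed to be a live block
   from this arena; illustrative only): requests that fit the current
   payload hand back the same pointer, larger ones allocate afresh
   and copy.

      void* p = VG_(arena_realloc)(aid, cc, q, 16);
      // if 16 <= current payload size: p == q, block untouched
      // otherwise: p is a new block holding the old contents,
      //            and q has been freed.
*/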


void VG_(arena_realloc_shrink) ( ArenaId aid,
                                 void* ptr, SizeT req_pszB )
{
   SizeT  req_bszB, frag_bszB, b_bszB;
   Superblock* sb;
   Arena* a;
   SizeT  old_pszB;
   Block* b;

   ensure_mm_init(aid);

   a = arenaId_to_ArenaP(aid);
   b = get_payload_block(a, ptr);
   vg_assert(blockSane(a, b));
   vg_assert(is_inuse_block(b));

   old_pszB = get_pszB(a, b);
   req_pszB = align_req_pszB(req_pszB);
   vg_assert(old_pszB >= req_pszB);
   if (old_pszB == req_pszB)
      return;

   sb = findSb( a, b );
   if (sb->unsplittable) {
      const UByte* sb_start = &sb->payload_bytes[0];
      const UByte* sb_end = &sb->payload_bytes[sb->n_payload_bytes - 1];
      Addr  frag;

      vg_assert(unsplittableBlockSane(a, sb, b));

      frag = VG_PGROUNDUP((Addr) sb
                          + sizeof(Superblock) + pszB_to_bszB(a, req_pszB));
      frag_bszB = (Addr)sb_end - frag + 1;

      if (frag_bszB >= VKI_PAGE_SIZE) {
         SysRes sres;

         a->stats__bytes_on_loan -= old_pszB;
         b_bszB = (UByte*)frag - sb_start;
         shrinkInuseBlock(a, b, b_bszB);
         INNER_REQUEST
            (VALGRIND_RESIZEINPLACE_BLOCK(ptr,
                                          old_pszB,
                                          VG_(arena_malloc_usable_size)(aid, ptr),
                                          a->rz_szB));
         /* Give the admin headers the minimum accessibility they need. */
         INNER_REQUEST(mkBhdrSzAccess(a, b));
         a->stats__bytes_on_loan += bszB_to_pszB(a, b_bszB);

         sb->n_payload_bytes -= frag_bszB;
         VG_(debugLog)(1, "mallocfree",
                       "shrink superblock %p to (pszB %7lu) "
                       "owner %s/%s (munmap-ing %p %7lu)\n",
                       sb, sb->n_payload_bytes,
                       a->clientmem ? "CLIENT" : "VALGRIND", a->name,
                       (void*) frag, frag_bszB);
         if (a->clientmem) {
            Bool need_discard = False;
            sres = VG_(am_munmap_client)(&need_discard,
                                         frag,
                                         frag_bszB);
            vg_assert (!need_discard);
         } else {
            sres = VG_(am_munmap_valgrind)(frag,
                                           frag_bszB);
         }
         vg_assert2(! sr_isError(sres), "shrink superblock munmap failure\n");
         a->stats__bytes_mmaped -= frag_bszB;

         vg_assert(unsplittableBlockSane(a, sb, b));
      }
   } else {
      req_bszB = pszB_to_bszB(a, req_pszB);
      b_bszB = get_bszB(b);
      frag_bszB = b_bszB - req_bszB;
      if (frag_bszB < min_useful_bszB(a))
         return;

      a->stats__bytes_on_loan -= old_pszB;
      shrinkInuseBlock(a, b, req_bszB);
      INNER_REQUEST
         (VALGRIND_RESIZEINPLACE_BLOCK(ptr,
                                       old_pszB,
                                       VG_(arena_malloc_usable_size)(aid, ptr),
                                       a->rz_szB));
      /* Give the admin headers the minimum accessibility they need. */
      INNER_REQUEST(mkBhdrSzAccess(a, b));

      mkFreeBlock(a, &b[req_bszB], frag_bszB,
                  pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
      /* Mark the admin headers as accessible. */
      INNER_REQUEST(mkBhdrAccess(a, &b[req_bszB]));
      if (VG_(clo_profile_heap))
         set_cc(&b[req_bszB], "admin.fragmentation-2");
      /* Possibly merge &b[req_bszB] with its free neighbours. */
      mergeWithFreeNeighbours(a, sb, &b[req_bszB], frag_bszB);

      b_bszB = get_bszB(b);
      a->stats__bytes_on_loan += bszB_to_pszB(a, b_bszB);
   }

   vg_assert (blockSane(a, b));
#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif
}
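
/* Illustrative call sequence for the in-place shrink above (a sketch):
   an over-allocating caller can trim without moving data.

      void* p = VG_(arena_malloc)(aid, cc, 1024*1024);
      // ... discover only 4096 bytes are really needed ...
      VG_(arena_realloc_shrink)(aid, p, 4096);
      // p is unchanged and still holds the first 4096 bytes; for
      // unsplittable superblocks the page-aligned tail is munmap'd,
      // otherwise the tail becomes an ordinary free block.
*/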

/* Inline just for the wrapper VG_(strdup) below */
__inline__ HChar* VG_(arena_strdup) ( ArenaId aid, const HChar* cc,
                                      const HChar* s )
{
   Int    i;
   Int    len;
   HChar* res;

   if (s == NULL)
      return NULL;

   len = VG_(strlen)(s) + 1;
   res = VG_(arena_malloc) (aid, cc, len);

   for (i = 0; i < len; i++)
      res[i] = s[i];
   return res;
}

void* VG_(arena_perm_malloc) ( ArenaId aid, SizeT size, Int align )
{
   Arena* a;

   ensure_mm_init(aid);
   a = arenaId_to_ArenaP(aid);

   align = align - 1;
   size = (size + align) & ~align;

   if (UNLIKELY(a->perm_malloc_current + size > a->perm_malloc_limit)) {
      // Get a new superblock, but do not insert it into the superblock
      // list.  The Superblock structure itself is not needed, so we use
      // its full memory range.  This superblock is nevertheless counted
      // in the mmap'd statistics.
      Superblock* new_sb = newSuperblock (a, size);
      a->perm_malloc_limit = (Addr)&new_sb->payload_bytes[new_sb->n_payload_bytes - 1];

      // Allocating from the very beginning of the superblock is fine,
      // since from here on we "lose" it as a superblock anyway.
      a->perm_malloc_current = (Addr)new_sb;
   }

   a->stats__perm_blocks += 1;
   a->stats__perm_bytes_on_loan += size;
   add_one_block_to_stats (a, size);

   a->perm_malloc_current += size;
   return (void*)(a->perm_malloc_current - size);
}
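
/* Worked example of the power-of-two round-up above (illustrative
   arithmetic only): with align == 16 and size == 30,

      align = 16 - 1            == 0xF
      size  = (30 + 0xF) & ~0xF == 32

   so successive requests with the same power-of-two alignment keep
   the bump pointer aligned.  A non-power-of-two align would give
   wrong results here. */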

/*------------------------------------------------------------*/
/*--- Tool-visible functions.                              ---*/
/*------------------------------------------------------------*/

// All just wrappers to avoid exposing arenas to tools.

// This function never returns NULL.
void* VG_(malloc) ( const HChar* cc, SizeT nbytes )
{
   return VG_(arena_malloc) ( VG_AR_CORE, cc, nbytes );
}

void VG_(free) ( void* ptr )
{
   VG_(arena_free) ( VG_AR_CORE, ptr );
}

void* VG_(calloc) ( const HChar* cc, SizeT nmemb, SizeT bytes_per_memb )
{
   return VG_(arena_calloc) ( VG_AR_CORE, cc, nmemb, bytes_per_memb );
}

void* VG_(realloc) ( const HChar* cc, void* ptr, SizeT size )
{
   return VG_(arena_realloc) ( VG_AR_CORE, cc, ptr, size );
}

void VG_(realloc_shrink) ( void* ptr, SizeT size )
{
   VG_(arena_realloc_shrink) ( VG_AR_CORE, ptr, size );
}

HChar* VG_(strdup) ( const HChar* cc, const HChar* s )
{
   return VG_(arena_strdup) ( VG_AR_CORE, cc, s );
}

void* VG_(perm_malloc) ( SizeT size, Int align )
{
   return VG_(arena_perm_malloc) ( VG_AR_CORE, size, align );
}
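
/* Sketch of typical use (illustrative; 'Foo' is a hypothetical type):
   for long-lived, never-released core data, the permanent allocator
   avoids per-block admin overhead:

      Foo* f = VG_(perm_malloc)(sizeof(Foo), 8);
      // there is no VG_(free) counterpart for this pointer
*/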


/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/