/*--------------------------------------------------------------------*/
/*--- An implementation of malloc/free which doesn't use sbrk.     ---*/
/*---                                               m_mallocfree.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2015 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcbase.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_threadstate.h"   // For VG_INVALID_THREADID
#include "pub_core_gdbserver.h"
#include "pub_core_transtab.h"
#include "pub_core_tooliface.h"

#include "pub_core_inner.h"
#if defined(ENABLE_INNER_CLIENT_REQUEST)
#include "memcheck/memcheck.h"
#endif

// #define DEBUG_MALLOC      // turn on heavyweight debugging machinery
// #define VERBOSE_MALLOC    // make verbose, esp. in debugging machinery

/* Number and total size of blocks in free queue. Used by mallinfo(). */
Long VG_(free_queue_volume) = 0;
Long VG_(free_queue_length) = 0;

static void cc_analyse_alloc_arena ( ArenaId aid ); /* fwds */

/*------------------------------------------------------------*/
/*--- Main types                                           ---*/
/*------------------------------------------------------------*/

#define N_MALLOC_LISTS     112    // do not change this

// The amount you can ask for is limited only by sizeof(SizeT)...
#define MAX_PSZB              (~((SizeT)0x0))

// Each arena has a sorted array of superblocks, which expands
// dynamically.  This is its initial size.
#define SBLOCKS_SIZE_INITIAL 50

typedef UChar UByte;

/* Layout of an in-use block:

      cost center (OPTIONAL)   (VG_MIN_MALLOC_SZB bytes, only when h-p enabled)
      this block total szB     (sizeof(SizeT) bytes)
      red zone bytes           (depends on Arena.rz_szB, but >= sizeof(void*))
      (payload bytes)
      red zone bytes           (depends on Arena.rz_szB, but >= sizeof(void*))
      this block total szB     (sizeof(SizeT) bytes)

   Layout of a block on the free list:

      cost center (OPTIONAL)   (VG_MIN_MALLOC_SZB bytes, only when h-p enabled)
      this block total szB     (sizeof(SizeT) bytes)
      freelist previous ptr    (sizeof(void*) bytes)
      excess red zone bytes    (if Arena.rz_szB > sizeof(void*))
      (payload bytes)
      excess red zone bytes    (if Arena.rz_szB > sizeof(void*))
      freelist next ptr        (sizeof(void*) bytes)
      this block total szB     (sizeof(SizeT) bytes)

   Total size in bytes (bszB) and payload size in bytes (pszB)
   are related by:

      bszB == pszB + 2*sizeof(SizeT) + 2*a->rz_szB

   when heap profiling is not enabled, and

      bszB == pszB + 2*sizeof(SizeT) + 2*a->rz_szB + VG_MIN_MALLOC_SZB

   when it is enabled.  It follows that the minimum overhead per heap
   block for arenas used by the core is:

      32-bit platforms:  2*4 + 2*4 == 16 bytes
      64-bit platforms:  2*8 + 2*8 == 32 bytes

   when heap profiling is not enabled, and

      32-bit platforms:  2*4 + 2*4 + 8  == 24 bytes
      64-bit platforms:  2*8 + 2*8 + 16 == 48 bytes

   when it is enabled.  In all cases, extra overhead may be incurred
   when rounding the payload size up to VG_MIN_MALLOC_SZB.

   Furthermore, both size fields in the block have their least-significant
   bit set if the block is not in use, and unset if it is in use.
   (The bottom 3 or so bits are always free for this because of alignment.)
   A block size of zero is not possible, because a block always has at
   least two SizeTs and two pointers of overhead.

   Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned.  This is
   achieved by ensuring that Superblocks are VG_MIN_MALLOC_SZB-aligned
   (see newSuperblock() for how), and that the lengths of the following
   things are a multiple of VG_MIN_MALLOC_SZB:
   - Superblock admin section lengths (due to elastic padding)
   - Block admin section (low and high) lengths (due to elastic redzones)
   - Block payload lengths (due to req_pszB rounding up)

   The heap-profile cost-center field is 8 bytes even on 32 bit
   platforms.  This is so as to keep the payload field 8-aligned.  On
   a 64-bit platform, this cc-field contains a pointer to a const
   HChar*, which is the cost center name.  On 32-bit platforms, the
   pointer lives in the lower-addressed half of the field, regardless
   of the endianness of the host.
*/
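
/* Illustrative sketch only (not from the original source): the size
   arithmetic above, worked through for a 64-bit build with an 8-byte
   redzone and heap profiling disabled.  All names here are
   hypothetical locals. */
#if 0
   SizeT req_pszB = 13;                      // caller asks for 13 bytes
   // payloads are rounded up to VG_MIN_MALLOC_SZB (assumed 8): 13 -> 16
   SizeT pszB = (req_pszB + 8-1) & ~(SizeT)(8-1);
   // bszB == pszB + 2*sizeof(SizeT) + 2*rz_szB == 16 + 16 + 16 == 48
   SizeT bszB = pszB + 2*sizeof(SizeT) + 2*8;
   // in use, both stored size fields hold 48 (LSB clear); on a free
   // list they would hold 49 (LSB set).
#endif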
typedef
   struct {
      // No fields are actually used in this struct, because a Block has
      // many variable sized fields and so can't be accessed
      // meaningfully with normal fields.  So we use access functions all
      // the time.  This struct gives us a type to use, though.  Also, we
      // make sizeof(Block) 1 byte so that we can do arithmetic with the
      // Block* type in increments of 1!
      UByte dummy;
   }
   Block;

// A superblock.  'padding' is never used, it just ensures that if the
// entire Superblock is aligned to VG_MIN_MALLOC_SZB, then payload_bytes[]
// will be too.  It can add small amounts of padding unnecessarily -- eg.
// 8-bytes on 32-bit machines with an 8-byte VG_MIN_MALLOC_SZB -- because
// it's too hard to make a constant expression that works perfectly in all
// cases.
// 'unsplittable' is set to NULL if the superblock can be split, otherwise
// it is set to the address of the superblock.  An unsplittable superblock
// will contain only one allocated block, and will be unmapped when its
// (only) allocated block is freed.
// The free space at the end of an unsplittable superblock is not used to
// make a free block.  Note that this means an unsplittable superblock can
// have up to slightly less than 1 page of unused bytes at the end of the
// superblock.
// 'unsplittable' is used to avoid quadratic memory usage for linear
// reallocation of big structures
// (see http://bugs.kde.org/show_bug.cgi?id=250101).
// ??? unsplittable replaces 'void *padding2'.  Chose this
// ??? to avoid changing the alignment logic.  Maybe something cleaner
// ??? can be done.
// A splittable superblock can be reclaimed when all its blocks are freed:
// the reclaim of such a superblock is deferred till either another
// superblock of the same arena can be reclaimed or till a new superblock
// is needed in any arena.
// payload_bytes[] is made a single big Block when the Superblock is
// created, and then can be split and the splittings remerged, but Blocks
// always cover its entire length -- there's never any unused bytes at the
// end, for example.
typedef
   struct _Superblock {
      SizeT n_payload_bytes;
      struct _Superblock* unsplittable;
      UByte padding[ VG_MIN_MALLOC_SZB -
                        ((sizeof(struct _Superblock*) + sizeof(SizeT)) %
                         VG_MIN_MALLOC_SZB) ];
      UByte payload_bytes[0];
   }
   Superblock;

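/* Illustrative sketch only (not from the original source): because
   payload_bytes[] is a zero-length trailing array, sizeof(Superblock)
   equals the admin-section length, so the elastic padding can be
   checked statically.  E.g. on a 32-bit build with an 8-byte
   VG_MIN_MALLOC_SZB, the two leading fields occupy 8 bytes, 'padding'
   adds a harmless extra 8, and payload_bytes[] still starts 8-aligned.
   The typedef name is hypothetical. */
#if 0
typedef char sb_admin_len_is_aligned
   [ (sizeof(Superblock) % VG_MIN_MALLOC_SZB == 0) ? 1 : -1 ];
#endif
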
// An arena.  'freelist' is a circular, doubly-linked list.  'rz_szB' is
// elastic, in that it can be bigger than asked-for to ensure alignment.
typedef
   struct {
      const HChar* name;
      Bool         clientmem;        // Allocates in the client address space?
      SizeT        rz_szB;           // Red zone size in bytes
      SizeT        min_sblock_szB;   // Minimum superblock size in bytes
      SizeT        min_unsplittable_sblock_szB;
      // Minimum unsplittable superblock size in bytes.  To be marked as
      // unsplittable, a superblock must have a
      // size >= min_unsplittable_sblock_szB and cannot be split.
      // So, to avoid big overhead, superblocks used to provide aligned
      // blocks on big alignments are splittable.
      // Unsplittable superblocks will be reclaimed when their (only)
      // allocated block is freed.
      // Smaller size superblocks are splittable and can be reclaimed when all
      // their blocks are freed.
      Block*       freelist[N_MALLOC_LISTS];
      // A dynamically expanding, ordered array of (pointers to)
      // superblocks in the arena.  If this array is expanded, which
      // is rare, the previous space it occupies is simply abandoned.
      // To avoid having to get yet another block from m_aspacemgr for
      // the first incarnation of this array, the first allocation of
      // it is within this struct.  If it has to be expanded then the
      // new space is acquired from m_aspacemgr as you would expect.
      Superblock** sblocks;
      SizeT        sblocks_size;
      SizeT        sblocks_used;
      Superblock*  sblocks_initial[SBLOCKS_SIZE_INITIAL];
      Superblock*  deferred_reclaimed_sb;

      // VG_(arena_perm_malloc) returns memory from superblocks
      // only used for permanent blocks.  No overhead.  These superblocks
      // are not stored in the sblocks array above.
      Addr         perm_malloc_current; // first byte free in perm_malloc sb.
      Addr         perm_malloc_limit;   // maximum usable byte in perm_malloc sb.

      // Stats only
      SizeT        stats__perm_bytes_on_loan;
      SizeT        stats__perm_blocks;

      ULong        stats__nreclaim_unsplit;
      ULong        stats__nreclaim_split;
      /* total # of reclaims executed for unsplittable/splittable superblocks */
      SizeT        stats__bytes_on_loan;
      SizeT        stats__bytes_mmaped;
      SizeT        stats__bytes_on_loan_max;
      ULong        stats__tot_blocks; /* total # blocks alloc'd */
      ULong        stats__tot_bytes;  /* total # bytes alloc'd */
      ULong        stats__nsearches;  /* total # freelist checks */
      // If profiling, when should the next profile happen
      // (in terms of stats__bytes_on_loan_max)?
      SizeT        next_profile_at;
      SizeT        stats__bytes_mmaped_max;
   }
   Arena;


/*------------------------------------------------------------*/
/*--- Low-level functions for working with Blocks.         ---*/
/*------------------------------------------------------------*/

#define SIZE_T_0x1  ((SizeT)0x1)

static const char* probably_your_fault =
   "This is probably caused by your program erroneously writing past the\n"
   "end of a heap block and corrupting heap metadata.  If you fix any\n"
   "invalid writes reported by Memcheck, this assertion failure will\n"
   "probably go away.  Please try that before reporting this as a bug.\n";

// Mark a bszB as in-use, mark it as free, or strip the in-use/free
// attribute to get the plain size.
static __inline__
SizeT mk_inuse_bszB ( SizeT bszB )
{
   vg_assert2(bszB != 0, probably_your_fault);
   return bszB & (~SIZE_T_0x1);
}
static __inline__
SizeT mk_free_bszB ( SizeT bszB )
{
   vg_assert2(bszB != 0, probably_your_fault);
   return bszB | SIZE_T_0x1;
}
static __inline__
SizeT mk_plain_bszB ( SizeT bszB )
{
   vg_assert2(bszB != 0, probably_your_fault);
   return bszB & (~SIZE_T_0x1);
}

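/* Illustrative sketch only (not from the original source): the tag-bit
   round trip.  A plain size of 0x40 is stored as 0x41 while the block
   is on a free list; both forms recover the same plain size. */
#if 0
   vg_assert( mk_free_bszB(0x40)                == 0x41 );
   vg_assert( mk_inuse_bszB(0x41)               == 0x40 );
   vg_assert( mk_plain_bszB(mk_free_bszB(0x40)) == 0x40 );
#endif
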
// Forward declaration.
static
void ensure_mm_init ( ArenaId aid );

// Return either 0 or VG_MIN_MALLOC_SZB (the size of the cost-center
// field), depending on whether or not heap profiling is engaged.
#define hp_overhead_szB() set_at_init_hp_overhead_szB
static SizeT set_at_init_hp_overhead_szB = -1000000;
// startup value chosen to very likely cause a problem if used before
// a proper value is given by ensure_mm_init.

//---------------------------------------------------------------------------

// Get a block's size as stored, ie with the in-use/free attribute.
static __inline__
SizeT get_bszB_as_is ( Block* b )
{
   UByte* b2 = (UByte*)b;
   SizeT bszB_lo = *(SizeT*)&b2[0 + hp_overhead_szB()];
   SizeT bszB_hi = *(SizeT*)&b2[mk_plain_bszB(bszB_lo) - sizeof(SizeT)];
   vg_assert2(bszB_lo == bszB_hi,
      "Heap block lo/hi size mismatch: lo = %llu, hi = %llu.\n%s",
      (ULong)bszB_lo, (ULong)bszB_hi, probably_your_fault);
   return bszB_lo;
}

// Get a block's plain size, ie. remove the in-use/free attribute.
static __inline__
SizeT get_bszB ( Block* b )
{
   return mk_plain_bszB(get_bszB_as_is(b));
}

// Set the size fields of a block.  bszB may have the in-use/free attribute.
static __inline__
void set_bszB ( Block* b, SizeT bszB )
{
   UByte* b2 = (UByte*)b;
   *(SizeT*)&b2[0 + hp_overhead_szB()] = bszB;
   *(SizeT*)&b2[mk_plain_bszB(bszB) - sizeof(SizeT)] = bszB;
}

//---------------------------------------------------------------------------

// Does this block have the in-use attribute?
static __inline__
Bool is_inuse_block ( Block* b )
{
   SizeT bszB = get_bszB_as_is(b);
   vg_assert2(bszB != 0, probably_your_fault);
   return (0 != (bszB & SIZE_T_0x1)) ? False : True;
}

//---------------------------------------------------------------------------

// Return the lower, upper and total overhead in bytes for a block.
// These are determined purely by which arena the block lives in.
static __inline__
SizeT overhead_szB_lo ( Arena* a )
{
   return hp_overhead_szB() + sizeof(SizeT) + a->rz_szB;
}
static __inline__
SizeT overhead_szB_hi ( Arena* a )
{
   return a->rz_szB + sizeof(SizeT);
}
static __inline__
SizeT overhead_szB ( Arena* a )
{
   return overhead_szB_lo(a) + overhead_szB_hi(a);
}

//---------------------------------------------------------------------------

// Return the minimum bszB for a block in this arena.  Can have zero-length
// payloads, so it's the size of the admin bytes.
static __inline__
SizeT min_useful_bszB ( Arena* a )
{
   return overhead_szB(a);
}

//---------------------------------------------------------------------------

// Convert payload size <--> block size (both in bytes).
static __inline__
SizeT pszB_to_bszB ( Arena* a, SizeT pszB )
{
   return pszB + overhead_szB(a);
}
static __inline__
SizeT bszB_to_pszB ( Arena* a, SizeT bszB )
{
   vg_assert2(bszB >= overhead_szB(a), probably_your_fault);
   return bszB - overhead_szB(a);
}

//---------------------------------------------------------------------------

// Get a block's payload size.
static __inline__
SizeT get_pszB ( Arena* a, Block* b )
{
   return bszB_to_pszB(a, get_bszB(b));
}

//---------------------------------------------------------------------------

// Given the addr of a block, return the addr of its payload, and vice versa.
static __inline__
UByte* get_block_payload ( Arena* a, Block* b )
{
   UByte* b2 = (UByte*)b;
   return & b2[ overhead_szB_lo(a) ];
}
// Given the addr of a block's payload, return the addr of the block itself.
static __inline__
Block* get_payload_block ( Arena* a, UByte* payload )
{
   return (Block*)&payload[ -overhead_szB_lo(a) ];
}

//---------------------------------------------------------------------------

// Set and get the next and previous link fields of a block.
static __inline__
void set_prev_b ( Block* b, Block* prev_p )
{
   UByte* b2 = (UByte*)b;
   *(Block**)&b2[hp_overhead_szB() + sizeof(SizeT)] = prev_p;
}
static __inline__
void set_next_b ( Block* b, Block* next_p )
{
   UByte* b2 = (UByte*)b;
   *(Block**)&b2[get_bszB(b) - sizeof(SizeT) - sizeof(void*)] = next_p;
}
static __inline__
Block* get_prev_b ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return *(Block**)&b2[hp_overhead_szB() + sizeof(SizeT)];
}
static __inline__
Block* get_next_b ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return *(Block**)&b2[get_bszB(b) - sizeof(SizeT) - sizeof(void*)];
}

//---------------------------------------------------------------------------

// Set and get the cost-center field of a block.
static __inline__
void set_cc ( Block* b, const HChar* cc )
{
   UByte* b2 = (UByte*)b;
   vg_assert( VG_(clo_profile_heap) );
   *(const HChar**)&b2[0] = cc;
}
static __inline__
const HChar* get_cc ( Block* b )
{
   UByte* b2 = (UByte*)b;
   vg_assert( VG_(clo_profile_heap) );
   return *(const HChar**)&b2[0];
}

//---------------------------------------------------------------------------

// Get the block immediately preceding this one in the Superblock.
static __inline__
Block* get_predecessor_block ( Block* b )
{
   UByte* b2 = (UByte*)b;
   SizeT  bszB = mk_plain_bszB( (*(SizeT*)&b2[-sizeof(SizeT)]) );
   return (Block*)&b2[-bszB];
}

//---------------------------------------------------------------------------

// Read and write the lower and upper red-zone bytes of a block.
static __inline__
void set_rz_lo_byte ( Block* b, UInt rz_byteno, UByte v )
{
   UByte* b2 = (UByte*)b;
   b2[hp_overhead_szB() + sizeof(SizeT) + rz_byteno] = v;
}
static __inline__
void set_rz_hi_byte ( Block* b, UInt rz_byteno, UByte v )
{
   UByte* b2 = (UByte*)b;
   b2[get_bszB(b) - sizeof(SizeT) - rz_byteno - 1] = v;
}
static __inline__
UByte get_rz_lo_byte ( Block* b, UInt rz_byteno )
{
   UByte* b2 = (UByte*)b;
   return b2[hp_overhead_szB() + sizeof(SizeT) + rz_byteno];
}
static __inline__
UByte get_rz_hi_byte ( Block* b, UInt rz_byteno )
{
   UByte* b2 = (UByte*)b;
   return b2[get_bszB(b) - sizeof(SizeT) - rz_byteno - 1];
}

#if defined(ENABLE_INNER_CLIENT_REQUEST)
/* When running as an inner, the block headers before and after
   (see 'Layout of an in-use block:' above) are made non-accessible
   by VALGRIND_MALLOCLIKE_BLOCK/VALGRIND_FREELIKE_BLOCK
   to allow the outer to detect block overrun.
   The below two functions are used when these headers must be
   temporarily accessed. */
static void mkBhdrAccess( Arena* a, Block* b )
{
   VALGRIND_MAKE_MEM_DEFINED (b,
                              hp_overhead_szB() + sizeof(SizeT) + a->rz_szB);
   VALGRIND_MAKE_MEM_DEFINED (b + get_bszB(b) - a->rz_szB - sizeof(SizeT),
                              a->rz_szB + sizeof(SizeT));
}

/* Mark block hdr as not accessible.
   !!! Currently, we do not mark the cost center and szB fields inaccessible
   as these are accessed at too many places. */
static void mkBhdrNoAccess( Arena* a, Block* b )
{
   VALGRIND_MAKE_MEM_NOACCESS (b + hp_overhead_szB() + sizeof(SizeT),
                               a->rz_szB);
   VALGRIND_MAKE_MEM_NOACCESS (b + get_bszB(b) - sizeof(SizeT) - a->rz_szB,
                               a->rz_szB);
}

/* Make the cc+szB fields accessible. */
static void mkBhdrSzAccess( Arena* a, Block* b )
{
   VALGRIND_MAKE_MEM_DEFINED (b,
                              hp_overhead_szB() + sizeof(SizeT));
   /* We cannot use get_bszB(b), as this reads the 'hi' szB we want
      to mark accessible.  So, we only access the 'lo' szB. */
   SizeT bszB_lo = mk_plain_bszB(*(SizeT*)&b[0 + hp_overhead_szB()]);
   VALGRIND_MAKE_MEM_DEFINED (b + bszB_lo - sizeof(SizeT),
                              sizeof(SizeT));
}
#endif

/*------------------------------------------------------------*/
/*--- Arena management                                     ---*/
/*------------------------------------------------------------*/

#define CORE_ARENA_MIN_SZB    1048576

// The arena structures themselves.
static Arena vg_arena[VG_N_ARENAS];

// Functions external to this module identify arenas using ArenaIds,
// not Arena*s.  This fn converts the former to the latter.
static Arena* arenaId_to_ArenaP ( ArenaId arena )
{
   vg_assert(arena >= 0 && arena < VG_N_ARENAS);
   return & vg_arena[arena];
}

static ArenaId arenaP_to_ArenaId ( Arena *a )
{
   ArenaId arena = a - vg_arena;
   vg_assert(arena >= 0 && arena < VG_N_ARENAS);
   return arena;
}

// Initialise an arena.  rz_szB is the (default) minimum redzone size;
// it might be overridden by VG_(clo_redzone_size) or VG_(clo_core_redzone_size);
// it might be made bigger to ensure that VG_MIN_MALLOC_SZB is observed.
static
void arena_init ( ArenaId aid, const HChar* name, SizeT rz_szB,
                  SizeT min_sblock_szB, SizeT min_unsplittable_sblock_szB )
{
   SizeT  i;
   Arena* a = arenaId_to_ArenaP(aid);

   // Ensure default redzones are a reasonable size.
   vg_assert(rz_szB <= MAX_REDZONE_SZB);

   /* Override the default redzone size if a clo value was given.
      Note that the clo value can be significantly bigger than MAX_REDZONE_SZB
      to allow the user to chase horrible bugs using up to 1 page
      of protection. */
   if (VG_AR_CLIENT == aid) {
      if (VG_(clo_redzone_size) != -1)
         rz_szB = VG_(clo_redzone_size);
   } else {
      if (VG_(clo_core_redzone_size) != rz_szB)
         rz_szB = VG_(clo_core_redzone_size);
   }

   // Redzones must always be at least the size of a pointer, for holding the
   // prev/next pointer (see the layout details at the top of this file).
   if (rz_szB < sizeof(void*)) rz_szB = sizeof(void*);

   // The size of the low and high admin sections in a block must be a
   // multiple of VG_MIN_MALLOC_SZB.  So we round up the asked-for
   // redzone size if necessary to achieve this.
   a->rz_szB = rz_szB;
   while (0 != overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++;
   vg_assert(overhead_szB_lo(a) - hp_overhead_szB() == overhead_szB_hi(a));

   // Here we have established the effective redzone size.


   vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
   a->name      = name;
   a->clientmem = ( VG_AR_CLIENT == aid ? True : False );

   a->min_sblock_szB = min_sblock_szB;
   a->min_unsplittable_sblock_szB = min_unsplittable_sblock_szB;
   for (i = 0; i < N_MALLOC_LISTS; i++) a->freelist[i] = NULL;

   a->sblocks                  = & a->sblocks_initial[0];
   a->sblocks_size             = SBLOCKS_SIZE_INITIAL;
   a->sblocks_used             = 0;
   a->deferred_reclaimed_sb    = 0;
   a->perm_malloc_current      = 0;
   a->perm_malloc_limit        = 0;
   a->stats__perm_bytes_on_loan= 0;
   a->stats__perm_blocks       = 0;
   a->stats__nreclaim_unsplit  = 0;
   a->stats__nreclaim_split    = 0;
   a->stats__bytes_on_loan     = 0;
   a->stats__bytes_mmaped      = 0;
   a->stats__bytes_on_loan_max = 0;
   a->stats__bytes_mmaped_max  = 0;
   a->stats__tot_blocks        = 0;
   a->stats__tot_bytes         = 0;
   a->stats__nsearches         = 0;
   a->next_profile_at          = 25 * 1000 * 1000;
   vg_assert(sizeof(a->sblocks_initial)
             == SBLOCKS_SIZE_INITIAL * sizeof(Superblock*));
}

/* Print vital stats for an arena. */
void VG_(print_all_arena_stats) ( void )
{
   UInt i;
   for (i = 0; i < VG_N_ARENAS; i++) {
      Arena* a = arenaId_to_ArenaP(i);
      VG_(message)(Vg_DebugMsg,
                   "%-8s: %'13lu/%'13lu max/curr mmap'd, "
                   "%llu/%llu unsplit/split sb unmmap'd, "
                   "%'13lu/%'13lu max/curr, "
                   "%10llu/%10llu totalloc-blocks/bytes,"
                   " %10llu searches %lu rzB\n",
                   a->name,
                   a->stats__bytes_mmaped_max, a->stats__bytes_mmaped,
                   a->stats__nreclaim_unsplit, a->stats__nreclaim_split,
                   a->stats__bytes_on_loan_max,
                   a->stats__bytes_on_loan,
                   a->stats__tot_blocks, a->stats__tot_bytes,
                   a->stats__nsearches,
                   a->rz_szB
      );
   }
}

void VG_(print_arena_cc_analysis) ( void )
{
   UInt i;
   vg_assert( VG_(clo_profile_heap) );
   for (i = 0; i < VG_N_ARENAS; i++) {
      cc_analyse_alloc_arena(i);
   }
}


/* This library is self-initialising, as it makes this more self-contained,
   less coupled with the outside world.  Hence VG_(arena_malloc)() and
   VG_(arena_free)() below always call ensure_mm_init() to ensure things are
   correctly initialised.

   We initialise the client arena separately (and later) because the core
   must do non-client allocation before the tool has a chance to set the
   client arena's redzone size.
*/
static Bool     client_inited = False;
static Bool  nonclient_inited = False;

static
void ensure_mm_init ( ArenaId aid )
{
   static SizeT client_rz_szB = 8;     // default: be paranoid

   /* We use checked red zones (of various sizes) for our internal stuff,
      and an unchecked zone of arbitrary size for the client.  Of
      course the client's red zone can be checked by the tool, eg.
      by using addressability maps, but not by the mechanism implemented
      here, which merely checks at the time of freeing that the red
      zone bytes are unchanged.

      Nb: redzone sizes are *minimums*; they could be made bigger to ensure
      alignment.  Eg. with 8 byte alignment, on 32-bit machines 4 stays as
      4, but 16 becomes 20; but on 64-bit machines 4 becomes 8, and 16
      stays as 16 --- the extra 4 bytes in both are accounted for by the
      larger prev/next ptr.
   */
   if (VG_AR_CLIENT == aid) {
      Int ar_client_sbszB;
      if (client_inited) {
         // This assertion ensures that a tool cannot try to change the client
         // redzone size with VG_(needs_malloc_replacement)() after this module
         // has done its first allocation from the client arena.
         if (VG_(needs).malloc_replacement)
            vg_assert(client_rz_szB == VG_(tdict).tool_client_redzone_szB);
         return;
      }

      // Check and set the client arena redzone size
      if (VG_(needs).malloc_replacement) {
         client_rz_szB = VG_(tdict).tool_client_redzone_szB;
         if (client_rz_szB > MAX_REDZONE_SZB) {
            VG_(printf)( "\nTool error:\n"
                         "  specified redzone size is too big (%llu)\n",
                         (ULong)client_rz_szB);
            VG_(exit)(1);
         }
      }
      // Initialise the client arena.  On all platforms,
      // increasing the superblock size reduces the number of superblocks
      // in the client arena, which makes findSb cheaper.
      ar_client_sbszB = 4194304;
      // superblocks with a size > ar_client_sbszB will be unsplittable
      // (unless used for providing memalign-ed blocks).
      arena_init ( VG_AR_CLIENT,    "client",   client_rz_szB,
                   ar_client_sbszB, ar_client_sbszB+1);
      client_inited = True;

   } else {
      if (nonclient_inited) {
         return;
      }
      set_at_init_hp_overhead_szB =
         VG_(clo_profile_heap)  ? VG_MIN_MALLOC_SZB  : 0;
      // Initialise the non-client arenas
      // Similarly to client arena, big allocations will be unsplittable.
      arena_init ( VG_AR_CORE,      "core",     CORE_REDZONE_DEFAULT_SZB,
                   4194304, 4194304+1 );
      arena_init ( VG_AR_DINFO,     "dinfo",    CORE_REDZONE_DEFAULT_SZB,
                   1048576, 1048576+1 );
      arena_init ( VG_AR_DEMANGLE,  "demangle", CORE_REDZONE_DEFAULT_SZB,
                   65536,   65536+1 );
      arena_init ( VG_AR_TTAUX,     "ttaux",    CORE_REDZONE_DEFAULT_SZB,
                   65536,   65536+1 );
      nonclient_inited = True;
   }

#  ifdef DEBUG_MALLOC
   VG_(printf)("ZZZ1\n");
   VG_(sanity_check_malloc_all)();
   VG_(printf)("ZZZ2\n");
#  endif
}


/*------------------------------------------------------------*/
/*--- Superblock management                                ---*/
/*------------------------------------------------------------*/

__attribute__((noreturn))
void VG_(out_of_memory_NORETURN) ( const HChar* who, SizeT szB )
{
   static Int outputTrial = 0;
   // We try once to output the full memory state followed by the below message.
   // If that fails (due to out of memory during first trial), we try to just
   // output the below message.
   // And then we abandon.

   ULong tot_alloc = VG_(am_get_anonsize_total)();
   const HChar* s1 =
      "\n"
      "    Valgrind's memory management: out of memory:\n"
      "       %s's request for %llu bytes failed.\n"
      "       %'13llu bytes have already been mmap-ed ANONYMOUS.\n"
      "    Valgrind cannot continue.  Sorry.\n\n"
      "    There are several possible reasons for this.\n"
      "    - You have some kind of memory limit in place.  Look at the\n"
      "      output of 'ulimit -a'.  Is there a limit on the size of\n"
      "      virtual memory or address space?\n"
      "    - You have run out of swap space.\n"
      "    - Valgrind has a bug.  If you think this is the case or you are\n"
      "      not sure, please let us know and we'll try to fix it.\n"
      "    Please note that programs can take substantially more memory than\n"
      "    normal when running under Valgrind tools, eg. up to twice or\n"
      "    more, depending on the tool.  On a 64-bit machine, Valgrind\n"
      "    should be able to make use of up to 32GB memory.  On a 32-bit\n"
      "    machine, Valgrind should be able to use all the memory available\n"
      "    to a single process, up to 4GB if that's how you have your\n"
      "    kernel configured.  Most 32-bit Linux setups allow a maximum of\n"
      "    3GB per process.\n\n"
      "    Whatever the reason, Valgrind cannot continue.  Sorry.\n";

   if (outputTrial <= 1) {
      if (outputTrial == 0) {
         outputTrial++;
         // First print the memory stats with the aspacemgr data.
         VG_(am_show_nsegments) (0, "out_of_memory");
         VG_(print_all_arena_stats) ();
         if (VG_(clo_profile_heap))
            VG_(print_arena_cc_analysis) ();
         // And then print some other information that might help.
         VG_(print_all_stats) (False, /* Memory stats */
                               True /* Tool stats */);
         VG_(show_sched_status) (True,  // host_stacktrace
                                 True,  // valgrind_stack_usage
                                 True); // exited_threads
         /* In case we are an inner valgrind, ask the outer to report
            its memory state in its log output. */
         INNER_REQUEST(VALGRIND_MONITOR_COMMAND("v.set log_output"));
         INNER_REQUEST(VALGRIND_MONITOR_COMMAND("v.info memory aspacemgr"));
      }
      outputTrial++;
      VG_(message)(Vg_UserMsg, s1, who, (ULong)szB, tot_alloc);
   } else {
      VG_(debugLog)(0,"mallocfree", s1, who, (ULong)szB, tot_alloc);
   }

   VG_(exit)(1);
}


// Align ptr p upwards to an align-sized boundary.
static
void* align_upwards ( void* p, SizeT align )
{
   Addr a = (Addr)p;
   if ((a % align) == 0) return (void*)a;
   return (void*)(a - (a % align) + align);
}

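/* Illustrative examples only (not from the original source): how
   align_upwards rounds.  Addresses are hypothetical. */
#if 0
   vg_assert( align_upwards((void*)0x1003, 8) == (void*)0x1008 );
   vg_assert( align_upwards((void*)0x1008, 8) == (void*)0x1008 ); // unchanged
#endif
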
// Forward declaration.
static
void deferred_reclaimSuperblock ( Arena* a, Superblock* sb);

// If not enough memory available, either aborts (for non-client memory)
// or returns 0 (for client memory).
static
Superblock* newSuperblock ( Arena* a, SizeT cszB )
{
   Superblock* sb;
   SysRes      sres;
   Bool        unsplittable;
   ArenaId     aid;

   // A new superblock is needed for arena a.  We will execute the deferred
   // reclaim in all arenas in order to minimise fragmentation and
   // peak memory usage.
   for (aid = 0; aid < VG_N_ARENAS; aid++) {
      Arena* arena = arenaId_to_ArenaP(aid);
      if (arena->deferred_reclaimed_sb != NULL)
         deferred_reclaimSuperblock (arena, NULL);
   }

   // Take into account admin bytes in the Superblock.
   cszB += sizeof(Superblock);

   if (cszB < a->min_sblock_szB) cszB = a->min_sblock_szB;
   cszB = VG_PGROUNDUP(cszB);

   if (cszB >= a->min_unsplittable_sblock_szB)
      unsplittable = True;
   else
      unsplittable = False;


   if (a->clientmem) {
      // client allocation -- return 0 to client if it fails
      sres = VG_(am_mmap_client_heap)
         ( cszB, VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC );
      if (sr_isError(sres))
         return 0;
      sb = (Superblock*)(Addr)sr_Res(sres);
   } else {
      // non-client allocation -- abort if it fails
      sres = VG_(am_mmap_anon_float_valgrind)( cszB );
      if (sr_isError(sres)) {
         VG_(out_of_memory_NORETURN)("newSuperblock", cszB);
         /* NOTREACHED */
         sb = NULL; /* keep gcc happy */
      } else {
         sb = (Superblock*)(Addr)sr_Res(sres);
      }
   }
   vg_assert(NULL != sb);
   INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(sb, cszB));
   vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
   sb->n_payload_bytes = cszB - sizeof(Superblock);
   sb->unsplittable = (unsplittable ? sb : NULL);
   a->stats__bytes_mmaped += cszB;
   if (a->stats__bytes_mmaped > a->stats__bytes_mmaped_max)
      a->stats__bytes_mmaped_max = a->stats__bytes_mmaped;
   VG_(debugLog)(1, "mallocfree",
                    "newSuperblock at %p (pszB %7lu) %s owner %s/%s\n",
                    sb, sb->n_payload_bytes,
                    (unsplittable ? "unsplittable" : ""),
                    a->clientmem ? "CLIENT" : "VALGRIND", a->name );
   return sb;
}

// Reclaims the given superblock:
//  * removes sb from the arena's sblocks list.
//  * munmaps the superblock segment.
static
void reclaimSuperblock ( Arena* a, Superblock* sb)
{
   SysRes sres;
   SizeT  cszB;
   UInt   i, j;

   VG_(debugLog)(1, "mallocfree",
                    "reclaimSuperblock at %p (pszB %7lu) %s owner %s/%s\n",
                    sb, sb->n_payload_bytes,
                    (sb->unsplittable ? "unsplittable" : ""),
                    a->clientmem ? "CLIENT" : "VALGRIND", a->name );

   // Take into account admin bytes in the Superblock.
   cszB = sizeof(Superblock) + sb->n_payload_bytes;

   // Remove sb from the superblock list.
   for (i = 0; i < a->sblocks_used; i++) {
      if (a->sblocks[i] == sb)
         break;
   }
   vg_assert(i >= 0 && i < a->sblocks_used);
   for (j = i; j < a->sblocks_used; j++)
      a->sblocks[j] = a->sblocks[j+1];
   a->sblocks_used--;
   a->sblocks[a->sblocks_used] = NULL;
   // paranoia: NULLify ptr to reclaimed sb or NULLify copy of ptr to last sb.

   a->stats__bytes_mmaped -= cszB;
   if (sb->unsplittable)
      a->stats__nreclaim_unsplit++;
   else
      a->stats__nreclaim_split++;

   // Now that the sb is removed from the list, munmap its space.
   if (a->clientmem) {
      // reclaimable client allocation
      Bool need_discard = False;
      sres = VG_(am_munmap_client)(&need_discard, (Addr) sb, cszB);
      vg_assert2(! sr_isError(sres), "superblock client munmap failure\n");
      /* We somewhat help the client by discarding the range.
         Note however that if the client has JITted some code in
         a small block that was freed, we do not provide this
         'discard support' */
      /* JRS 2011-Sept-26: it would be nice to move the discard
         outwards somewhat (in terms of calls) so as to make it easier
         to verify that there will be no nonterminating recursive set
         of calls as a result of calling VG_(discard_translations).
         Another day, perhaps. */
      if (need_discard)
         VG_(discard_translations) ((Addr) sb, cszB, "reclaimSuperblock");
   } else {
      // reclaimable non-client allocation
      sres = VG_(am_munmap_valgrind)((Addr) sb, cszB);
      vg_assert2(! sr_isError(sres), "superblock valgrind munmap failure\n");
   }

}

// Find the superblock containing the given chunk.
static
Superblock* findSb ( Arena* a, Block* b )
{
   SizeT min = 0;
   SizeT max = a->sblocks_used;

   while (min <= max) {
      Superblock * sb;
      SizeT pos = min + (max - min)/2;

      vg_assert(pos >= 0 && pos < a->sblocks_used);
      sb = a->sblocks[pos];
      if ((Block*)&sb->payload_bytes[0] <= b
          && b < (Block*)&sb->payload_bytes[sb->n_payload_bytes])
      {
         return sb;
      } else if ((Block*)&sb->payload_bytes[0] <= b) {
         min = pos + 1;
      } else {
         max = pos - 1;
      }
   }
   VG_(printf)("findSb: can't find pointer %p in arena '%s'\n",
                b, a->name );
   VG_(core_panic)("findSb: VG_(arena_free)() in wrong arena?");
   return NULL; /*NOTREACHED*/
}


// Find the superblock containing the given address.
// If superblock not found, return NULL.
static
Superblock* maybe_findSb ( Arena* a, Addr ad )
{
   SizeT min = 0;
   SizeT max = a->sblocks_used;

   while (min <= max) {
      Superblock * sb;
      SizeT pos = min + (max - min)/2;
      if (pos < 0 || pos >= a->sblocks_used)
         return NULL;
      sb = a->sblocks[pos];
      if ((Addr)&sb->payload_bytes[0] <= ad
          && ad < (Addr)&sb->payload_bytes[sb->n_payload_bytes]) {
         return sb;
      } else if ((Addr)&sb->payload_bytes[0] <= ad) {
         min = pos + 1;
      } else {
         max = pos - 1;
      }
   }
   return NULL;
}


/*------------------------------------------------------------*/
/*--- Functions for working with freelists.                ---*/
/*------------------------------------------------------------*/

// Nb: Determination of which freelist a block lives on is based on the
// payload size, not block size.

// Convert a payload size in bytes to a freelist number.
static
UInt pszB_to_listNo ( SizeT pszB )
{
   SizeT n = pszB / VG_MIN_MALLOC_SZB;
   vg_assert(0 == pszB % VG_MIN_MALLOC_SZB);

   // The first 64 lists hold blocks of size VG_MIN_MALLOC_SZB * list_num.
   // The final 48 hold bigger blocks.
   if (n < 64)   return (UInt)n;
   /* Exponential slope up, factor 1.05 */
   if (n < 67) return 64;
   if (n < 70) return 65;
   if (n < 74) return 66;
   if (n < 77) return 67;
   if (n < 81) return 68;
   if (n < 85) return 69;
   if (n < 90) return 70;
   if (n < 94) return 71;
   if (n < 99) return 72;
   if (n < 104) return 73;
   if (n < 109) return 74;
   if (n < 114) return 75;
   if (n < 120) return 76;
   if (n < 126) return 77;
   if (n < 133) return 78;
   if (n < 139) return 79;
   /* Exponential slope up, factor 1.10 */
   if (n < 153) return 80;
   if (n < 169) return 81;
   if (n < 185) return 82;
   if (n < 204) return 83;
   if (n < 224) return 84;
   if (n < 247) return 85;
   if (n < 272) return 86;
   if (n < 299) return 87;
   if (n < 329) return 88;
   if (n < 362) return 89;
   if (n < 398) return 90;
   if (n < 438) return 91;
   if (n < 482) return 92;
   if (n < 530) return 93;
   if (n < 583) return 94;
   if (n < 641) return 95;
   /* Exponential slope up, factor 1.20 */
   if (n < 770) return 96;
   if (n < 924) return 97;
   if (n < 1109) return 98;
   if (n < 1331) return 99;
   if (n < 1597) return 100;
   if (n < 1916) return 101;
   if (n < 2300) return 102;
   if (n < 2760) return 103;
   if (n < 3312) return 104;
   if (n < 3974) return 105;
   if (n < 4769) return 106;
   if (n < 5723) return 107;
   if (n < 6868) return 108;
   if (n < 8241) return 109;
   if (n < 9890) return 110;
   return 111;
}

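/* Illustrative sketch only (not from the original source), assuming
   VG_MIN_MALLOC_SZB == 8: payload sizes map exactly onto the first 64
   lists, then fall into the exponentially widening bands above. */
#if 0
   vg_assert( pszB_to_listNo(8)    == 1  );  // n == 1, exact list
   vg_assert( pszB_to_listNo(504)  == 63 );  // n == 63, last exact list
   vg_assert( pszB_to_listNo(512)  == 64 );  // n == 64, first band
   vg_assert( pszB_to_listNo(1024) == 78 );  // n == 128, 126 <= n < 133
#endif
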
nethercote2d5b8162004-08-11 09:40:52 +00001076// What is the minimum payload size for a given list?
sewardjde4a1d02002-03-22 01:27:54 +00001077static
nethercote7ac7f7b2004-11-02 12:36:02 +00001078SizeT listNo_to_pszB_min ( UInt listNo )
sewardjde4a1d02002-03-22 01:27:54 +00001079{
sewardj1d2e2e62007-08-23 10:22:44 +00001080 /* Repeatedly computing this function at every request is
1081 expensive. Hence at the first call just cache the result for
1082 every possible argument. */
1083 static SizeT cache[N_MALLOC_LISTS];
1084 static Bool cache_valid = False;
1085 if (!cache_valid) {
1086 UInt i;
1087 for (i = 0; i < N_MALLOC_LISTS; i++) {
1088 SizeT pszB = 0;
1089 while (pszB_to_listNo(pszB) < i)
1090 pszB += VG_MIN_MALLOC_SZB;
1091 cache[i] = pszB;
1092 }
1093 cache_valid = True;
1094 }
1095 /* Returned cached answer. */
njn6e6588c2005-03-13 18:52:48 +00001096 vg_assert(listNo <= N_MALLOC_LISTS);
sewardj1d2e2e62007-08-23 10:22:44 +00001097 return cache[listNo];
sewardjde4a1d02002-03-22 01:27:54 +00001098}
1099
nethercote2d5b8162004-08-11 09:40:52 +00001100// What is the maximum payload size for a given list?
sewardjde4a1d02002-03-22 01:27:54 +00001101static
nethercote7ac7f7b2004-11-02 12:36:02 +00001102SizeT listNo_to_pszB_max ( UInt listNo )
sewardjde4a1d02002-03-22 01:27:54 +00001103{
njn6e6588c2005-03-13 18:52:48 +00001104 vg_assert(listNo <= N_MALLOC_LISTS);
1105 if (listNo == N_MALLOC_LISTS-1) {
nethercote2d5b8162004-08-11 09:40:52 +00001106 return MAX_PSZB;
sewardjde4a1d02002-03-22 01:27:54 +00001107 } else {
nethercote2d5b8162004-08-11 09:40:52 +00001108 return listNo_to_pszB_min(listNo+1) - 1;
sewardjde4a1d02002-03-22 01:27:54 +00001109 }
1110}
1111
1112
1113/* A nasty hack to try and reduce fragmentation. Try and replace
1114 a->freelist[lno] with another block on the same list but with a
1115 lower address, with the idea of attempting to recycle the same
1116 blocks rather than cruise through the address space. */
sewardjde4a1d02002-03-22 01:27:54 +00001117static
nethercote7ac7f7b2004-11-02 12:36:02 +00001118void swizzle ( Arena* a, UInt lno )
sewardjde4a1d02002-03-22 01:27:54 +00001119{
nethercote2d5b8162004-08-11 09:40:52 +00001120 Block* p_best;
1121 Block* pp;
1122 Block* pn;
nethercote7ac7f7b2004-11-02 12:36:02 +00001123 UInt i;
sewardjde4a1d02002-03-22 01:27:54 +00001124
1125 p_best = a->freelist[lno];
1126 if (p_best == NULL) return;
1127
1128 pn = pp = p_best;
njn2bf9ba62005-12-25 02:47:12 +00001129
1130 // This loop bound was 20 for a long time, but experiments showed that
1131 // reducing it to 10 gave the same result in all the tests, and 5 got the
1132 // same result in 85--100% of cases. And it's called often enough to be
1133 // noticeable in programs that allocated a lot.
1134 for (i = 0; i < 5; i++) {
nethercote2d5b8162004-08-11 09:40:52 +00001135 pn = get_next_b(pn);
1136 pp = get_prev_b(pp);
sewardjde4a1d02002-03-22 01:27:54 +00001137 if (pn < p_best) p_best = pn;
1138 if (pp < p_best) p_best = pp;
1139 }
1140 if (p_best < a->freelist[lno]) {
nethercote2d5b8162004-08-11 09:40:52 +00001141# ifdef VERBOSE_MALLOC
sewardj9c606bd2008-09-18 18:12:50 +00001142 VG_(printf)("retreat by %ld\n", (Word)(a->freelist[lno] - p_best));
sewardjde4a1d02002-03-22 01:27:54 +00001143# endif
1144 a->freelist[lno] = p_best;
1145 }
1146}
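// Worked example (illustrative addresses): if a->freelist[lno] points at
// a block at 0x5000 and the five hops in each direction visit blocks at,
// say, 0x5400, 0x4c00 and 0x4000, then p_best ends up as 0x4000 and the
// list head "retreats" there, so subsequent allocations from this list
// prefer the lower end of the address space.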
1147
1148
1149/*------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +00001150/*--- Sanity-check/debugging machinery. ---*/
1151/*------------------------------------------------------------*/
1152
njn6e6588c2005-03-13 18:52:48 +00001153#define REDZONE_LO_MASK 0x31
1154#define REDZONE_HI_MASK 0x7c
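// Example of the encoding these masks produce (illustrative): the
// expected redzone bytes depend only on the low 8 bits of the block
// address.  For a Block* b whose address ends in 0x40, every lo redzone
// byte must be 0x40 ^ 0x31 == 0x71 and every hi redzone byte must be
// 0x40 ^ 0x7c == 0x3c; blockSane() below fails if any byte differs,
// which catches simple writes just outside the payload.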
nethercote2d5b8162004-08-11 09:40:52 +00001155
nethercote7ac7f7b2004-11-02 12:36:02 +00001156// Do some crude sanity checks on a Block.
sewardjde4a1d02002-03-22 01:27:54 +00001157static
nethercote2d5b8162004-08-11 09:40:52 +00001158Bool blockSane ( Arena* a, Block* b )
sewardjde4a1d02002-03-22 01:27:54 +00001159{
1160# define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
nethercote7ac7f7b2004-11-02 12:36:02 +00001161 UInt i;
njn402c8612005-08-23 22:11:20 +00001162 // The lo and hi size fields will be checked (indirectly) by the call
1163 // to get_rz_hi_byte().
njn472cc7c2005-07-17 17:20:30 +00001164 if (!a->clientmem && is_inuse_block(b)) {
philippe72faf102012-03-11 22:24:03 +00001165      // In the inner, for memcheck's sake, temporarily mark the redzone accessible.
philippe0b9d0642014-06-30 19:47:24 +00001166 INNER_REQUEST(mkBhdrAccess(a,b));
nethercote2d5b8162004-08-11 09:40:52 +00001167 for (i = 0; i < a->rz_szB; i++) {
njn1dcee092009-02-24 03:07:37 +00001168 if (get_rz_lo_byte(b, i) !=
njn6e6588c2005-03-13 18:52:48 +00001169 (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK))
nethercote2d5b8162004-08-11 09:40:52 +00001170 {BLEAT("redzone-lo");return False;}
njn1dcee092009-02-24 03:07:37 +00001171 if (get_rz_hi_byte(b, i) !=
njn6e6588c2005-03-13 18:52:48 +00001172 (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK))
nethercote2d5b8162004-08-11 09:40:52 +00001173 {BLEAT("redzone-hi");return False;}
philippe0b9d0642014-06-30 19:47:24 +00001174 }
1175 INNER_REQUEST(mkBhdrNoAccess(a,b));
sewardjde4a1d02002-03-22 01:27:54 +00001176 }
1177 return True;
1178# undef BLEAT
1179}
1180
philippe0b9d0642014-06-30 19:47:24 +00001181// Sanity checks on a Block inside an unsplittable superblock
1182static
1183Bool unsplittableBlockSane ( Arena* a, Superblock *sb, Block* b )
1184{
1185# define BLEAT(str) VG_(printf)("unsplittableBlockSane: fail -- %s\n",str)
1186 Block* other_b;
1187 UByte* sb_start;
1188 UByte* sb_end;
1189
1190 if (!blockSane (a, b))
1191 {BLEAT("blockSane");return False;}
1192
1193 if (sb->unsplittable != sb)
1194 {BLEAT("unsplittable");return False;}
1195
1196 sb_start = &sb->payload_bytes[0];
1197 sb_end = &sb->payload_bytes[sb->n_payload_bytes - 1];
1198
1199 // b must be first block (i.e. no unused bytes at the beginning)
1200 if ((Block*)sb_start != b)
1201 {BLEAT("sb_start");return False;}
1202
1203 // b must be last block (i.e. no unused bytes at the end)
1204 other_b = b + get_bszB(b);
1205 if (other_b-1 != (Block*)sb_end)
1206 {BLEAT("sb_end");return False;}
1207
1208 return True;
1209# undef BLEAT
1210}
1211
nethercote2d5b8162004-08-11 09:40:52 +00001212// Print superblocks (only for debugging).
sewardjde4a1d02002-03-22 01:27:54 +00001213static
1214void ppSuperblocks ( Arena* a )
1215{
sewardj0b3fd2d2007-08-21 10:55:26 +00001216 UInt i, j, blockno = 1;
njnd0e685c2005-07-17 17:55:42 +00001217 SizeT b_bszB;
sewardjde4a1d02002-03-22 01:27:54 +00001218
sewardj0b3fd2d2007-08-21 10:55:26 +00001219 for (j = 0; j < a->sblocks_used; ++j) {
1220 Superblock * sb = a->sblocks[j];
1221
sewardjde4a1d02002-03-22 01:27:54 +00001222 VG_(printf)( "\n" );
floriana5e06c32015-08-05 21:16:09 +00001223 VG_(printf)( "superblock %u at %p %s, sb->n_pl_bs = %lu\n",
sewardjd043de92011-09-26 11:28:20 +00001224 blockno++, sb, (sb->unsplittable ? "unsplittable" : ""),
sewardjd8b93462011-09-10 10:17:35 +00001225 sb->n_payload_bytes);
njnd0e685c2005-07-17 17:55:42 +00001226 for (i = 0; i < sb->n_payload_bytes; i += b_bszB) {
1227 Block* b = (Block*)&sb->payload_bytes[i];
1228 b_bszB = get_bszB(b);
floriana5e06c32015-08-05 21:16:09 +00001229 VG_(printf)( " block at %u, bszB %lu: ", i, b_bszB );
njn472cc7c2005-07-17 17:20:30 +00001230 VG_(printf)( "%s, ", is_inuse_block(b) ? "inuse" : "free");
nethercote2d5b8162004-08-11 09:40:52 +00001231 VG_(printf)( "%s\n", blockSane(a, b) ? "ok" : "BAD" );
sewardjde4a1d02002-03-22 01:27:54 +00001232 }
nethercote2d5b8162004-08-11 09:40:52 +00001233 vg_assert(i == sb->n_payload_bytes); // no overshoot at end of Sb
sewardjde4a1d02002-03-22 01:27:54 +00001234 }
1235 VG_(printf)( "end of superblocks\n\n" );
1236}
1237
nethercote2d5b8162004-08-11 09:40:52 +00001238// Sanity check both the superblocks and the chains.
nethercote885dd912004-08-03 23:14:00 +00001239static void sanity_check_malloc_arena ( ArenaId aid )
sewardjde4a1d02002-03-22 01:27:54 +00001240{
sewardj0b3fd2d2007-08-21 10:55:26 +00001241 UInt i, j, superblockctr, blockctr_sb, blockctr_li;
nethercote7ac7f7b2004-11-02 12:36:02 +00001242 UInt blockctr_sb_free, listno;
1243 SizeT b_bszB, b_pszB, list_min_pszB, list_max_pszB;
sewardj0b3fd2d2007-08-21 10:55:26 +00001244 Bool thisFree, lastWasFree, sblockarrOK;
nethercote2d5b8162004-08-11 09:40:52 +00001245 Block* b;
1246 Block* b_prev;
nethercote7ac7f7b2004-11-02 12:36:02 +00001247 SizeT arena_bytes_on_loan;
sewardjde4a1d02002-03-22 01:27:54 +00001248 Arena* a;
1249
nethercote885dd912004-08-03 23:14:00 +00001250# define BOMB VG_(core_panic)("sanity_check_malloc_arena")
sewardjde4a1d02002-03-22 01:27:54 +00001251
1252 a = arenaId_to_ArenaP(aid);
sewardj0b3fd2d2007-08-21 10:55:26 +00001253
1254 // Check the superblock array.
1255 sblockarrOK
1256 = a->sblocks != NULL
1257 && a->sblocks_size >= SBLOCKS_SIZE_INITIAL
1258 && a->sblocks_used <= a->sblocks_size
1259 && (a->sblocks_size == SBLOCKS_SIZE_INITIAL
1260 ? (a->sblocks == &a->sblocks_initial[0])
1261 : (a->sblocks != &a->sblocks_initial[0]));
1262 if (!sblockarrOK) {
1263 VG_(printf)("sanity_check_malloc_arena: sblock array BAD\n");
1264 BOMB;
1265 }
1266
nethercote2d5b8162004-08-11 09:40:52 +00001267 // First, traverse all the superblocks, inspecting the Blocks in each.
sewardjde4a1d02002-03-22 01:27:54 +00001268 superblockctr = blockctr_sb = blockctr_sb_free = 0;
1269 arena_bytes_on_loan = 0;
sewardj0b3fd2d2007-08-21 10:55:26 +00001270 for (j = 0; j < a->sblocks_used; ++j) {
1271 Superblock * sb = a->sblocks[j];
sewardjde4a1d02002-03-22 01:27:54 +00001272 lastWasFree = False;
1273 superblockctr++;
nethercote2d5b8162004-08-11 09:40:52 +00001274 for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
sewardjde4a1d02002-03-22 01:27:54 +00001275 blockctr_sb++;
nethercote2d5b8162004-08-11 09:40:52 +00001276 b = (Block*)&sb->payload_bytes[i];
njnd0e685c2005-07-17 17:55:42 +00001277 b_bszB = get_bszB_as_is(b);
sewardjde4a1d02002-03-22 01:27:54 +00001278 if (!blockSane(a, b)) {
floriana5e06c32015-08-05 21:16:09 +00001279 VG_(printf)("sanity_check_malloc_arena: sb %p, block %u "
njn8a7b41b2007-09-23 00:51:24 +00001280 "(bszB %lu): BAD\n", sb, i, b_bszB );
sewardjde4a1d02002-03-22 01:27:54 +00001281 BOMB;
1282 }
njn472cc7c2005-07-17 17:20:30 +00001283 thisFree = !is_inuse_block(b);
sewardjde4a1d02002-03-22 01:27:54 +00001284 if (thisFree && lastWasFree) {
floriana5e06c32015-08-05 21:16:09 +00001285 VG_(printf)("sanity_check_malloc_arena: sb %p, block %u "
njn8a7b41b2007-09-23 00:51:24 +00001286 "(bszB %lu): UNMERGED FREES\n", sb, i, b_bszB );
sewardjde4a1d02002-03-22 01:27:54 +00001287 BOMB;
1288 }
sewardjde4a1d02002-03-22 01:27:54 +00001289 if (thisFree) blockctr_sb_free++;
sewardj0b3fd2d2007-08-21 10:55:26 +00001290 if (!thisFree)
nethercote2d5b8162004-08-11 09:40:52 +00001291 arena_bytes_on_loan += bszB_to_pszB(a, b_bszB);
1292 lastWasFree = thisFree;
sewardjde4a1d02002-03-22 01:27:54 +00001293 }
nethercote2d5b8162004-08-11 09:40:52 +00001294 if (i > sb->n_payload_bytes) {
nethercote885dd912004-08-03 23:14:00 +00001295 VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
sewardjde4a1d02002-03-22 01:27:54 +00001296 "overshoots end\n", sb);
1297 BOMB;
1298 }
sewardjde4a1d02002-03-22 01:27:54 +00001299 }
1300
philippe6e4b7132013-01-18 06:19:49 +00001301 arena_bytes_on_loan += a->stats__perm_bytes_on_loan;
1302
sewardj7d1064a2011-02-23 13:18:56 +00001303 if (arena_bytes_on_loan != a->stats__bytes_on_loan) {
nethercote2d5b8162004-08-11 09:40:52 +00001304# ifdef VERBOSE_MALLOC
sewardjd8b93462011-09-10 10:17:35 +00001305 VG_(printf)( "sanity_check_malloc_arena: a->bytes_on_loan %lu, "
1306 "arena_bytes_on_loan %lu: "
philippe0b9d0642014-06-30 19:47:24 +00001307 "MISMATCH\n", a->stats__bytes_on_loan, arena_bytes_on_loan);
nethercote2d5b8162004-08-11 09:40:52 +00001308# endif
sewardjde4a1d02002-03-22 01:27:54 +00001309 ppSuperblocks(a);
1310 BOMB;
1311 }
1312
1313 /* Second, traverse each list, checking that the back pointers make
1314 sense, counting blocks encountered, and checking that each block
1315 is an appropriate size for this list. */
1316 blockctr_li = 0;
njn6e6588c2005-03-13 18:52:48 +00001317 for (listno = 0; listno < N_MALLOC_LISTS; listno++) {
nethercote2d5b8162004-08-11 09:40:52 +00001318 list_min_pszB = listNo_to_pszB_min(listno);
1319 list_max_pszB = listNo_to_pszB_max(listno);
sewardjde4a1d02002-03-22 01:27:54 +00001320 b = a->freelist[listno];
1321 if (b == NULL) continue;
1322 while (True) {
1323 b_prev = b;
nethercote2d5b8162004-08-11 09:40:52 +00001324 b = get_next_b(b);
1325 if (get_prev_b(b) != b_prev) {
floriana5e06c32015-08-05 21:16:09 +00001326 VG_(printf)( "sanity_check_malloc_arena: list %u at %p: "
sewardj0b3fd2d2007-08-21 10:55:26 +00001327 "BAD LINKAGE\n",
sewardjde4a1d02002-03-22 01:27:54 +00001328 listno, b );
1329 BOMB;
1330 }
njn089f51f2005-07-17 18:12:00 +00001331 b_pszB = get_pszB(a, b);
nethercote2d5b8162004-08-11 09:40:52 +00001332 if (b_pszB < list_min_pszB || b_pszB > list_max_pszB) {
sewardj0b3fd2d2007-08-21 10:55:26 +00001333 VG_(printf)(
floriana5e06c32015-08-05 21:16:09 +00001334 "sanity_check_malloc_arena: list %u at %p: "
njn8a7b41b2007-09-23 00:51:24 +00001335 "WRONG CHAIN SIZE %luB (%luB, %luB)\n",
nethercote2d5b8162004-08-11 09:40:52 +00001336 listno, b, b_pszB, list_min_pszB, list_max_pszB );
sewardjde4a1d02002-03-22 01:27:54 +00001337 BOMB;
1338 }
1339 blockctr_li++;
1340 if (b == a->freelist[listno]) break;
1341 }
1342 }
1343
1344 if (blockctr_sb_free != blockctr_li) {
nethercote2d5b8162004-08-11 09:40:52 +00001345# ifdef VERBOSE_MALLOC
1346 VG_(printf)( "sanity_check_malloc_arena: BLOCK COUNT MISMATCH "
1347 "(via sbs %d, via lists %d)\n",
1348 blockctr_sb_free, blockctr_li );
1349# endif
sewardjde4a1d02002-03-22 01:27:54 +00001350 ppSuperblocks(a);
1351 BOMB;
1352 }
1353
nethercote885dd912004-08-03 23:14:00 +00001354 if (VG_(clo_verbosity) > 2)
1355 VG_(message)(Vg_DebugMsg,
floriana5e06c32015-08-05 21:16:09 +00001356 "%-8s: %2u sbs, %5u bs, %2u/%-2u free bs, "
1357 "%7lu mmap, %7lu loan\n",
nethercote885dd912004-08-03 23:14:00 +00001358 a->name,
1359 superblockctr,
1360 blockctr_sb, blockctr_sb_free, blockctr_li,
sewardj7d1064a2011-02-23 13:18:56 +00001361 a->stats__bytes_mmaped, a->stats__bytes_on_loan);
sewardjde4a1d02002-03-22 01:27:54 +00001362# undef BOMB
1363}
1364
1365
sewardj9c606bd2008-09-18 18:12:50 +00001366#define N_AN_CCS 1000
1367
florian54fe2022012-10-27 23:07:42 +00001368typedef struct {
1369 ULong nBytes;
1370 ULong nBlocks;
1371 const HChar* cc;
1372} AnCC;
sewardj9c606bd2008-09-18 18:12:50 +00001373
1374static AnCC anCCs[N_AN_CCS];
1375
philippe4f6f3362014-04-19 00:25:54 +00001376/* Sort by decreasing cost centre nBytes, so that the biggest
1377   cost centres come out at the top. */
florian6bd9dc12012-11-23 16:17:43 +00001378static Int cmp_AnCC_by_vol ( const void* v1, const void* v2 ) {
florian3e798632012-11-24 19:41:54 +00001379 const AnCC* ancc1 = v1;
1380 const AnCC* ancc2 = v2;
philippe4f6f3362014-04-19 00:25:54 +00001381 if (ancc1->nBytes < ancc2->nBytes) return 1;
1382 if (ancc1->nBytes > ancc2->nBytes) return -1;
sewardj9c606bd2008-09-18 18:12:50 +00001383 return 0;
1384}
1385
1386static void cc_analyse_alloc_arena ( ArenaId aid )
1387{
1388 Word i, j, k;
1389 Arena* a;
1390 Block* b;
1391 Bool thisFree, lastWasFree;
1392 SizeT b_bszB;
1393
florian54fe2022012-10-27 23:07:42 +00001394 const HChar* cc;
sewardj9c606bd2008-09-18 18:12:50 +00001395 UInt n_ccs = 0;
1396 //return;
1397 a = arenaId_to_ArenaP(aid);
1398 if (a->name == NULL) {
1399 /* arena is not in use, is not initialised and will fail the
1400 sanity check that follows. */
1401 return;
1402 }
1403
1404 sanity_check_malloc_arena(aid);
1405
1406 VG_(printf)(
philippe3743c912015-05-20 14:33:30 +00001407 "-------- Arena \"%s\": %'lu/%'lu max/curr mmap'd, "
sewardjd043de92011-09-26 11:28:20 +00001408 "%llu/%llu unsplit/split sb unmmap'd, "
philippe3743c912015-05-20 14:33:30 +00001409 "%'lu/%'lu max/curr on_loan %lu rzB --------\n",
sewardjd8b93462011-09-10 10:17:35 +00001410 a->name, a->stats__bytes_mmaped_max, a->stats__bytes_mmaped,
sewardjd043de92011-09-26 11:28:20 +00001411 a->stats__nreclaim_unsplit, a->stats__nreclaim_split,
philipped99c26a2012-07-31 22:17:28 +00001412 a->stats__bytes_on_loan_max, a->stats__bytes_on_loan,
1413 a->rz_szB
sewardj9c606bd2008-09-18 18:12:50 +00001414 );
1415
1416 for (j = 0; j < a->sblocks_used; ++j) {
1417 Superblock * sb = a->sblocks[j];
1418 lastWasFree = False;
1419 for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
1420 b = (Block*)&sb->payload_bytes[i];
1421 b_bszB = get_bszB_as_is(b);
1422 if (!blockSane(a, b)) {
1423 VG_(printf)("sanity_check_malloc_arena: sb %p, block %ld "
1424 "(bszB %lu): BAD\n", sb, i, b_bszB );
floriane2800c92014-09-15 20:57:45 +00001425 vg_assert(0);
sewardj9c606bd2008-09-18 18:12:50 +00001426 }
1427 thisFree = !is_inuse_block(b);
1428 if (thisFree && lastWasFree) {
1429 VG_(printf)("sanity_check_malloc_arena: sb %p, block %ld "
1430 "(bszB %lu): UNMERGED FREES\n", sb, i, b_bszB );
floriane2800c92014-09-15 20:57:45 +00001431 vg_assert(0);
sewardj9c606bd2008-09-18 18:12:50 +00001432 }
1433 lastWasFree = thisFree;
1434
1435 if (thisFree) continue;
1436
philippe0b9d0642014-06-30 19:47:24 +00001437 if (VG_(clo_profile_heap))
1438 cc = get_cc(b);
1439 else
1440 cc = "(--profile-heap=yes for details)";
sewardj9c606bd2008-09-18 18:12:50 +00001441 if (0)
1442 VG_(printf)("block: inUse=%d pszB=%d cc=%s\n",
1443 (Int)(!thisFree),
1444 (Int)bszB_to_pszB(a, b_bszB),
1445 get_cc(b));
floriane2800c92014-09-15 20:57:45 +00001446 vg_assert(cc);
sewardj9c606bd2008-09-18 18:12:50 +00001447 for (k = 0; k < n_ccs; k++) {
floriane2800c92014-09-15 20:57:45 +00001448 vg_assert(anCCs[k].cc);
sewardj9c606bd2008-09-18 18:12:50 +00001449 if (0 == VG_(strcmp)(cc, anCCs[k].cc))
1450 break;
1451 }
floriane2800c92014-09-15 20:57:45 +00001452 vg_assert(k >= 0 && k <= n_ccs);
sewardj9c606bd2008-09-18 18:12:50 +00001453
1454 if (k == n_ccs) {
floriane2800c92014-09-15 20:57:45 +00001455 vg_assert(n_ccs < N_AN_CCS-1);
sewardj9c606bd2008-09-18 18:12:50 +00001456 n_ccs++;
1457 anCCs[k].nBytes = 0;
1458 anCCs[k].nBlocks = 0;
1459 anCCs[k].cc = cc;
1460 }
1461
floriane2800c92014-09-15 20:57:45 +00001462 vg_assert(k >= 0 && k < n_ccs && k < N_AN_CCS);
sewardj9c606bd2008-09-18 18:12:50 +00001463 anCCs[k].nBytes += (ULong)bszB_to_pszB(a, b_bszB);
1464 anCCs[k].nBlocks++;
1465 }
1466 if (i > sb->n_payload_bytes) {
1467 VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
1468 "overshoots end\n", sb);
floriane2800c92014-09-15 20:57:45 +00001469 vg_assert(0);
sewardj9c606bd2008-09-18 18:12:50 +00001470 }
1471 }
1472
philippe6e4b7132013-01-18 06:19:49 +00001473 if (a->stats__perm_bytes_on_loan > 0) {
floriane2800c92014-09-15 20:57:45 +00001474 vg_assert(n_ccs < N_AN_CCS-1);
philippe6e4b7132013-01-18 06:19:49 +00001475 anCCs[n_ccs].nBytes = a->stats__perm_bytes_on_loan;
1476 anCCs[n_ccs].nBlocks = a->stats__perm_blocks;
1477 anCCs[n_ccs].cc = "perm_malloc";
1478 n_ccs++;
1479 }
1480
sewardj9c606bd2008-09-18 18:12:50 +00001481 VG_(ssort)( &anCCs[0], n_ccs, sizeof(anCCs[0]), cmp_AnCC_by_vol );
1482
1483 for (k = 0; k < n_ccs; k++) {
1484 VG_(printf)("%'13llu in %'9llu: %s\n",
1485 anCCs[k].nBytes, anCCs[k].nBlocks, anCCs[k].cc );
1486 }
1487
1488 VG_(printf)("\n");
1489}
1490
1491
nethercote885dd912004-08-03 23:14:00 +00001492void VG_(sanity_check_malloc_all) ( void )
sewardjde4a1d02002-03-22 01:27:54 +00001493{
nethercote7ac7f7b2004-11-02 12:36:02 +00001494 UInt i;
sewardj0b3fd2d2007-08-21 10:55:26 +00001495 for (i = 0; i < VG_N_ARENAS; i++) {
1496 if (i == VG_AR_CLIENT && !client_inited)
1497 continue;
nethercote885dd912004-08-03 23:14:00 +00001498 sanity_check_malloc_arena ( i );
sewardj0b3fd2d2007-08-21 10:55:26 +00001499 }
sewardjde4a1d02002-03-22 01:27:54 +00001500}
1501
philippe07c08522014-05-14 20:39:27 +00001502void VG_(describe_arena_addr) ( Addr a, AddrArenaInfo* aai )
1503{
1504 UInt i;
1505 Superblock *sb;
1506 Arena *arena;
1507
1508 for (i = 0; i < VG_N_ARENAS; i++) {
1509 if (i == VG_AR_CLIENT && !client_inited)
1510 continue;
1511 arena = arenaId_to_ArenaP(i);
1512 sb = maybe_findSb( arena, a );
1513 if (sb != NULL) {
1514 Word j;
1515 SizeT b_bszB;
1516 Block *b = NULL;
1517
1518 aai->aid = i;
1519 aai->name = arena->name;
1520 for (j = 0; j < sb->n_payload_bytes; j += mk_plain_bszB(b_bszB)) {
1521 b = (Block*)&sb->payload_bytes[j];
1522 b_bszB = get_bszB_as_is(b);
1523 if (a < (Addr)b + mk_plain_bszB(b_bszB))
1524 break;
1525 }
1526 vg_assert (b);
1527 aai->block_szB = get_pszB(arena, b);
1528 aai->rwoffset = a - (Addr)get_block_payload(arena, b);
1529 aai->free = !is_inuse_block(b);
1530 return;
1531 }
1532 }
1533 aai->aid = 0;
1534 aai->name = NULL;
1535 aai->block_szB = 0;
1536 aai->rwoffset = 0;
1537 aai->free = False;
1538}
sewardjde4a1d02002-03-22 01:27:54 +00001539
nethercote2d5b8162004-08-11 09:40:52 +00001540/*------------------------------------------------------------*/
1541/*--- Creating and deleting blocks. ---*/
1542/*------------------------------------------------------------*/
1543
1544// Mark the bytes at b .. b+bszB-1 as not in use, and add them to the
1545// relevant free list.
1546
1547static
nethercote7ac7f7b2004-11-02 12:36:02 +00001548void mkFreeBlock ( Arena* a, Block* b, SizeT bszB, UInt b_lno )
jsewardb1a26ae2004-03-14 03:06:37 +00001549{
nethercote7ac7f7b2004-11-02 12:36:02 +00001550 SizeT pszB = bszB_to_pszB(a, bszB);
nethercote2d5b8162004-08-11 09:40:52 +00001551 vg_assert(b_lno == pszB_to_listNo(pszB));
philippe72faf102012-03-11 22:24:03 +00001552 INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(b, bszB));
nethercote2d5b8162004-08-11 09:40:52 +00001553 // Set the size fields and indicate not-in-use.
njn8d3f8452005-07-20 04:12:41 +00001554 set_bszB(b, mk_free_bszB(bszB));
nethercote2d5b8162004-08-11 09:40:52 +00001555
1556 // Add to the relevant list.
1557 if (a->freelist[b_lno] == NULL) {
1558 set_prev_b(b, b);
1559 set_next_b(b, b);
1560 a->freelist[b_lno] = b;
1561 } else {
1562 Block* b_prev = get_prev_b(a->freelist[b_lno]);
1563 Block* b_next = a->freelist[b_lno];
1564 set_next_b(b_prev, b);
1565 set_prev_b(b_next, b);
1566 set_next_b(b, b_next);
1567 set_prev_b(b, b_prev);
1568 }
1569# ifdef DEBUG_MALLOC
1570 (void)blockSane(a,b);
1571# endif
1572}
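// After the insertion above, the freelist is a circular doubly-linked
// list, so for any block x on list b_lno the invariants
//    get_next_b(get_prev_b(x)) == x  and  get_prev_b(get_next_b(x)) == x
// hold; a one-element list satisfies them degenerately with
// get_next_b(x) == get_prev_b(x) == x.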
1573
1574// Mark the bytes at b .. b+bszB-1 as in use, and set up the block
1575// appropriately.
1576static
nethercote7ac7f7b2004-11-02 12:36:02 +00001577void mkInuseBlock ( Arena* a, Block* b, SizeT bszB )
nethercote2d5b8162004-08-11 09:40:52 +00001578{
nethercote7ac7f7b2004-11-02 12:36:02 +00001579 UInt i;
nethercote2d5b8162004-08-11 09:40:52 +00001580 vg_assert(bszB >= min_useful_bszB(a));
philippe72faf102012-03-11 22:24:03 +00001581 INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(b, bszB));
njn8d3f8452005-07-20 04:12:41 +00001582 set_bszB(b, mk_inuse_bszB(bszB));
nethercote2d5b8162004-08-11 09:40:52 +00001583 set_prev_b(b, NULL); // Take off freelist
1584 set_next_b(b, NULL); // ditto
1585 if (!a->clientmem) {
1586 for (i = 0; i < a->rz_szB; i++) {
njn1dcee092009-02-24 03:07:37 +00001587 set_rz_lo_byte(b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK));
1588 set_rz_hi_byte(b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK));
nethercote2d5b8162004-08-11 09:40:52 +00001589 }
1590 }
1591# ifdef DEBUG_MALLOC
1592 (void)blockSane(a,b);
1593# endif
1594}
1595
philippe0b9d0642014-06-30 19:47:24 +00001596// Mark the bytes at b .. b+bszB-1 as being part of a block that has been shrunk.
1597static
1598void shrinkInuseBlock ( Arena* a, Block* b, SizeT bszB )
1599{
1600 UInt i;
1601
1602 vg_assert(bszB >= min_useful_bszB(a));
1603 INNER_REQUEST(mkBhdrAccess(a,b));
1604 set_bszB(b, mk_inuse_bszB(bszB));
1605 if (!a->clientmem) {
1606 for (i = 0; i < a->rz_szB; i++) {
1607 set_rz_lo_byte(b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK));
1608 set_rz_hi_byte(b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK));
1609 }
1610 }
1611 INNER_REQUEST(mkBhdrNoAccess(a,b));
1612
1613# ifdef DEBUG_MALLOC
1614 (void)blockSane(a,b);
1615# endif
1616}
1617
nethercote2d5b8162004-08-11 09:40:52 +00001618// Remove a block from a given list. Does no sanity checking.
1619static
nethercote7ac7f7b2004-11-02 12:36:02 +00001620void unlinkBlock ( Arena* a, Block* b, UInt listno )
nethercote2d5b8162004-08-11 09:40:52 +00001621{
njn6e6588c2005-03-13 18:52:48 +00001622 vg_assert(listno < N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +00001623 if (get_prev_b(b) == b) {
1624 // Only one element in the list; treat it specially.
1625 vg_assert(get_next_b(b) == b);
1626 a->freelist[listno] = NULL;
1627 } else {
1628 Block* b_prev = get_prev_b(b);
1629 Block* b_next = get_next_b(b);
1630 a->freelist[listno] = b_prev;
1631 set_next_b(b_prev, b_next);
1632 set_prev_b(b_next, b_prev);
1633 swizzle ( a, listno );
1634 }
1635 set_prev_b(b, NULL);
1636 set_next_b(b, NULL);
jsewardb1a26ae2004-03-14 03:06:37 +00001637}
1638
1639
sewardjde4a1d02002-03-22 01:27:54 +00001640/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001641/*--- Core-visible functions. ---*/
sewardjde4a1d02002-03-22 01:27:54 +00001642/*------------------------------------------------------------*/
1643
nethercote2d5b8162004-08-11 09:40:52 +00001644// Align the request size.
1645static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +00001646SizeT align_req_pszB ( SizeT req_pszB )
nethercote2d5b8162004-08-11 09:40:52 +00001647{
nethercote7ac7f7b2004-11-02 12:36:02 +00001648 SizeT n = VG_MIN_MALLOC_SZB-1;
nethercote2d5b8162004-08-11 09:40:52 +00001649 return ((req_pszB + n) & (~n));
1650}
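// Worked examples, assuming VG_MIN_MALLOC_SZB == 16 (illustrative; the
// actual value depends on the platform):
//    align_req_pszB(1)  == (1  + 15) & ~15 == 16
//    align_req_pszB(16) == (16 + 15) & ~15 == 16
//    align_req_pszB(17) == (17 + 15) & ~15 == 32
// i.e. every request is rounded up to a multiple of VG_MIN_MALLOC_SZB.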
1651
philippe6e4b7132013-01-18 06:19:49 +00001652static
1653void add_one_block_to_stats (Arena* a, SizeT loaned)
1654{
1655 a->stats__bytes_on_loan += loaned;
1656 if (a->stats__bytes_on_loan > a->stats__bytes_on_loan_max) {
1657 a->stats__bytes_on_loan_max = a->stats__bytes_on_loan;
1658 if (a->stats__bytes_on_loan_max >= a->next_profile_at) {
1659         /* next profile after 5% more growth */
1660 a->next_profile_at
1661 = (SizeT)(
1662 (((ULong)a->stats__bytes_on_loan_max) * 105ULL) / 100ULL );
1663 if (VG_(clo_profile_heap))
1664 cc_analyse_alloc_arena(arenaP_to_ArenaId (a));
1665 }
1666 }
1667 a->stats__tot_blocks += (ULong)1;
1668 a->stats__tot_bytes += (ULong)loaned;
1669}
1670
florian9b29b142014-09-02 09:49:53 +00001671/* Allocate a piece of memory of req_pszB bytes on the given arena.
1672 The function may return NULL if (and only if) aid == VG_AR_CLIENT.
1673 Otherwise, the function returns a non-NULL value. */
florian54fe2022012-10-27 23:07:42 +00001674void* VG_(arena_malloc) ( ArenaId aid, const HChar* cc, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00001675{
nethercote7ac7f7b2004-11-02 12:36:02 +00001676 SizeT req_bszB, frag_bszB, b_bszB;
sewardj0b3fd2d2007-08-21 10:55:26 +00001677 UInt lno, i;
sewardjd8b93462011-09-10 10:17:35 +00001678 Superblock* new_sb = NULL;
nethercote2d5b8162004-08-11 09:40:52 +00001679 Block* b = NULL;
sewardjde4a1d02002-03-22 01:27:54 +00001680 Arena* a;
jsewardb1a26ae2004-03-14 03:06:37 +00001681 void* v;
sewardj7d1064a2011-02-23 13:18:56 +00001682 UWord stats__nsearches = 0;
sewardjde4a1d02002-03-22 01:27:54 +00001683
sewardj45f4e7c2005-09-27 19:20:21 +00001684 ensure_mm_init(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001685 a = arenaId_to_ArenaP(aid);
1686
nethercote7ac7f7b2004-11-02 12:36:02 +00001687 vg_assert(req_pszB < MAX_PSZB);
nethercote2d5b8162004-08-11 09:40:52 +00001688 req_pszB = align_req_pszB(req_pszB);
1689 req_bszB = pszB_to_bszB(a, req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00001690
sewardj9c606bd2008-09-18 18:12:50 +00001691 // You must provide a cost-center name against which to charge
1692 // this allocation; it isn't optional.
1693 vg_assert(cc);
1694
nethercote2d5b8162004-08-11 09:40:52 +00001695 // Scan through all the big-enough freelists for a block.
njn4ab6d532007-10-16 23:18:06 +00001696 //
1697 // Nb: this scanning might be expensive in some cases. Eg. if you
1698 // allocate lots of small objects without freeing them, but no
1699   // medium-sized objects, it will repeatedly scan through the whole
1700   // list, and each time not find any free blocks until the last element.
1701 //
1702 // If this becomes a noticeable problem... the loop answers the question
1703 // "where is the first nonempty list above me?" And most of the time,
1704 // you ask the same question and get the same answer. So it would be
1705 // good to somehow cache the results of previous searches.
1706 // One possibility is an array (with N_MALLOC_LISTS elements) of
1707 // shortcuts. shortcut[i] would give the index number of the nearest
1708 // larger list above list i which is non-empty. Then this loop isn't
1709 // necessary. However, we'd have to modify some section [ .. i-1] of the
1710 // shortcut array every time a list [i] changes from empty to nonempty or
1711 // back. This would require care to avoid pathological worst-case
1712 // behaviour.
1713 //
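   // A minimal sketch of that shortcut scheme (hypothetical, not
   // implemented; the names below are invented for illustration).  Let
   // shortcut[i] be the smallest j >= i with a->freelist[j] != NULL,
   // with a sentinel shortcut[N_MALLOC_LISTS] == N_MALLOC_LISTS.  The
   // shortcut values are non-decreasing in i, so both maintenance fixups
   // can stop at the first entry that is already correct:
   //
   //    // list i just went empty -> nonempty
   //    for (Int k = i; k >= 0 && shortcut[k] > i; k--)
   //       shortcut[k] = i;
   //
   //    // list i just went nonempty -> empty
   //    for (Int k = i; k >= 0 && shortcut[k] == i; k--)
   //       shortcut[k] = shortcut[i+1];
   //
   // The search below would then start at lno = shortcut[lno] and jump
   // straight from an exhausted list to shortcut[lno+1].
   //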
njn6e6588c2005-03-13 18:52:48 +00001714 for (lno = pszB_to_listNo(req_pszB); lno < N_MALLOC_LISTS; lno++) {
sewardj7d1064a2011-02-23 13:18:56 +00001715 UWord nsearches_this_level = 0;
sewardjde4a1d02002-03-22 01:27:54 +00001716 b = a->freelist[lno];
nethercote2d5b8162004-08-11 09:40:52 +00001717 if (NULL == b) continue; // If this list is empty, try the next one.
sewardjde4a1d02002-03-22 01:27:54 +00001718 while (True) {
sewardj7d1064a2011-02-23 13:18:56 +00001719 stats__nsearches++;
1720 nsearches_this_level++;
1721 if (UNLIKELY(nsearches_this_level >= 100)
1722 && lno < N_MALLOC_LISTS-1) {
1723 /* Avoid excessive scanning on this freelist, and instead
1724 try the next one up. But first, move this freelist's
1725 start pointer one element along, so as to ensure that
1726 subsequent searches of this list don't endlessly
1727 revisit only these 100 elements, but in fact slowly
1728 progress through the entire list. */
1729 b = a->freelist[lno];
1730 vg_assert(b); // this list must be nonempty!
1731 a->freelist[lno] = get_next_b(b); // step one along
1732 break;
1733 }
njnd0e685c2005-07-17 17:55:42 +00001734 b_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +00001735 if (b_bszB >= req_bszB) goto obtained_block; // success!
1736 b = get_next_b(b);
1737 if (b == a->freelist[lno]) break; // traversed entire freelist
sewardjde4a1d02002-03-22 01:27:54 +00001738 }
sewardjde4a1d02002-03-22 01:27:54 +00001739 }
1740
nethercote2d5b8162004-08-11 09:40:52 +00001741 // If we reach here, no suitable block found, allocate a new superblock
njn6e6588c2005-03-13 18:52:48 +00001742 vg_assert(lno == N_MALLOC_LISTS);
nethercote2d5b8162004-08-11 09:40:52 +00001743 new_sb = newSuperblock(a, req_bszB);
1744 if (NULL == new_sb) {
1745      // Should only fail for the client arena; otherwise we should have
1746      // aborted already.
1747 vg_assert(VG_AR_CLIENT == aid);
1748 return NULL;
sewardjde4a1d02002-03-22 01:27:54 +00001749 }
sewardj0b3fd2d2007-08-21 10:55:26 +00001750
1751 vg_assert(a->sblocks_used <= a->sblocks_size);
1752 if (a->sblocks_used == a->sblocks_size) {
1753 Superblock ** array;
philippe17e76ec2014-04-20 19:50:13 +00001754 SysRes sres = VG_(am_mmap_anon_float_valgrind)(sizeof(Superblock *) *
sewardj0b3fd2d2007-08-21 10:55:26 +00001755 a->sblocks_size * 2);
njncda2f0f2009-05-18 02:12:08 +00001756 if (sr_isError(sres)) {
sewardj0b3fd2d2007-08-21 10:55:26 +00001757 VG_(out_of_memory_NORETURN)("arena_init", sizeof(Superblock *) *
1758 a->sblocks_size * 2);
1759 /* NOTREACHED */
1760 }
florian44bd4462014-12-29 17:04:46 +00001761 array = (Superblock**)(Addr)sr_Res(sres);
sewardj0b3fd2d2007-08-21 10:55:26 +00001762 for (i = 0; i < a->sblocks_used; ++i) array[i] = a->sblocks[i];
1763
1764 a->sblocks_size *= 2;
1765 a->sblocks = array;
1766 VG_(debugLog)(1, "mallocfree",
floriana5e06c32015-08-05 21:16:09 +00001767 "sblock array for arena `%s' resized to %lu\n",
sewardj0b3fd2d2007-08-21 10:55:26 +00001768 a->name, a->sblocks_size);
1769 }
1770
1771 vg_assert(a->sblocks_used < a->sblocks_size);
1772
1773 i = a->sblocks_used;
1774 while (i > 0) {
1775 if (a->sblocks[i-1] > new_sb) {
1776 a->sblocks[i] = a->sblocks[i-1];
1777 } else {
1778 break;
1779 }
1780 --i;
1781 }
1782 a->sblocks[i] = new_sb;
1783 a->sblocks_used++;
1784
nethercote2d5b8162004-08-11 09:40:52 +00001785 b = (Block*)&new_sb->payload_bytes[0];
1786 lno = pszB_to_listNo(bszB_to_pszB(a, new_sb->n_payload_bytes));
1787 mkFreeBlock ( a, b, new_sb->n_payload_bytes, lno);
sewardj94c8eb42008-09-19 20:13:39 +00001788 if (VG_(clo_profile_heap))
1789 set_cc(b, "admin.free-new-sb-1");
nethercote2d5b8162004-08-11 09:40:52 +00001790 // fall through
sewardjde4a1d02002-03-22 01:27:54 +00001791
nethercote2d5b8162004-08-11 09:40:52 +00001792 obtained_block:
1793 // Ok, we can allocate from b, which lives in list lno.
sewardjde4a1d02002-03-22 01:27:54 +00001794 vg_assert(b != NULL);
njn6e6588c2005-03-13 18:52:48 +00001795 vg_assert(lno < N_MALLOC_LISTS);
sewardjde4a1d02002-03-22 01:27:54 +00001796 vg_assert(a->freelist[lno] != NULL);
njnd0e685c2005-07-17 17:55:42 +00001797 b_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +00001798 // req_bszB is the size of the block we are after. b_bszB is the
1799   // size of what we've actually got.
1800 vg_assert(b_bszB >= req_bszB);
sewardjde4a1d02002-03-22 01:27:54 +00001801
nethercote2d5b8162004-08-11 09:40:52 +00001802 // Could we split this block and still get a useful fragment?
sewardjd043de92011-09-26 11:28:20 +00001803   // A block in an unsplittable superblock can never be split.
nethercote2d5b8162004-08-11 09:40:52 +00001804 frag_bszB = b_bszB - req_bszB;
sewardjd8b93462011-09-10 10:17:35 +00001805 if (frag_bszB >= min_useful_bszB(a)
sewardjd043de92011-09-26 11:28:20 +00001806 && (NULL == new_sb || ! new_sb->unsplittable)) {
nethercote2d5b8162004-08-11 09:40:52 +00001807 // Yes, split block in two, put the fragment on the appropriate free
1808 // list, and update b_bszB accordingly.
1809 // printf( "split %dB into %dB and %dB\n", b_bszB, req_bszB, frag_bszB );
sewardjde4a1d02002-03-22 01:27:54 +00001810 unlinkBlock(a, b, lno);
nethercote2d5b8162004-08-11 09:40:52 +00001811 mkInuseBlock(a, b, req_bszB);
sewardj94c8eb42008-09-19 20:13:39 +00001812 if (VG_(clo_profile_heap))
1813 set_cc(b, cc);
nethercote2d5b8162004-08-11 09:40:52 +00001814 mkFreeBlock(a, &b[req_bszB], frag_bszB,
1815 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
sewardj94c8eb42008-09-19 20:13:39 +00001816 if (VG_(clo_profile_heap))
1817 set_cc(&b[req_bszB], "admin.fragmentation-1");
njnd0e685c2005-07-17 17:55:42 +00001818 b_bszB = get_bszB(b);
nethercote2d5b8162004-08-11 09:40:52 +00001819 } else {
1820 // No, mark as in use and use as-is.
1821 unlinkBlock(a, b, lno);
1822 mkInuseBlock(a, b, b_bszB);
sewardj94c8eb42008-09-19 20:13:39 +00001823 if (VG_(clo_profile_heap))
1824 set_cc(b, cc);
sewardjde4a1d02002-03-22 01:27:54 +00001825 }
sewardjde4a1d02002-03-22 01:27:54 +00001826
nethercote2d5b8162004-08-11 09:40:52 +00001827 // Update stats
sewardj7d1064a2011-02-23 13:18:56 +00001828 SizeT loaned = bszB_to_pszB(a, b_bszB);
philippe6e4b7132013-01-18 06:19:49 +00001829 add_one_block_to_stats (a, loaned);
sewardj7d1064a2011-02-23 13:18:56 +00001830 a->stats__nsearches += (ULong)stats__nsearches;
sewardjde4a1d02002-03-22 01:27:54 +00001831
1832# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00001833 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00001834# endif
1835
nethercote2d5b8162004-08-11 09:40:52 +00001836 v = get_block_payload(a, b);
1837 vg_assert( (((Addr)v) & (VG_MIN_MALLOC_SZB-1)) == 0 );
sewardjb5f6f512005-03-10 23:59:00 +00001838
philippe72faf102012-03-11 22:24:03 +00001839 // Which size should we pass to VALGRIND_MALLOCLIKE_BLOCK ?
1840 // We have 2 possible options:
1841 // 1. The final resulting usable size.
1842 // 2. The initial (non-aligned) req_pszB.
1843 // Memcheck implements option 2 easily, as the initial requested size
1844 // is maintained in the mc_chunk data structure.
1845 // This is not as easy in the core, as there is no such structure.
1846 // (note: using the aligned req_pszB is not simpler than 2, as
1847 // requesting an aligned req_pszB might still be satisfied by returning
1848   // a (slightly) bigger block than requested if the remaining part
1849   // of a free block is not big enough to make a free block by itself).
1850   // Implementing Sol 2 could be done the following way:
1851   // After having called VALGRIND_MALLOCLIKE_BLOCK, the non-accessible
1852 // redzone just after the block can be used to determine the
1853 // initial requested size.
1854 // Currently, not implemented => we use Option 1.
1855 INNER_REQUEST
1856 (VALGRIND_MALLOCLIKE_BLOCK(v,
1857 VG_(arena_malloc_usable_size)(aid, v),
1858 a->rz_szB, False));
sewardja53462a2007-11-24 23:37:07 +00001859
1860 /* For debugging/testing purposes, fill the newly allocated area
1861 with a definite value in an attempt to shake out any
1862 uninitialised uses of the data (by V core / V tools, not by the
1863 client). Testing on 25 Nov 07 with the values 0x00, 0xFF, 0x55,
1864 0xAA showed no differences in the regression tests on
1865 amd64-linux. Note, is disabled by default. */
1866 if (0 && aid != VG_AR_CLIENT)
1867 VG_(memset)(v, 0xAA, (SizeT)req_pszB);
1868
jsewardb1a26ae2004-03-14 03:06:37 +00001869 return v;
sewardjde4a1d02002-03-22 01:27:54 +00001870}
1871
sewardjd043de92011-09-26 11:28:20 +00001872// If the arena already has a deferred reclaimed superblock and
1873// that superblock is still reclaimable, then that superblock is first
1874// reclaimed.
1875// sb then becomes the arena's new deferred superblock.
1876// Passing NULL as sb allows reclaiming a deferred sb without setting a new
1877// deferred reclaim.
1878static
1879void deferred_reclaimSuperblock ( Arena* a, Superblock* sb)
1880{
1881
1882 if (sb == NULL) {
1883 if (!a->deferred_reclaimed_sb)
1884 // no deferred sb to reclaim now, nothing to do in the future =>
1885 // return directly.
1886 return;
1887
1888 VG_(debugLog)(1, "mallocfree",
1889 "deferred_reclaimSuperblock NULL "
1890 "(prev %p) owner %s/%s\n",
1891 a->deferred_reclaimed_sb,
1892 a->clientmem ? "CLIENT" : "VALGRIND", a->name );
1893 } else
1894 VG_(debugLog)(1, "mallocfree",
floriana5e06c32015-08-05 21:16:09 +00001895 "deferred_reclaimSuperblock at %p (pszB %7lu) %s "
sewardjd043de92011-09-26 11:28:20 +00001896 "(prev %p) owner %s/%s\n",
1897 sb, sb->n_payload_bytes,
1898 (sb->unsplittable ? "unsplittable" : ""),
1899 a->deferred_reclaimed_sb,
1900 a->clientmem ? "CLIENT" : "VALGRIND", a->name );
1901
1902 if (a->deferred_reclaimed_sb && a->deferred_reclaimed_sb != sb) {
1903      // If we are deferring a superblock other than the one currently
1904      // deferred, then if that one can still be reclaimed, reclaim it now.
1905 // Note that we might have a re-deferred reclaim of the same block
1906 // with a sequence: free (causing a deferred reclaim of sb)
1907 // alloc (using a piece of memory of the deferred sb)
1908 // free of the just alloc-ed block (causing a re-defer).
1909 UByte* def_sb_start;
1910 UByte* def_sb_end;
1911 Superblock* def_sb;
1912 Block* b;
1913
1914 def_sb = a->deferred_reclaimed_sb;
1915 def_sb_start = &def_sb->payload_bytes[0];
1916 def_sb_end = &def_sb->payload_bytes[def_sb->n_payload_bytes - 1];
1917 b = (Block *)def_sb_start;
1918 vg_assert (blockSane(a, b));
1919
1920 // Check if the deferred_reclaimed_sb is still reclaimable.
1921 // If yes, we will execute the reclaim.
1922 if (!is_inuse_block(b)) {
1923 // b (at the beginning of def_sb) is not in use.
1924 UInt b_listno;
1925 SizeT b_bszB, b_pszB;
1926 b_bszB = get_bszB(b);
1927 b_pszB = bszB_to_pszB(a, b_bszB);
1928 if (b + b_bszB-1 == (Block*)def_sb_end) {
1929 // b (not in use) covers the full superblock.
1930 // => def_sb is still reclaimable
1931 // => execute now the reclaim of this def_sb.
1932 b_listno = pszB_to_listNo(b_pszB);
1933 unlinkBlock( a, b, b_listno );
1934 reclaimSuperblock (a, def_sb);
1935 a->deferred_reclaimed_sb = NULL;
1936 }
1937 }
1938 }
1939
1940 // sb (possibly NULL) becomes the new deferred reclaimed superblock.
1941 a->deferred_reclaimed_sb = sb;
1942}
1943
philippe0b9d0642014-06-30 19:47:24 +00001944/* b must be a free block, of size b_bszB.
1945 If b is followed by another free block, merge them.
1946   If b is preceded by another free block, merge them.
1947 If the merge results in the superblock being fully free,
1948 deferred_reclaimSuperblock the superblock. */
1949static void mergeWithFreeNeighbours (Arena* a, Superblock* sb,
1950 Block* b, SizeT b_bszB)
1951{
1952 UByte* sb_start;
1953 UByte* sb_end;
1954 Block* other_b;
1955 SizeT other_bszB;
1956 UInt b_listno;
1957
1958 sb_start = &sb->payload_bytes[0];
1959 sb_end = &sb->payload_bytes[sb->n_payload_bytes - 1];
1960
1961 b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
1962
1963 // See if this block can be merged with its successor.
1964 // First test if we're far enough before the superblock's end to possibly
1965 // have a successor.
1966 other_b = b + b_bszB;
1967 if (other_b+min_useful_bszB(a)-1 <= (Block*)sb_end) {
1968 // Ok, we have a successor, merge if it's not in use.
1969 other_bszB = get_bszB(other_b);
1970 if (!is_inuse_block(other_b)) {
1971 // VG_(printf)( "merge-successor\n");
1972# ifdef DEBUG_MALLOC
1973 vg_assert(blockSane(a, other_b));
1974# endif
1975 unlinkBlock( a, b, b_listno );
1976 unlinkBlock( a, other_b,
1977 pszB_to_listNo(bszB_to_pszB(a,other_bszB)) );
1978 b_bszB += other_bszB;
1979 b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
1980 mkFreeBlock( a, b, b_bszB, b_listno );
1981 if (VG_(clo_profile_heap))
1982 set_cc(b, "admin.free-2");
1983 }
1984 } else {
1985 // Not enough space for successor: check that b is the last block
1986      // i.e. there are no unused bytes at the end of the Superblock.
1987 vg_assert(other_b-1 == (Block*)sb_end);
1988 }
1989
1990 // Then see if this block can be merged with its predecessor.
1991 // First test if we're far enough after the superblock's start to possibly
1992 // have a predecessor.
1993 if (b >= (Block*)sb_start + min_useful_bszB(a)) {
1994 // Ok, we have a predecessor, merge if it's not in use.
1995 other_b = get_predecessor_block( b );
1996 other_bszB = get_bszB(other_b);
1997 if (!is_inuse_block(other_b)) {
1998 // VG_(printf)( "merge-predecessor\n");
1999 unlinkBlock( a, b, b_listno );
2000 unlinkBlock( a, other_b,
2001 pszB_to_listNo(bszB_to_pszB(a, other_bszB)) );
2002 b = other_b;
2003 b_bszB += other_bszB;
2004 b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
2005 mkFreeBlock( a, b, b_bszB, b_listno );
2006 if (VG_(clo_profile_heap))
2007 set_cc(b, "admin.free-3");
2008 }
2009 } else {
2010 // Not enough space for predecessor: check that b is the first block,
2011      // i.e. there are no unused bytes at the start of the Superblock.
2012 vg_assert((Block*)sb_start == b);
2013 }
2014
2015 /* If the block b just merged is the only block of the superblock sb,
2016 then we defer reclaim sb. */
2017 if ( ((Block*)sb_start == b) && (b + b_bszB-1 == (Block*)sb_end) ) {
2018 deferred_reclaimSuperblock (a, sb);
2019 }
2020}
sewardjde4a1d02002-03-22 01:27:54 +00002021
njn25e49d8e72002-09-23 09:36:25 +00002022void VG_(arena_free) ( ArenaId aid, void* ptr )
sewardjde4a1d02002-03-22 01:27:54 +00002023{
2024 Superblock* sb;
nethercote2d5b8162004-08-11 09:40:52 +00002025 Block* b;
philippe0b9d0642014-06-30 19:47:24 +00002026 SizeT b_bszB, b_pszB;
nethercote7ac7f7b2004-11-02 12:36:02 +00002027 UInt b_listno;
sewardjde4a1d02002-03-22 01:27:54 +00002028 Arena* a;
2029
sewardj45f4e7c2005-09-27 19:20:21 +00002030 ensure_mm_init(aid);
sewardjde4a1d02002-03-22 01:27:54 +00002031 a = arenaId_to_ArenaP(aid);
2032
njn25e49d8e72002-09-23 09:36:25 +00002033 if (ptr == NULL) {
njn25e49d8e72002-09-23 09:36:25 +00002034 return;
2035 }
2036
nethercote2d5b8162004-08-11 09:40:52 +00002037 b = get_payload_block(a, ptr);
sewardjde4a1d02002-03-22 01:27:54 +00002038
sewardj3187a4e2005-12-04 23:27:14 +00002039   /* If this is one of V's areas, carefully check the block we're
2040      getting back.  This picks up simple block-end overruns. */
2041 if (aid != VG_AR_CLIENT)
2042 vg_assert(blockSane(a, b));
sewardjde4a1d02002-03-22 01:27:54 +00002043
njne6f9e3b2005-07-17 18:00:57 +00002044 b_bszB = get_bszB(b);
2045 b_pszB = bszB_to_pszB(a, b_bszB);
nethercote2d5b8162004-08-11 09:40:52 +00002046 sb = findSb( a, b );
sewardjde4a1d02002-03-22 01:27:54 +00002047
sewardj7d1064a2011-02-23 13:18:56 +00002048 a->stats__bytes_on_loan -= b_pszB;
njne6f9e3b2005-07-17 18:00:57 +00002049
sewardj3187a4e2005-12-04 23:27:14 +00002050 /* If this is one of V's areas, fill it up with junk to enhance the
2051 chances of catching any later reads of it. Note, 0xDD is
2052 carefully chosen junk :-), in that: (1) 0xDDDDDDDD is an invalid
2053 and non-word-aligned address on most systems, and (2) 0xDD is a
2054 value which is unlikely to be generated by the new compressed
2055 Vbits representation for memcheck. */
2056 if (aid != VG_AR_CLIENT)
2057 VG_(memset)(ptr, 0xDD, (SizeT)b_pszB);
2058
sewardjd043de92011-09-26 11:28:20 +00002059 if (! sb->unsplittable) {
sewardjd8b93462011-09-10 10:17:35 +00002060 // Put this chunk back on a list somewhere.
2061 b_listno = pszB_to_listNo(b_pszB);
2062 mkFreeBlock( a, b, b_bszB, b_listno );
2063 if (VG_(clo_profile_heap))
2064 set_cc(b, "admin.free-1");
sewardjde4a1d02002-03-22 01:27:54 +00002065
philippe0b9d0642014-06-30 19:47:24 +00002066 /* Possibly merge b with its predecessor or successor. */
2067 mergeWithFreeNeighbours (a, sb, b, b_bszB);
sewardjd043de92011-09-26 11:28:20 +00002068
philippe72faf102012-03-11 22:24:03 +00002069 // Inform that ptr has been released. We give redzone size
2070 // 0 instead of a->rz_szB as proper accessibility is done just after.
2071 INNER_REQUEST(VALGRIND_FREELIKE_BLOCK(ptr, 0));
2072
2073 // We need to (re-)establish the minimum accessibility needed
2074 // for free list management. E.g. if block ptr has been put in a free
2075 // list and a neighbour block is released afterwards, the
2076 // "lo" and "hi" portions of the block ptr will be accessed to
2077 // glue the 2 blocks together.
2078 // We could mark the whole block as not accessible, and each time
2079 // transiently mark accessible the needed lo/hi parts. Not done as this
2080 // is quite complex, for very little expected additional bug detection.
2081      // Note that the below marks the (possibly) merged
2082      // block, not the block corresponding to the ptr argument.
2083
2084 // First mark the whole block unaccessible.
2085 INNER_REQUEST(VALGRIND_MAKE_MEM_NOACCESS(b, b_bszB));
2086 // Then mark the relevant administrative headers as defined.
2087 // No need to mark the heap profile portion as defined, this is not
2088 // used for free blocks.
2089 INNER_REQUEST(VALGRIND_MAKE_MEM_DEFINED(b + hp_overhead_szB(),
2090 sizeof(SizeT) + sizeof(void*)));
2091 INNER_REQUEST(VALGRIND_MAKE_MEM_DEFINED(b + b_bszB
2092 - sizeof(SizeT) - sizeof(void*),
2093 sizeof(SizeT) + sizeof(void*)));
nethercote2d5b8162004-08-11 09:40:52 +00002094 } else {
philippe0b9d0642014-06-30 19:47:24 +00002095 vg_assert(unsplittableBlockSane(a, sb, b));
sewardjd8b93462011-09-10 10:17:35 +00002096
philippe72faf102012-03-11 22:24:03 +00002097 // Inform that ptr has been released. Redzone size value
2098 // is not relevant (so we give 0 instead of a->rz_szB)
2099 // as it is expected that the aspacemgr munmap will be used by
2100 // outer to mark the whole superblock as unaccessible.
2101 INNER_REQUEST(VALGRIND_FREELIKE_BLOCK(ptr, 0));
2102
sewardjd043de92011-09-26 11:28:20 +00002103      // Immediately reclaim the unsplittable superblock sb.
sewardjd8b93462011-09-10 10:17:35 +00002104 reclaimSuperblock (a, sb);
sewardjde4a1d02002-03-22 01:27:54 +00002105 }
2106
2107# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00002108 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00002109# endif
2110
sewardjde4a1d02002-03-22 01:27:54 +00002111}
2112
2113
2114/*
2115   The idea for VG_(arena_memalign)() is to allocate a big block, base, and
florianad4e9792015-07-05 21:53:33 +00002116 then split it into two parts: frag, which is returned to the free
2117 pool, and align, which is the bit we're really after. Here's
sewardjde4a1d02002-03-22 01:27:54 +00002118 a picture. L and H denote the block lower and upper overheads, in
nethercote2d5b8162004-08-11 09:40:52 +00002119 bytes. The details are gruesome. Note it is slightly complicated
sewardjde4a1d02002-03-22 01:27:54 +00002120 because the initial request to generate base may return a bigger
2121 block than we asked for, so it is important to distinguish the base
2122 request size and the base actual size.
2123
2124 frag_b align_b
2125 | |
2126 | frag_p | align_p
2127 | | | |
2128 v v v v
2129
2130 +---+ +---+---+ +---+
2131 | L |----------------| H | L |---------------| H |
2132 +---+ +---+---+ +---+
2133
2134 ^ ^ ^
2135 | | :
2136 | base_p this addr must be aligned
2137 |
2138 base_b
2139
2140 . . . . . . .
nethercote2d5b8162004-08-11 09:40:52 +00002141 <------ frag_bszB -------> . . .
2142 . <------------- base_pszB_act -----------> .
sewardjde4a1d02002-03-22 01:27:54 +00002143 . . . . . . .
2144
2145*/
florian54fe2022012-10-27 23:07:42 +00002146void* VG_(arena_memalign) ( ArenaId aid, const HChar* cc,
sewardj9c606bd2008-09-18 18:12:50 +00002147 SizeT req_alignB, SizeT req_pszB )
sewardjde4a1d02002-03-22 01:27:54 +00002148{
nethercote7ac7f7b2004-11-02 12:36:02 +00002149 SizeT base_pszB_req, base_pszB_act, frag_bszB;
nethercote2d5b8162004-08-11 09:40:52 +00002150 Block *base_b, *align_b;
2151 UByte *base_p, *align_p;
nethercote7ac7f7b2004-11-02 12:36:02 +00002152 SizeT saved_bytes_on_loan;
sewardjde4a1d02002-03-22 01:27:54 +00002153 Arena* a;
2154
sewardj45f4e7c2005-09-27 19:20:21 +00002155 ensure_mm_init(aid);
sewardjde4a1d02002-03-22 01:27:54 +00002156 a = arenaId_to_ArenaP(aid);
2157
nethercote7ac7f7b2004-11-02 12:36:02 +00002158 vg_assert(req_pszB < MAX_PSZB);
sewardjde4a1d02002-03-22 01:27:54 +00002159
sewardj9c606bd2008-09-18 18:12:50 +00002160 // You must provide a cost-center name against which to charge
2161 // this allocation; it isn't optional.
2162 vg_assert(cc);
2163
philippef5f6ed12012-06-15 22:19:59 +00002164   // Check that the requested alignment has a plausible size; that is,
nethercote2d5b8162004-08-11 09:40:52 +00002165   // it is neither too small nor too big, and is a power of 2.
2167 if (req_alignB < VG_MIN_MALLOC_SZB
philippef5f6ed12012-06-15 22:19:59 +00002168 || req_alignB > 16 * 1024 * 1024
njn717cde52005-05-10 02:47:21 +00002169 || VG_(log2)( req_alignB ) == -1 /* not a power of 2 */) {
njn36b65172009-04-14 23:43:15 +00002170 VG_(printf)("VG_(arena_memalign)(%p, %lu, %lu)\n"
2171 "bad alignment value %lu\n"
2172 "(it is too small, too big, or not a power of two)",
2173 a, req_alignB, req_pszB, req_alignB );
njn717cde52005-05-10 02:47:21 +00002174 VG_(core_panic)("VG_(arena_memalign)");
nethercote2d5b8162004-08-11 09:40:52 +00002175 /*NOTREACHED*/
sewardjde4a1d02002-03-22 01:27:54 +00002176 }
nethercote2d5b8162004-08-11 09:40:52 +00002177 // Paranoid
2178 vg_assert(req_alignB % VG_MIN_MALLOC_SZB == 0);
sewardjde4a1d02002-03-22 01:27:54 +00002179
2180 /* Required payload size for the aligned chunk. */
nethercote2d5b8162004-08-11 09:40:52 +00002181 req_pszB = align_req_pszB(req_pszB);
sewardjde4a1d02002-03-22 01:27:54 +00002182
nethercote2d5b8162004-08-11 09:40:52 +00002183 /* Payload size to request for the big block that we will split up. */
2184 base_pszB_req = req_pszB + min_useful_bszB(a) + req_alignB;
sewardjde4a1d02002-03-22 01:27:54 +00002185
2186 /* Payload ptr for the block we are going to split. Note this
2187 changes a->bytes_on_loan; we save and restore it ourselves. */
sewardj7d1064a2011-02-23 13:18:56 +00002188 saved_bytes_on_loan = a->stats__bytes_on_loan;
sewardjd8b93462011-09-10 10:17:35 +00002189 {
2190 /* As we will split the block given back by VG_(arena_malloc),
sewardjd043de92011-09-26 11:28:20 +00002191 we have to (temporarily) disable unsplittable for this arena,
2192         as unsplittable superblocks cannot be split. */
2193 const SizeT save_min_unsplittable_sblock_szB
2194 = a->min_unsplittable_sblock_szB;
2195 a->min_unsplittable_sblock_szB = MAX_PSZB;
sewardjd8b93462011-09-10 10:17:35 +00002196 base_p = VG_(arena_malloc) ( aid, cc, base_pszB_req );
sewardjd043de92011-09-26 11:28:20 +00002197 a->min_unsplittable_sblock_szB = save_min_unsplittable_sblock_szB;
sewardjd8b93462011-09-10 10:17:35 +00002198 }
sewardj7d1064a2011-02-23 13:18:56 +00002199 a->stats__bytes_on_loan = saved_bytes_on_loan;
sewardjde4a1d02002-03-22 01:27:54 +00002200
tom8af1a172005-10-06 12:04:26 +00002201 /* Give up if we couldn't allocate enough space */
2202 if (base_p == 0)
2203 return 0;
philippe72faf102012-03-11 22:24:03 +00002204 /* base_p was marked as allocated by VALGRIND_MALLOCLIKE_BLOCK
2205 inside VG_(arena_malloc). We need to indicate it is free, then
2206      we need to mark it undefined to allow the below code to access it. */
2207 INNER_REQUEST(VALGRIND_FREELIKE_BLOCK(base_p, a->rz_szB));
2208 INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(base_p, base_pszB_req));
tom8af1a172005-10-06 12:04:26 +00002209
sewardjde4a1d02002-03-22 01:27:54 +00002210 /* Block ptr for the block we are going to split. */
nethercote2d5b8162004-08-11 09:40:52 +00002211 base_b = get_payload_block ( a, base_p );
sewardjde4a1d02002-03-22 01:27:54 +00002212
2213 /* Pointer to the payload of the aligned block we are going to
2214 return. This has to be suitably aligned. */
nethercote2d5b8162004-08-11 09:40:52 +00002215 align_p = align_upwards ( base_b + 2 * overhead_szB_lo(a)
2216 + overhead_szB_hi(a),
sewardjde4a1d02002-03-22 01:27:54 +00002217 req_alignB );
nethercote2d5b8162004-08-11 09:40:52 +00002218 align_b = get_payload_block(a, align_p);
sewardjde4a1d02002-03-22 01:27:54 +00002219
2220 /* The block size of the fragment we will create. This must be big
2221 enough to actually create a fragment. */
nethercote2d5b8162004-08-11 09:40:52 +00002222 frag_bszB = align_b - base_b;
2223
2224 vg_assert(frag_bszB >= min_useful_bszB(a));
sewardjde4a1d02002-03-22 01:27:54 +00002225
2226 /* The actual payload size of the block we are going to split. */
njn089f51f2005-07-17 18:12:00 +00002227 base_pszB_act = get_pszB(a, base_b);
sewardjde4a1d02002-03-22 01:27:54 +00002228
nethercote2d5b8162004-08-11 09:40:52 +00002229 /* Create the fragment block, and put it back on the relevant free list. */
2230 mkFreeBlock ( a, base_b, frag_bszB,
2231 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)) );
sewardj94c8eb42008-09-19 20:13:39 +00002232 if (VG_(clo_profile_heap))
2233 set_cc(base_b, "admin.frag-memalign-1");
sewardjde4a1d02002-03-22 01:27:54 +00002234
2235 /* Create the aligned block. */
nethercote2d5b8162004-08-11 09:40:52 +00002236 mkInuseBlock ( a, align_b,
2237 base_p + base_pszB_act
2238 + overhead_szB_hi(a) - (UByte*)align_b );
sewardj94c8eb42008-09-19 20:13:39 +00002239 if (VG_(clo_profile_heap))
2240 set_cc(align_b, cc);
sewardjde4a1d02002-03-22 01:27:54 +00002241
2242 /* Final sanity checks. */
njn472cc7c2005-07-17 17:20:30 +00002243 vg_assert( is_inuse_block(get_payload_block(a, align_p)) );
sewardjde4a1d02002-03-22 01:27:54 +00002244
njn089f51f2005-07-17 18:12:00 +00002245 vg_assert(req_pszB <= get_pszB(a, get_payload_block(a, align_p)));
sewardjde4a1d02002-03-22 01:27:54 +00002246
sewardj7d1064a2011-02-23 13:18:56 +00002247 a->stats__bytes_on_loan += get_pszB(a, get_payload_block(a, align_p));
2248 if (a->stats__bytes_on_loan > a->stats__bytes_on_loan_max) {
2249 a->stats__bytes_on_loan_max = a->stats__bytes_on_loan;
2250 }
2251 /* a->stats__tot_blocks, a->stats__tot_bytes, a->stats__nsearches
2252 are updated by the call to VG_(arena_malloc) just a few lines
2253 above. So we don't need to update them here. */
sewardjde4a1d02002-03-22 01:27:54 +00002254
2255# ifdef DEBUG_MALLOC
nethercote885dd912004-08-03 23:14:00 +00002256 sanity_check_malloc_arena(aid);
sewardjde4a1d02002-03-22 01:27:54 +00002257# endif
2258
nethercote2d5b8162004-08-11 09:40:52 +00002259 vg_assert( (((Addr)align_p) % req_alignB) == 0 );
sewardjb5f6f512005-03-10 23:59:00 +00002260
philippe72faf102012-03-11 22:24:03 +00002261 INNER_REQUEST(VALGRIND_MALLOCLIKE_BLOCK(align_p,
2262 req_pszB, a->rz_szB, False));
sewardjb5f6f512005-03-10 23:59:00 +00002263
nethercote2d5b8162004-08-11 09:40:52 +00002264 return align_p;
2265}
2266
2267
njn8b140de2009-02-17 04:31:18 +00002268SizeT VG_(arena_malloc_usable_size) ( ArenaId aid, void* ptr )
nethercote2d5b8162004-08-11 09:40:52 +00002269{
2270 Arena* a = arenaId_to_ArenaP(aid);
2271 Block* b = get_payload_block(a, ptr);
njn089f51f2005-07-17 18:12:00 +00002272 return get_pszB(a, b);
sewardjde4a1d02002-03-22 01:27:54 +00002273}
2274
bart545380e2008-04-21 17:28:50 +00002275
2276// Implementation of mallinfo(). There is no recent standard that defines
2277// the behavior of mallinfo(). The meaning of the fields in struct mallinfo
2278// is as follows:
2279//
2280// struct mallinfo {
2281// int arena; /* total space in arena */
2282// int ordblks; /* number of ordinary blocks */
2283// int smblks; /* number of small blocks */
2284// int hblks; /* number of holding blocks */
2285// int hblkhd; /* space in holding block headers */
2286// int usmblks; /* space in small blocks in use */
2287// int fsmblks; /* space in free small blocks */
2288// int uordblks; /* space in ordinary blocks in use */
2289// int fordblks; /* space in free ordinary blocks */
2290// int keepcost; /* space penalty if keep option */
2291// /* is used */
2292// };
2293//
2294// The glibc documentation about mallinfo (which is somewhat outdated) can
2295// be found here:
2296// http://www.gnu.org/software/libtool/manual/libc/Statistics-of-Malloc.html
2297//
2298// See also http://bugs.kde.org/show_bug.cgi?id=160956.
2299//
2300// Regarding the implementation of VG_(mallinfo)(): we cannot return the
2301// whole struct as the library function does, because this is called by a
2302// client request. So instead we use a pointer to do call by reference.
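//
// A minimal usage sketch (hypothetical caller; in reality this is reached
// via a client request from the malloc replacement library):
//
//    struct vg_mallinfo mi;
//    VG_(mallinfo)(tid, &mi);
//    /* mi.uordblks = client bytes in use, mi.fordblks = free bytes */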
njn088bfb42005-08-17 05:01:37 +00002303void VG_(mallinfo) ( ThreadId tid, struct vg_mallinfo* mi )
2304{
sewardj76dda8f2008-05-29 13:45:49 +00002305 UWord i, free_blocks, free_blocks_size;
bartc3c98392008-04-19 14:43:30 +00002306 Arena* a = arenaId_to_ArenaP(VG_AR_CLIENT);
2307
2308 // Traverse free list and calculate free blocks statistics.
2309 // This may seem slow but glibc works the same way.
2310 free_blocks_size = free_blocks = 0;
2311 for (i = 0; i < N_MALLOC_LISTS; i++) {
2312 Block* b = a->freelist[i];
2313 if (b == NULL) continue;
2314 for (;;) {
2315 free_blocks++;
sewardj76dda8f2008-05-29 13:45:49 +00002316 free_blocks_size += (UWord)get_pszB(a, b);
bartc3c98392008-04-19 14:43:30 +00002317 b = get_next_b(b);
2318 if (b == a->freelist[i]) break;
2319 }
2320 }
2321
2322 // We don't have fastbins so smblks & fsmblks are always 0. Also we don't
bart545380e2008-04-21 17:28:50 +00002323 // have a separate mmap allocator so set hblks & hblkhd to 0.
sewardj7d1064a2011-02-23 13:18:56 +00002324 mi->arena = a->stats__bytes_mmaped;
bart545380e2008-04-21 17:28:50 +00002325 mi->ordblks = free_blocks + VG_(free_queue_length);
bartc3c98392008-04-19 14:43:30 +00002326 mi->smblks = 0;
2327 mi->hblks = 0;
2328 mi->hblkhd = 0;
2329 mi->usmblks = 0;
2330 mi->fsmblks = 0;
sewardj7d1064a2011-02-23 13:18:56 +00002331 mi->uordblks = a->stats__bytes_on_loan - VG_(free_queue_volume);
bart545380e2008-04-21 17:28:50 +00002332 mi->fordblks = free_blocks_size + VG_(free_queue_volume);
bartc3c98392008-04-19 14:43:30 +00002333 mi->keepcost = 0; // may want some value in here
njn088bfb42005-08-17 05:01:37 +00002334}

SizeT VG_(arena_redzone_size) ( ArenaId aid )
{
   ensure_mm_init (VG_AR_CLIENT);
   /* ensure_mm_init will call arena_init if not yet done.
      This then ensures that the arena redzone size is properly
      initialised. */
   return arenaId_to_ArenaP(aid)->rz_szB;
}

/*------------------------------------------------------------*/
/*--- Services layered on top of malloc/free.              ---*/
/*------------------------------------------------------------*/

void* VG_(arena_calloc) ( ArenaId aid, const HChar* cc,
                          SizeT nmemb, SizeT bytes_per_memb )
{
   SizeT size;
   void* p;

   size = nmemb * bytes_per_memb;
   vg_assert(size >= nmemb && size >= bytes_per_memb); // check against overflow

   p = VG_(arena_malloc) ( aid, cc, size );

   if (p != NULL)
      VG_(memset)(p, 0, size);

   return p;
}
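
/* Worked example of the overflow that the assert above guards against
   (hypothetical 32-bit SizeT): nmemb = 0x10000 and
   bytes_per_memb = 0x10001 multiply to 0x100010000, which wraps to
   0x10000; the wrapped product is then smaller than bytes_per_memb, so
   the assertion fires instead of silently zeroing a too-small block. */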


void* VG_(arena_realloc) ( ArenaId aid, const HChar* cc,
                           void* ptr, SizeT req_pszB )
{
   Arena* a;
   SizeT  old_pszB;
   void*  p_new;
   Block* b;

   ensure_mm_init(aid);
   a = arenaId_to_ArenaP(aid);

   vg_assert(req_pszB < MAX_PSZB);

   if (NULL == ptr) {
      return VG_(arena_malloc)(aid, cc, req_pszB);
   }

   if (req_pszB == 0) {
      VG_(arena_free)(aid, ptr);
      return NULL;
   }

   b = get_payload_block(a, ptr);
   vg_assert(blockSane(a, b));

   vg_assert(is_inuse_block(b));
   old_pszB = get_pszB(a, b);

   if (req_pszB <= old_pszB) {
      return ptr;
   }

   p_new = VG_(arena_malloc) ( aid, cc, req_pszB );

   VG_(memcpy)(p_new, ptr, old_pszB);

   VG_(arena_free)(aid, ptr);

   return p_new;
}
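
/* Usage sketch (the cost centre "demo.cc" is a made-up label): as the
   code above shows, a growing request copies into a fresh block and
   frees the old one, while a non-growing request hands back the same
   pointer unchanged; actual shrinking is the job of
   VG_(arena_realloc_shrink) below.

      void* p = VG_(arena_malloc) ( aid, "demo.cc", 16 );
      p = VG_(arena_realloc) ( aid, "demo.cc", p, 64 );  // new block
      p = VG_(arena_realloc) ( aid, "demo.cc", p, 8 );   // same block
*/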


void VG_(arena_realloc_shrink) ( ArenaId aid,
                                 void* ptr, SizeT req_pszB )
{
   SizeT req_bszB, frag_bszB, b_bszB;
   Superblock* sb;
   Arena* a;
   SizeT old_pszB;
   Block* b;

   ensure_mm_init(aid);

   a = arenaId_to_ArenaP(aid);
   b = get_payload_block(a, ptr);
   vg_assert(blockSane(a, b));
   vg_assert(is_inuse_block(b));

   old_pszB = get_pszB(a, b);
   req_pszB = align_req_pszB(req_pszB);
   vg_assert(old_pszB >= req_pszB);
   if (old_pszB == req_pszB)
      return;

   sb = findSb( a, b );
   if (sb->unsplittable) {
      const UByte* sb_start = &sb->payload_bytes[0];
      const UByte* sb_end = &sb->payload_bytes[sb->n_payload_bytes - 1];
      Addr frag;

      vg_assert(unsplittableBlockSane(a, sb, b));

      frag = VG_PGROUNDUP((Addr) sb
                          + sizeof(Superblock) + pszB_to_bszB(a, req_pszB));
      frag_bszB = (Addr)sb_end - frag + 1;

      if (frag_bszB >= VKI_PAGE_SIZE) {
         SysRes sres;

         a->stats__bytes_on_loan -= old_pszB;
         b_bszB = (UByte*)frag - sb_start;
         shrinkInuseBlock(a, b, b_bszB);
         INNER_REQUEST
            (VALGRIND_RESIZEINPLACE_BLOCK(ptr,
                                          old_pszB,
                                          VG_(arena_malloc_usable_size)(aid, ptr),
                                          a->rz_szB));
         /* Have the minimum admin headers needed accessibility. */
         INNER_REQUEST(mkBhdrSzAccess(a, b));
         a->stats__bytes_on_loan += bszB_to_pszB(a, b_bszB);

         sb->n_payload_bytes -= frag_bszB;
         VG_(debugLog)(1, "mallocfree",
                       "shrink superblock %p to (pszB %7lu) "
                       "owner %s/%s (munmap-ing %p %7lu)\n",
                       sb, sb->n_payload_bytes,
                       a->clientmem ? "CLIENT" : "VALGRIND", a->name,
                       (void*) frag, frag_bszB);
         if (a->clientmem) {
            Bool need_discard = False;
            sres = VG_(am_munmap_client)(&need_discard,
                                         frag,
                                         frag_bszB);
            vg_assert (!need_discard);
         } else {
            sres = VG_(am_munmap_valgrind)(frag,
                                           frag_bszB);
         }
         vg_assert2(! sr_isError(sres), "shrink superblock munmap failure\n");
         a->stats__bytes_mmaped -= frag_bszB;

         vg_assert(unsplittableBlockSane(a, sb, b));
      }
   } else {
      req_bszB = pszB_to_bszB(a, req_pszB);
      b_bszB = get_bszB(b);
      frag_bszB = b_bszB - req_bszB;
      if (frag_bszB < min_useful_bszB(a))
         return;

      a->stats__bytes_on_loan -= old_pszB;
      shrinkInuseBlock(a, b, req_bszB);
      INNER_REQUEST
         (VALGRIND_RESIZEINPLACE_BLOCK(ptr,
                                       old_pszB,
                                       VG_(arena_malloc_usable_size)(aid, ptr),
                                       a->rz_szB));
      /* Have the minimum admin headers needed accessibility. */
      INNER_REQUEST(mkBhdrSzAccess(a, b));

      mkFreeBlock(a, &b[req_bszB], frag_bszB,
                  pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
      /* Mark the admin headers as accessible. */
      INNER_REQUEST(mkBhdrAccess(a, &b[req_bszB]));
      if (VG_(clo_profile_heap))
         set_cc(&b[req_bszB], "admin.fragmentation-2");
      /* Possibly merge &b[req_bszB] with its free neighbours. */
      mergeWithFreeNeighbours(a, sb, &b[req_bszB], frag_bszB);

      b_bszB = get_bszB(b);
      a->stats__bytes_on_loan += bszB_to_pszB(a, b_bszB);
   }

   vg_assert (blockSane(a, b));
#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif
}
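
/* Worked sketch of the two paths above (illustrative numbers, assuming
   a 4096-byte page size).  Unsplittable superblock: the tail from
   VG_PGROUNDUP((Addr)sb + sizeof(Superblock) + pszB_to_bszB(a, req_pszB))
   to the end of the superblock is munmapped outright whenever it spans
   at least one page, e.g. shrinking a lone 1 MB block to 16 kB returns
   roughly 1 MB minus five pages to the OS.  Normal superblock: the tail
   becomes an in-arena free block instead, e.g. shrinking a 4096-byte
   block to 1024 bytes leaves a roughly 3 kB fragment that is put on a
   free list and merged with any free neighbours. */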

/* Inline just for the wrapper VG_(strdup) below */
__inline__ HChar* VG_(arena_strdup) ( ArenaId aid, const HChar* cc,
                                      const HChar* s )
{
   Int    i;
   Int    len;
   HChar* res;

   if (s == NULL)
      return NULL;

   len = VG_(strlen)(s) + 1;
   res = VG_(arena_malloc) (aid, cc, len);

   for (i = 0; i < len; i++)
      res[i] = s[i];
   return res;
}

void* VG_(arena_perm_malloc) ( ArenaId aid, SizeT size, Int align )
{
   Arena* a;

   ensure_mm_init(aid);
   a = arenaId_to_ArenaP(aid);

   align = align - 1;
   size = (size + align) & ~align;

   if (UNLIKELY(a->perm_malloc_current + size > a->perm_malloc_limit)) {
      // Get a superblock, but do not insert it into the superblock list.
      // The superblock structure is not needed, so we use its full
      // memory range.  The superblock is nevertheless counted in the
      // mmaped statistics.
      Superblock* new_sb = newSuperblock (a, size);
      a->perm_malloc_limit
         = (Addr)&new_sb->payload_bytes[new_sb->n_payload_bytes - 1];

      // It is fine to start allocating from the very beginning of the
      // superblock, as afterwards we "lose" it as a superblock.
      a->perm_malloc_current = (Addr)new_sb;
   }

   a->stats__perm_blocks += 1;
   a->stats__perm_bytes_on_loan += size;
   add_one_block_to_stats (a, size);

   a->perm_malloc_current += size;
   return (void*)(a->perm_malloc_current - size);
}
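
/* Worked example of the round-up above (align must be a power of two):
   with align == 8 the mask becomes 7, so size = (size + 7) & ~7 turns a
   13-byte request into 16 bytes, keeping perm_malloc_current aligned
   for the next caller. */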

/*------------------------------------------------------------*/
/*--- Tool-visible functions.                              ---*/
/*------------------------------------------------------------*/

// All just wrappers to avoid exposing arenas to tools.

// This function never returns NULL.
void* VG_(malloc) ( const HChar* cc, SizeT nbytes )
{
   return VG_(arena_malloc) ( VG_AR_CORE, cc, nbytes );
}

void VG_(free) ( void* ptr )
{
   VG_(arena_free) ( VG_AR_CORE, ptr );
}

void* VG_(calloc) ( const HChar* cc, SizeT nmemb, SizeT bytes_per_memb )
{
   return VG_(arena_calloc) ( VG_AR_CORE, cc, nmemb, bytes_per_memb );
}

void* VG_(realloc) ( const HChar* cc, void* ptr, SizeT size )
{
   return VG_(arena_realloc) ( VG_AR_CORE, cc, ptr, size );
}

void VG_(realloc_shrink) ( void* ptr, SizeT size )
{
   VG_(arena_realloc_shrink) ( VG_AR_CORE, ptr, size );
}

HChar* VG_(strdup) ( const HChar* cc, const HChar* s )
{
   return VG_(arena_strdup) ( VG_AR_CORE, cc, s );
}

void* VG_(perm_malloc) ( SizeT size, Int align )
{
   return VG_(arena_perm_malloc) ( VG_AR_CORE, size, align );
}
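
/* Usage sketch from tool code (the cost-centre string is an arbitrary
   label used only when heap profiling is enabled; "mytool.demo" is a
   made-up example):

      HChar* s = VG_(strdup) ( "mytool.demo", "hello" );
      void*  p = VG_(malloc) ( "mytool.demo", 128 );
      p = VG_(realloc) ( "mytool.demo", p, 256 );
      VG_(free) ( p );
      VG_(free) ( s );
*/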

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/