
/*--------------------------------------------------------------------*/
/*--- malloc/free wrappers for detecting errors and updating bits. ---*/
/*---                                         mc_malloc_wrappers.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2017 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_execontext.h"
#include "pub_tool_poolalloc.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_libcproc.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"     // Needed for mc_include.h
#include "pub_tool_stacktrace.h"    // For VG_(get_and_pp_StackTrace)
#include "pub_tool_xarray.h"
#include "pub_tool_xtree.h"
#include "pub_tool_xtmemory.h"

#include "mc_include.h"

/*------------------------------------------------------------*/
/*--- Defns                                                ---*/
/*------------------------------------------------------------*/

/* Stats ... */
static SizeT cmalloc_n_mallocs  = 0;
static SizeT cmalloc_n_frees    = 0;
static ULong cmalloc_bs_mallocd = 0;

/* For debug printing to do with mempools: what stack trace
   depth to show. */
#define MEMPOOL_DEBUG_STACKTRACE_DEPTH 16


/*------------------------------------------------------------*/
/*--- Tracking malloc'd and free'd blocks                  ---*/
/*------------------------------------------------------------*/

SizeT MC_(Malloc_Redzone_SzB) = -10000000; // If used before set, should BOMB

/* Record malloc'd blocks. */
VgHashTable *MC_(malloc_list) = NULL;

/* Memory pools: a hash table of MC_Mempools.  Search key is
   MC_Mempool::pool. */
VgHashTable *MC_(mempool_list) = NULL;

/* Pool allocator for MC_Chunk. */
PoolAlloc *MC_(chunk_poolalloc) = NULL;
static
MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
                            MC_AllocKind kind);
static inline
void delete_MC_Chunk (MC_Chunk* mc);

/* Records blocks after freeing. */
/* Blocks freed by the client are queued in one of two lists of
   freed blocks not yet physically freed:
      the "big blocks" freed list
      the "small blocks" freed list
   Blocks with a size >= MC_(clo_freelist_big_blocks)
   are linked in the big blocks freed list.
   This allows a client to allocate and free big blocks
   (e.g. bigger than VG_(clo_freelist_vol)) without immediately
   losing all protection against dangling pointers.
   Position [0] is for big blocks, [1] is for small blocks. */
static MC_Chunk* freed_list_start[2]  = {NULL, NULL};
static MC_Chunk* freed_list_end[2]    = {NULL, NULL};
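
/* Illustrative note on tuning the two queues: they are controlled by
   Memcheck's --freelist-vol and --freelist-big-blocks options.  For
   example (the values here are made up for illustration):

      valgrind --tool=memcheck --freelist-vol=30000000 \
               --freelist-big-blocks=2000000 ./myprog

   quarantines up to ~30MB of freed blocks, and queues freed blocks of
   2MB or more on the big blocks list (index [0] above), so that one
   huge free does not flush all the small freed blocks whose dangling
   uses we still want to catch. */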

/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( MC_Chunk* mc )
{
   const Bool show = False;
   const int l = (mc->szB >= MC_(clo_freelist_big_blocks) ? 0 : 1);

   /* Put it at the end of the freed list, unless the block would be
      released immediately anyway: in that case, put it at the head
      of the freed list. */
   if (freed_list_end[l] == NULL) {
      tl_assert(freed_list_start[l] == NULL);
      mc->next = NULL;
      freed_list_end[l] = freed_list_start[l] = mc;
   } else {
      tl_assert(freed_list_end[l]->next == NULL);
      if (mc->szB >= MC_(clo_freelist_vol)) {
         mc->next = freed_list_start[l];
         freed_list_start[l] = mc;
      } else {
         mc->next = NULL;
         freed_list_end[l]->next = mc;
         freed_list_end[l]       = mc;
      }
   }
   VG_(free_queue_volume) += (Long)mc->szB;
   if (show)
      VG_(printf)("mc_freelist: acquire: volume now %lld\n",
                  VG_(free_queue_volume));
   VG_(free_queue_length)++;
}

/* Release enough of the oldest blocks to bring the free queue
   volume below MC_(clo_freelist_vol).
   Start with the big blocks list first.
   On entry, VG_(free_queue_volume) must be > MC_(clo_freelist_vol).
   On exit, VG_(free_queue_volume) will be <= MC_(clo_freelist_vol). */
static void release_oldest_block(void)
{
   const Bool show = False;
   int i;
   tl_assert (VG_(free_queue_volume) > MC_(clo_freelist_vol));
   tl_assert (freed_list_start[0] != NULL || freed_list_start[1] != NULL);

   for (i = 0; i < 2; i++) {
      while (VG_(free_queue_volume) > MC_(clo_freelist_vol)
             && freed_list_start[i] != NULL) {
         MC_Chunk* mc1;

         tl_assert(freed_list_end[i] != NULL);

         mc1 = freed_list_start[i];
         VG_(free_queue_volume) -= (Long)mc1->szB;
         VG_(free_queue_length)--;
         if (show)
            VG_(printf)("mc_freelist: discard: volume now %lld\n",
                        VG_(free_queue_volume));
         tl_assert(VG_(free_queue_volume) >= 0);

         if (freed_list_start[i] == freed_list_end[i]) {
            freed_list_start[i] = freed_list_end[i] = NULL;
         } else {
            freed_list_start[i] = mc1->next;
         }
         mc1->next = NULL; /* just paranoia */

         /* free MC_Chunk */
         if (MC_AllocCustom != mc1->allockind)
            VG_(cli_free) ( (void*)(mc1->data) );
         delete_MC_Chunk ( mc1 );
      }
   }
}

MC_Chunk* MC_(get_freed_block_bracketting) (Addr a)
{
   int i;
   for (i = 0; i < 2; i++) {
      MC_Chunk* mc;
      mc = freed_list_start[i];
      while (mc) {
         if (VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                    MC_(Malloc_Redzone_SzB) ))
            return mc;
         mc = mc->next;
      }
   }
   return NULL;
}

/* Allocate a shadow chunk, put it on the appropriate list.
   If needed, release oldest blocks from freed list. */
static
MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
                            MC_AllocKind kind)
{
   MC_Chunk* mc  = VG_(allocEltPA)(MC_(chunk_poolalloc));
   mc->data      = p;
   mc->szB       = szB;
   mc->allockind = kind;
   switch ( MC_(n_where_pointers)() ) {
      case 2: mc->where[1] = 0; // fall through to case 1
      case 1: mc->where[0] = 0; // fall through to case 0
      case 0: break;
      default: tl_assert(0);
   }
   MC_(set_allocated_at) (tid, mc);

   /* Each time a new MC_Chunk is created, release oldest blocks
      if the free list volume is exceeded. */
   if (VG_(free_queue_volume) > MC_(clo_freelist_vol))
      release_oldest_block();

   /* Paranoia ... ensure the MC_Chunk is off-limits to the client, so
      the mc->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by VG_(malloc)
      should be noaccess as far as the client is concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
      VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
   }
   return mc;
}

static inline
void delete_MC_Chunk (MC_Chunk* mc)
{
   VG_(freeEltPA) (MC_(chunk_poolalloc), mc);
}

// True if mc is in the given block list.
static Bool in_block_list (const VgHashTable *block_list, MC_Chunk* mc)
{
   MC_Chunk* found_mc = VG_(HT_lookup) ( block_list, (UWord)mc->data );
   if (found_mc) {
      tl_assert (found_mc->data == mc->data);
      /* If a user builds a pool from a malloc-ed superblock
         and uses VALGRIND_MALLOCLIKE_BLOCK to "mark"
         an address at the beginning of this superblock, then
         this address will appear twice in the block_list.
         We handle this case by checking size and allockind
         (see the sketch after this function).
         Note: I suspect that having the same block
         twice in MC_(malloc_list) is a recipe for bugs.
         We might do better to create a "standard" mempool to
         handle all this more cleanly. */
      if (found_mc->szB != mc->szB
          || found_mc->allockind != mc->allockind)
         return False;
      tl_assert (found_mc == mc);
      return True;
   } else
      return False;
}
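
/* Sketch of the double-entry situation described above, using the
   standard client-request macros from valgrind.h / memcheck.h (the
   sizes are hypothetical):

      char* super = malloc(100000);   // tracked in MC_(malloc_list)
      VALGRIND_MALLOCLIKE_BLOCK(super, 1000, 0/*rzB*/, 0/*is_zeroed*/);
      // 'super' now appears twice in MC_(malloc_list): once as the
      // malloc-ed superblock, once as a custom block of 1000 bytes.

   The two entries differ in szB and/or allockind, which is what the
   check above relies on. */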

// True if mc is a live block (not yet freed).
static Bool live_block (MC_Chunk* mc)
{
   if (mc->allockind == MC_AllocCustom) {
      MC_Mempool* mp;
      VG_(HT_ResetIter)(MC_(mempool_list));
      while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
         if ( in_block_list (mp->chunks, mc) )
            return True;
      }
   }
   /* Note: we fall back to MC_(malloc_list) even for an MC_AllocCustom
      block not found in any mempool, as such a block can be inserted
      in MC_(malloc_list) by VALGRIND_MALLOCLIKE_BLOCK. */
   return in_block_list ( MC_(malloc_list), mc );
}
271ExeContext* MC_(allocated_at) (MC_Chunk* mc)
272{
273 switch (MC_(clo_keep_stacktraces)) {
274 case KS_none: return VG_(null_ExeContext) ();
275 case KS_alloc: return mc->where[0];
276 case KS_free: return VG_(null_ExeContext) ();
277 case KS_alloc_then_free: return (live_block(mc) ?
278 mc->where[0] : VG_(null_ExeContext) ());
279 case KS_alloc_and_free: return mc->where[0];
280 default: tl_assert (0);
281 }
282}
283
284ExeContext* MC_(freed_at) (MC_Chunk* mc)
285{
286 switch (MC_(clo_keep_stacktraces)) {
287 case KS_none: return VG_(null_ExeContext) ();
288 case KS_alloc: return VG_(null_ExeContext) ();
289 case KS_free: return (mc->where[0] ?
290 mc->where[0] : VG_(null_ExeContext) ());
291 case KS_alloc_then_free: return (live_block(mc) ?
292 VG_(null_ExeContext) () : mc->where[0]);
293 case KS_alloc_and_free: return (mc->where[1] ?
294 mc->where[1] : VG_(null_ExeContext) ());
295 default: tl_assert (0);
296 }
297}
298
299void MC_(set_allocated_at) (ThreadId tid, MC_Chunk* mc)
300{
301 switch (MC_(clo_keep_stacktraces)) {
302 case KS_none: return;
303 case KS_alloc: break;
304 case KS_free: return;
305 case KS_alloc_then_free: break;
306 case KS_alloc_and_free: break;
307 default: tl_assert (0);
308 }
309 mc->where[0] = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
Elliott Hughesed398002017-06-21 14:41:24 -0700310 if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
311 VG_(XTMemory_Full_alloc)(mc->szB, mc->where[0]);
philippe8617b5b2013-01-12 19:53:08 +0000312}
313
314void MC_(set_freed_at) (ThreadId tid, MC_Chunk* mc)
315{
Elliott Hughesed398002017-06-21 14:41:24 -0700316 Int pos;
317 ExeContext* ec_free;
318
philippe8617b5b2013-01-12 19:53:08 +0000319 switch (MC_(clo_keep_stacktraces)) {
320 case KS_none: return;
Elliott Hughesed398002017-06-21 14:41:24 -0700321 case KS_alloc:
322 if (LIKELY(VG_(clo_xtree_memory)
323 != Vg_XTMemory_Full))
324 return;
325 pos = -1; break;
philippe8617b5b2013-01-12 19:53:08 +0000326 case KS_free: pos = 0; break;
327 case KS_alloc_then_free: pos = 0; break;
328 case KS_alloc_and_free: pos = 1; break;
329 default: tl_assert (0);
330 }
Elliott Hughesed398002017-06-21 14:41:24 -0700331 /* We need the execontext for the free operation, either to store
332 it in the mc chunk and/or for full xtree memory profiling.
333 Note: we are guaranteed to find the ec_alloc in mc->where[0], as
334 mc_post_clo_init verifies the consistency of --xtree-memory and
335 --keep-stacktraces. */
336 ec_free = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
337 if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
338 VG_(XTMemory_Full_free)(mc->szB, mc->where[0], ec_free);
339 if (LIKELY(pos >= 0))
340 mc->where[pos] = ec_free;
philippe8617b5b2013-01-12 19:53:08 +0000341}
342
343UInt MC_(n_where_pointers) (void)
344{
345 switch (MC_(clo_keep_stacktraces)) {
346 case KS_none: return 0;
347 case KS_alloc:
348 case KS_free:
349 case KS_alloc_then_free: return 1;
350 case KS_alloc_and_free: return 2;
351 default: tl_assert (0);
352 }
353}
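
/* Summary of the --keep-stacktraces values handled above, and the
   number of 'where' pointers each keeps per MC_Chunk:
      --keep-stacktraces=none            -> 0
      --keep-stacktraces=alloc           -> 1 (allocation stack only)
      --keep-stacktraces=free            -> 1 (free stack only)
      --keep-stacktraces=alloc-then-free -> 1 (alloc stack, overwritten
                                               by the free stack on free)
      --keep-stacktraces=alloc-and-free  -> 2 (both stacks kept) */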

/*------------------------------------------------------------*/
/*--- client_malloc(), etc                                 ---*/
/*------------------------------------------------------------*/

/* Allocate memory and note change in memory available */
void* MC_(new_block) ( ThreadId tid,
                       Addr p, SizeT szB, SizeT alignB,
                       Bool is_zeroed, MC_AllocKind kind,
                       VgHashTable *table)
{
   MC_Chunk* mc;

   // Allocate and zero if necessary
   if (p) {
      tl_assert(MC_AllocCustom == kind);
   } else {
      tl_assert(MC_AllocCustom != kind);
      p = (Addr)VG_(cli_malloc)( alignB, szB );
      if (!p) {
         return NULL;
      }
      if (is_zeroed) {
         VG_(memset)((void*)p, 0, szB);
      } else
      if (MC_(clo_malloc_fill) != -1) {
         tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
         VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
      }
   }

   // Only update stats if allocation succeeded.
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)szB;
   mc = create_MC_Chunk (tid, p, szB, kind);
   VG_(HT_add_node)( table, mc );

   if (is_zeroed)
      MC_(make_mem_defined)( p, szB );
   else {
      UInt ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(mc));
      tl_assert(VG_(is_plausible_ECU)(ecu));
      MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
   }

   return (void*)p;
}

void* MC_(malloc) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "malloc", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_new", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
   }
}

void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_vec_new", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
   }
}

void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "memalign", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, alignB,
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
{
   if (MC_(record_fishy_value_error)(tid, "calloc", "nmemb", nmemb) ||
       MC_(record_fishy_value_error)(tid, "calloc", "size", size1)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment),
         /*is_zeroed*/True, MC_AllocMalloc, MC_(malloc_list));
   }
}

static
void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
   /* Note: we do not free-fill the custom allocs produced
      by MEMPOOL or by MALLOC/FREELIKE_BLOCK requests. */
   if (MC_(clo_free_fill) != -1 && MC_AllocCustom != mc->allockind ) {
      tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
      VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
   }

   /* Note: make redzones noaccess again -- just in case user made them
      accessible with a client request... */
   MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );

   /* Record where freed */
   MC_(set_freed_at) (tid, mc);
   /* Put it out of harm's way for a while */
   add_to_freed_queue ( mc );
   /* If the free list volume is bigger than MC_(clo_freelist_vol),
      we wait till the next block allocation to release blocks.
      This increases the chance of discovering dangling-pointer usage,
      even for big blocks being freed by the client. */
}


static
void record_freemismatch_error (ThreadId tid, MC_Chunk* mc)
{
   /* Only show such an error if the user hasn't disabled doing so. */
   if (!MC_(clo_show_mismatched_frees))
      return;

   /* MC_(record_freemismatch_error) reports errors for still
      allocated blocks, but we are in the middle of freeing this one.
      To report the error correctly, we re-insert the chunk (making it
      again a "clean allocated block"), report the error, and then
      re-remove the chunk.  This avoids doing a VG_(HT_lookup)
      followed by a VG_(HT_remove) in all "non-erroneous cases". */
   VG_(HT_add_node)( MC_(malloc_list), mc );
   MC_(record_freemismatch_error) ( tid, mc );
   if ((mc != VG_(HT_remove) ( MC_(malloc_list), (UWord)mc->data )))
      tl_assert(0);
}
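
/* Illustrative client code that reaches the path above (reported
   unless --show-mismatched-frees=no is given):

      int* p = new int[10];   // allocated with new[]
      free(p);                // freed with free(): mismatched free
*/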

void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
{
   MC_Chunk* mc;

   cmalloc_n_frees++;

   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, p );
   } else {
      /* check if it is a matching free() / delete / delete [] */
      if (kind != mc->allockind) {
         tl_assert(p == mc->data);
         record_freemismatch_error ( tid, mc );
      }
      die_and_free_mem ( tid, mc, rzB );
   }
}

void MC_(free) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocMalloc );
}

void MC_(__builtin_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew);
}

void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec);
}

void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
   MC_Chunk* old_mc;
   MC_Chunk* new_mc;
   Addr      a_new;
   SizeT     old_szB;

   if (MC_(record_fishy_value_error)(tid, "realloc", "size", new_szB))
      return NULL;

   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)new_szB;

   /* Remove the old block */
   old_mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
   if (old_mc == NULL) {
      MC_(record_free_error) ( tid, (Addr)p_old );
      /* We return to the program regardless. */
      return NULL;
   }

   /* check if it is a matching free() / delete / delete [] */
   if (MC_AllocMalloc != old_mc->allockind) {
      /* cannot realloc a range that was allocated with new or new [] */
      tl_assert((Addr)p_old == old_mc->data);
      record_freemismatch_error ( tid, old_mc );
      /* but keep going anyway */
   }

   old_szB = old_mc->szB;

   /* Get new memory */
   a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

   if (a_new) {
      /* In all cases, even when the new size is smaller or unchanged, we
         reallocate and copy the contents, and make the old block
         inaccessible.  This is so as to guarantee to catch all cases of
         accesses via the old address after reallocation, regardless of
         the change in size.  (Of course the ability to detect accesses
         to the old block also depends on the size of the freed blocks
         queue.) */

      // Allocate a new chunk.
      new_mc = create_MC_Chunk( tid, a_new, new_szB, MC_AllocMalloc );

      // Now insert the new mc (with a new 'data' field) into malloc_list.
      VG_(HT_add_node)( MC_(malloc_list), new_mc );

      /* Retained part is copied, red zones set as normal */

      /* Redzone at the front */
      MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB),
                              MC_(Malloc_Redzone_SzB) );

      /* payload */
      if (old_szB >= new_szB) {
         /* new size is smaller or the same */

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
         VG_(memcpy)((void*)a_new, p_old, new_szB);
      } else {
         /* new size is bigger */
         UInt ecu;

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, old_szB );
         VG_(memcpy)((void*)a_new, p_old, old_szB);

         // If the block has grown, we mark the grown area as undefined.
         // We have to do that after VG_(HT_add_node) to ensure the ecu
         // execontext is for a fully allocated block.
         ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(new_mc));
         tl_assert(VG_(is_plausible_ECU)(ecu));
         MC_(make_mem_undefined_w_otag)( a_new+old_szB,
                                         new_szB-old_szB,
                                         ecu | MC_OKIND_HEAP );

         /* Possibly fill new area with specified junk */
         if (MC_(clo_malloc_fill) != -1) {
            tl_assert(MC_(clo_malloc_fill) >= 0x00
                      && MC_(clo_malloc_fill) <= 0xFF);
            VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
                        new_szB-old_szB);
         }
      }

      /* Redzone at the back. */
      MC_(make_mem_noaccess) ( a_new+new_szB, MC_(Malloc_Redzone_SzB));

      /* Possibly fill freed area with specified junk. */
      if (MC_(clo_free_fill) != -1) {
         tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
         VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
      }

      /* Free old memory */
      /* Nb: we have to allocate a new MC_Chunk for the new memory rather
         than recycling the old one, so that any erroneous accesses to the
         old memory are reported. */
      die_and_free_mem ( tid, old_mc, MC_(Malloc_Redzone_SzB) );

   } else {
      /* Could not allocate new client memory.
         Re-insert the old_mc (with the old ptr) in the HT, as old_mc was
         unconditionally removed at the beginning of the function. */
      VG_(HT_add_node)( MC_(malloc_list), old_mc );
   }

   return (void*)a_new;
}
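
/* Illustrative consequence of the always-move policy above: even a
   same-size realloc invalidates the old pointer, and Memcheck can
   report a stale access through it while the old block sits in the
   freed-blocks queue:

      char* p = malloc(8);
      char* q = realloc(p, 8);   // contents copied to a new address
      p[0] = 'x';                // use of a freed block -> reported
*/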

SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will be marked as addressable.
   return ( mc ? mc->szB : 0 );
}

/* This handles the in-place resize of a block, as performed by the
   VALGRIND_RESIZEINPLACE_BLOCK client request.  It is unrelated to,
   and not used for, handling of the normal libc realloc()
   function. */
void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
                               SizeT oldSizeB, SizeT newSizeB, SizeT rzB)
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
   if (!mc || mc->szB != oldSizeB || newSizeB == 0) {
      /* Reject if: p is not found, or oldSizeB is wrong,
         or new block would be empty. */
      MC_(record_free_error) ( tid, p );
      return;
   }

   if (oldSizeB == newSizeB)
      return;

   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_resize_in_place)(oldSizeB, newSizeB, mc->where[0]);

   mc->szB = newSizeB;
   if (newSizeB < oldSizeB) {
      MC_(make_mem_noaccess)( p + newSizeB, oldSizeB - newSizeB + rzB );
   } else {
      ExeContext* ec  = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
      UInt        ecu = VG_(get_ECU_from_ExeContext)(ec);
      MC_(make_mem_undefined_w_otag)( p + oldSizeB, newSizeB - oldSizeB,
                                      ecu | MC_OKIND_HEAP );
      if (rzB > 0)
         MC_(make_mem_noaccess)( p + newSizeB, rzB );
   }
}
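
/* Illustrative use of the client request handled above, assuming the
   macro from memcheck.h; the sizes are made up:

      char* buf = malloc(100);
      // ... decide only 40 bytes are really needed ...
      VALGRIND_RESIZEINPLACE_BLOCK(buf, 100, 40, 0/*rzB*/);
      // buf[50] is now noaccess; touching it would be reported
*/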


/*------------------------------------------------------------*/
/*--- Memory pool stuff.                                   ---*/
/*------------------------------------------------------------*/

/* Set to 1 for intensive sanity checking.  It is very expensive,
   though, and should not be used in production scenarios.  See
   #255966. */
#define MP_DETAILED_SANITY_CHECKS 0

static void check_mempool_sane(MC_Mempool* mp); /*forward*/

static void free_mallocs_in_mempool_block (MC_Mempool* mp,
                                           Addr StartAddr,
                                           Addr EndAddr)
{
   MC_Chunk *mc;
   ThreadId tid;

   tl_assert(mp->auto_free);

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "free_mallocs_in_mempool_block: Start 0x%lx size %lu\n",
                   StartAddr, (SizeT) (EndAddr - StartAddr));
   }

   tid = VG_(get_running_tid)();

   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      if (mc->data >= StartAddr && mc->data + mc->szB <= EndAddr) {
         if (VG_(clo_verbosity) > 2) {
            VG_(message)(Vg_UserMsg, "Auto-free of 0x%lx size=%lu\n",
                         mc->data, (mc->szB + 0UL));
         }

         VG_(HT_remove_at_Iter)(MC_(malloc_list));
         die_and_free_mem(tid, mc, mp->rzB);
      }
   }
}

void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed,
                         Bool auto_free, Bool metapool)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2 || (auto_free && !metapool)) {
      VG_(message)(Vg_UserMsg,
                   "create_mempool(0x%lx, rzB=%u, zeroed=%d,"
                   " autofree=%d, metapool=%d)\n",
                   pool, rzB, is_zeroed,
                   auto_free, metapool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
      if (auto_free && !metapool)
         VG_(tool_panic)("Inappropriate use of mempool:"
                         " an auto free pool must be a meta pool. Aborting\n");
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp != NULL) {
      VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
   }

   mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
   mp->pool      = pool;
   mp->rzB       = rzB;
   mp->is_zeroed = is_zeroed;
   mp->auto_free = auto_free;
   mp->metapool  = metapool;
   mp->chunks    = VG_(HT_construct)( "MC_(create_mempool)" );
   check_mempool_sane(mp);

   /* Paranoia ... ensure this area is off-limits to the client, so
      the mp->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by
      VG_(malloc) should be noaccess as far as the client is
      concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
      VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
   }

   VG_(HT_add_node)( MC_(mempool_list), mp );
}
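
/* Illustrative pool lifecycle as seen from the client side, using the
   classic macros from valgrind.h (the extended creation request that
   sets the auto_free/metapool flags handled above is not shown;
   pool/addr/size values are hypothetical):

      VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed); // MC_(create_mempool)
      VALGRIND_MEMPOOL_ALLOC(pool, addr, size);      // MC_(mempool_alloc)
      VALGRIND_MEMPOOL_FREE(pool, addr);             // MC_(mempool_free)
      VALGRIND_DESTROY_MEMPOOL(pool);                // MC_(destroy_mempool)
*/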

void MC_(destroy_mempool)(Addr pool)
{
   MC_Chunk*   mc;
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }
   check_mempool_sane(mp);

   // Clean up the chunks, one by one
   VG_(HT_ResetIter)(mp->chunks);
   while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
      /* Note: make redzones noaccess again -- just in case user made them
         accessible with a client request... */
      MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
   }
   // Destroy the chunk table
   VG_(HT_destruct)(mp->chunks, (void (*)(void *))delete_MC_Chunk);

   VG_(free)(mp);
}

static Int
mp_compar(const void* n1, const void* n2)
{
   const MC_Chunk* mc1 = *(const MC_Chunk *const *)n1;
   const MC_Chunk* mc2 = *(const MC_Chunk *const *)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}

static void
check_mempool_sane(MC_Mempool* mp)
{
   UInt n_chunks, i, bad = 0;
   static UInt tick = 0;

   MC_Chunk **chunks = (MC_Chunk**) VG_(HT_to_array)( mp->chunks, &n_chunks );
   if (!chunks)
      return;

   if (VG_(clo_verbosity) > 1) {
      if (tick++ >= 10000) {
         UInt total_pools = 0, total_chunks = 0;
         MC_Mempool* mp2;

         VG_(HT_ResetIter)(MC_(mempool_list));
         while ( (mp2 = VG_(HT_Next)(MC_(mempool_list))) ) {
            total_pools++;
            VG_(HT_ResetIter)(mp2->chunks);
            while (VG_(HT_Next)(mp2->chunks)) {
               total_chunks++;
            }
         }

         VG_(message)(Vg_UserMsg,
                      "Total mempools active: %u pools, %u chunks\n",
                      total_pools, total_chunks);
         tick = 0;
      }
   }


   VG_(ssort)((void*)chunks, n_chunks, sizeof(VgHashNode*), mp_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data > chunks[i+1]->data) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %u / %u is out of order "
                      "wrt. its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %u / %u overlaps with its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   if (bad) {
      VG_(message)(Vg_UserMsg,
                   "Bad mempool (%u chunks), dumping chunks for inspection:\n",
                   n_chunks);
      for (i = 0; i < n_chunks; ++i) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %u / %u: %lu bytes "
                      "[%lx,%lx), allocated:\n",
                      i+1,
                      n_chunks,
                      chunks[i]->szB + 0UL,
                      chunks[i]->data,
                      chunks[i]->data + chunks[i]->szB);

         VG_(pp_ExeContext)(MC_(allocated_at)(chunks[i]));
      }
   }
   VG_(free)(chunks);
}

void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %lu)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
   if (mp == NULL) {
      MC_(record_illegal_mempool_error) ( tid, pool );
   } else {
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
      MC_(new_block)(tid, addr, szB, /*ignored*/0, mp->is_zeroed,
                     MC_AllocCustom, mp->chunks);
      if (mp->rzB > 0) {
         // This is not needed if the user application has properly
         // marked the superblock noaccess when defining the mempool.
         // We nevertheless mark the redzones noaccess to still catch
         // some bugs if the user forgot.
         MC_(make_mem_noaccess) ( addr - mp->rzB, mp->rzB);
         MC_(make_mem_noaccess) ( addr + szB, mp->rzB);
      }
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   }
}

void MC_(mempool_free)(Addr pool, Addr addr)
{
   MC_Mempool* mp;
   MC_Chunk*   mc;
   ThreadId    tid = VG_(get_running_tid)();

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addr);
      return;
   }

   if (mp->auto_free) {
      free_mallocs_in_mempool_block(mp, mc->data, mc->data + (mc->szB + 0UL));
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "mempool_free(0x%lx, 0x%lx) freed chunk of %lu bytes\n",
                   pool, addr, mc->szB + 0UL);
   }

   die_and_free_mem ( tid, mc, mp->rzB );
   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
}

njn718d3b12006-12-16 00:54:12 +0000964void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
sewardj2c1c9df2006-07-28 00:06:37 +0000965{
966 MC_Mempool* mp;
967 MC_Chunk* mc;
968 ThreadId tid = VG_(get_running_tid)();
969 UInt n_shadows, i;
970 VgHashNode** chunks;
971
sewardjc740d762006-10-05 17:59:23 +0000972 if (VG_(clo_verbosity) > 2) {
floriande3df032015-08-04 21:26:10 +0000973 VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %lu)\n",
sewardj6b523cd2009-07-15 14:49:40 +0000974 pool, addr, szB);
sewardjc740d762006-10-05 17:59:23 +0000975 VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
976 }
977
sewardj2c1c9df2006-07-28 00:06:37 +0000978 mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
979 if (mp == NULL) {
980 MC_(record_illegal_mempool_error)(tid, pool);
981 return;
982 }
983
sewardjc740d762006-10-05 17:59:23 +0000984 check_mempool_sane(mp);
sewardj2c1c9df2006-07-28 00:06:37 +0000985 chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
986 if (n_shadows == 0) {
987 tl_assert(chunks == NULL);
988 return;
989 }
990
991 tl_assert(chunks != NULL);
992 for (i = 0; i < n_shadows; ++i) {
sewardj8aeeaa92006-08-16 17:51:28 +0000993
sewardjc740d762006-10-05 17:59:23 +0000994 Addr lo, hi, min, max;
sewardj8aeeaa92006-08-16 17:51:28 +0000995
sewardj2c1c9df2006-07-28 00:06:37 +0000996 mc = (MC_Chunk*) chunks[i];
997
sewardj8aeeaa92006-08-16 17:51:28 +0000998 lo = mc->data;
njn718d3b12006-12-16 00:54:12 +0000999 hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;
sewardj2c1c9df2006-07-28 00:06:37 +00001000
njn718d3b12006-12-16 00:54:12 +00001001#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))
sewardj2c1c9df2006-07-28 00:06:37 +00001002
sewardj8aeeaa92006-08-16 17:51:28 +00001003 if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {
sewardj2c1c9df2006-07-28 00:06:37 +00001004
1005 /* The current chunk is entirely within the trim extent: keep
1006 it. */
1007
1008 continue;
1009
sewardj8aeeaa92006-08-16 17:51:28 +00001010 } else if ( (! EXTENT_CONTAINS(lo)) &&
1011 (! EXTENT_CONTAINS(hi)) ) {
sewardj2c1c9df2006-07-28 00:06:37 +00001012
1013 /* The current chunk is entirely outside the trim extent:
1014 delete it. */
1015
1016 if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
1017 MC_(record_free_error)(tid, (Addr)mc->data);
1018 VG_(free)(chunks);
sewardj7e30be42011-01-27 23:56:36 +00001019 if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
sewardj2c1c9df2006-07-28 00:06:37 +00001020 return;
1021 }
1022 die_and_free_mem ( tid, mc, mp->rzB );
1023
1024 } else {
1025
1026 /* The current chunk intersects the trim extent: remove,
1027 trim, and reinsert it. */
1028
sewardj8aeeaa92006-08-16 17:51:28 +00001029 tl_assert(EXTENT_CONTAINS(lo) ||
1030 EXTENT_CONTAINS(hi));
sewardj2c1c9df2006-07-28 00:06:37 +00001031 if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
1032 MC_(record_free_error)(tid, (Addr)mc->data);
1033 VG_(free)(chunks);
sewardj7e30be42011-01-27 23:56:36 +00001034 if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
sewardj2c1c9df2006-07-28 00:06:37 +00001035 return;
1036 }
1037
sewardjc740d762006-10-05 17:59:23 +00001038 if (mc->data < addr) {
1039 min = mc->data;
1040 lo = addr;
1041 } else {
1042 min = addr;
1043 lo = mc->data;
1044 }
sewardj2c1c9df2006-07-28 00:06:37 +00001045
         /* The chunk's end is computed from its own size, mc->szB,
            not from the trim extent's size. */
         if (mc->data + mc->szB > addr + szB) {
            max = mc->data + mc->szB;
            hi = addr + szB;
         } else {
            max = addr + szB;
            hi = mc->data + mc->szB;
         }

         tl_assert(min <= lo);
         tl_assert(lo < hi);
         tl_assert(hi <= max);

         if (min < lo && !EXTENT_CONTAINS(min)) {
            MC_(make_mem_noaccess)( min, lo - min);
         }

         if (hi < max && !EXTENT_CONTAINS(max)) {
            MC_(make_mem_noaccess)( hi, max - hi );
         }

         mc->data = lo;
         mc->szB = (UInt) (hi - lo);
         VG_(HT_add_node)( mp->chunks, mc );
      }

#undef EXTENT_CONTAINS

   }
   check_mempool_sane(mp);
   VG_(free)(chunks);
}
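
/* Illustrative trim from the client side, assuming the valgrind.h
   macro; the pool base and sizes are hypothetical:

      // Keep only the chunks (or parts of chunks) inside
      // [base + 4096, base + 4096 + 8192); everything else in the
      // pool is freed or trimmed as computed above.
      VALGRIND_MEMPOOL_TRIM(pool, base + 4096, 8192);
*/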

void MC_(move_mempool)(Addr poolA, Addr poolB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)\n", poolA, poolB);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)poolA );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, poolA );
      return;
   }

   mp->pool = poolB;
   VG_(HT_add_node)( MC_(mempool_list), mp );
}

void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
{
   MC_Mempool* mp;
   MC_Chunk*   mc;
   ThreadId    tid = VG_(get_running_tid)();

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %lu)\n",
                   pool, addrA, addrB, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);

   mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addrA);
      return;
   }

   mc->data = addrB;
   mc->szB  = szB;
   VG_(HT_add_node)( mp->chunks, mc );

   check_mempool_sane(mp);
}

Bool MC_(mempool_exists)(Addr pool)
{
   MC_Mempool* mp;

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      return False;
   }
   return True;
}

static void xtmemory_report_next_block(XT_Allocs* xta, ExeContext** ec_alloc)
{
   MC_Chunk* mc = VG_(HT_Next)(MC_(malloc_list));
   if (mc) {
      xta->nbytes = mc->szB;
      xta->nblocks = 1;
      *ec_alloc = MC_(allocated_at)(mc);
   } else
      xta->nblocks = 0;
}

void MC_(xtmemory_report) ( const HChar* filename, Bool fini )
{
   // Make xtmemory_report_next_block ready to be called.
   VG_(HT_ResetIter)(MC_(malloc_list));

   VG_(XTMemory_report)(filename, fini, xtmemory_report_next_block,
                        VG_(XT_filter_1top_and_maybe_below_main));
}

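/* Illustrative invocation that exercises the report path above; the
   output file name is whatever --xtree-memory-file specifies:

      valgrind --tool=memcheck --xtree-memory=full \
               --xtree-memory-file=xtmemory.kcg ./myprog
*/
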
/*------------------------------------------------------------*/
/*--- Statistics printing                                  ---*/
/*------------------------------------------------------------*/

void MC_(print_malloc_stats) ( void )
{
   MC_Chunk* mc;
   SizeT     nblocks = 0;
   ULong     nbytes  = 0;

   if (VG_(clo_verbosity) == 0)
      return;
   if (VG_(clo_xml))
      return;

   /* Count memory still in use. */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      nblocks++;
      nbytes += (ULong)mc->szB;
   }

   VG_(umsg)(
      "HEAP SUMMARY:\n"
      "    in use at exit: %'llu bytes in %'lu blocks\n"
      "  total heap usage: %'lu allocs, %'lu frees, %'llu bytes allocated\n"
      "\n",
      nbytes, nblocks,
      cmalloc_n_mallocs,
      cmalloc_n_frees, cmalloc_bs_mallocd
   );
}
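
/* For reference, the VG_(umsg) call above prints something like this
   (the numbers are made up):

      HEAP SUMMARY:
          in use at exit: 1,200 bytes in 3 blocks
        total heap usage: 10 allocs, 7 frees, 4,096 bytes allocated
*/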

SizeT MC_(get_cmalloc_n_frees) ( void )
{
   return cmalloc_n_frees;
}


/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/