
/*--------------------------------------------------------------------*/
/*--- malloc/free wrappers for detecting errors and updating bits. ---*/
/*---                                         mc_malloc_wrappers.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2012 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_execontext.h"
#include "pub_tool_poolalloc.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"     // Needed for mc_include.h
#include "pub_tool_stacktrace.h"    // For VG_(get_and_pp_StackTrace)

#include "mc_include.h"

/*------------------------------------------------------------*/
/*--- Defns                                                ---*/
/*------------------------------------------------------------*/

/* Stats ... */
static SizeT cmalloc_n_mallocs  = 0;
static SizeT cmalloc_n_frees    = 0;
static ULong cmalloc_bs_mallocd = 0;

/* For debug printing to do with mempools: what stack trace
   depth to show. */
#define MEMPOOL_DEBUG_STACKTRACE_DEPTH 16


/*------------------------------------------------------------*/
/*--- Tracking malloc'd and free'd blocks                  ---*/
/*------------------------------------------------------------*/

SizeT MC_(Malloc_Redzone_SzB) = -10000000; // If used before set, should BOMB

/* Record malloc'd blocks. */
VgHashTable MC_(malloc_list) = NULL;

/* Memory pools: a hash table of MC_Mempools.  Search key is
   MC_Mempool::pool. */
VgHashTable MC_(mempool_list) = NULL;

/* Pool allocator for MC_Chunk. */
PoolAlloc *MC_(chunk_poolalloc) = NULL;
static
MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
                            MC_AllocKind kind);
static inline
void delete_MC_Chunk (MC_Chunk* mc);

/* Records blocks after freeing. */
/* Blocks freed by the client are queued in one of two lists of
   freed blocks not yet physically freed:
   - the "big blocks" freed list
   - the "small blocks" freed list
   Blocks with a size >= MC_(clo_freelist_big_blocks) are linked
   in the big blocks freed list.  This allows a client to allocate
   and free big blocks (e.g. bigger than VG_(clo_freelist_vol))
   without immediately losing all protection against dangling
   pointers.  Position [0] is for big blocks, [1] is for small
   blocks. */
static MC_Chunk* freed_list_start[2]  = {NULL, NULL};
static MC_Chunk* freed_list_end[2]    = {NULL, NULL};
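/* For illustration only (the option values here are assumptions based
   on the documented defaults, --freelist-vol=20000000 and
   --freelist-big-blocks=1000000, and are not checked by this code):
   a freed 4 MB buffer would be queued on list [0] and a freed 64-byte
   node on list [1].  Big blocks are recycled first (see
   release_oldest_block below), so many small stale blocks keep their
   "noaccess" protection for longer. */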

/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( MC_Chunk* mc )
{
   const Bool show = False;
   const int l = (mc->szB >= MC_(clo_freelist_big_blocks) ? 0 : 1);

   /* Put it at the end of the freed list, unless the block
      would be directly released anyway: in that case, we
      put it at the head of the freed list. */
   if (freed_list_end[l] == NULL) {
      tl_assert(freed_list_start[l] == NULL);
      mc->next = NULL;
      freed_list_end[l] = freed_list_start[l] = mc;
   } else {
      tl_assert(freed_list_end[l]->next == NULL);
      if (mc->szB >= MC_(clo_freelist_vol)) {
         mc->next = freed_list_start[l];
         freed_list_start[l] = mc;
      } else {
         mc->next = NULL;
         freed_list_end[l]->next = mc;
         freed_list_end[l] = mc;
      }
   }
   VG_(free_queue_volume) += (Long)mc->szB;
   if (show)
      VG_(printf)("mc_freelist: acquire: volume now %lld\n",
                  VG_(free_queue_volume));
   VG_(free_queue_length)++;
}
njn1d0825f2006-03-27 11:37:07 +0000128
sewardj403d8aa2011-10-22 19:48:57 +0000129/* Release enough of the oldest blocks to bring the free queue
130 volume below vg_clo_freelist_vol.
131 Start with big block list first.
132 On entry, VG_(free_queue_volume) must be > MC_(clo_freelist_vol).
133 On exit, VG_(free_queue_volume) will be <= MC_(clo_freelist_vol). */
134static void release_oldest_block(void)
135{
136 const Bool show = False;
137 int i;
138 tl_assert (VG_(free_queue_volume) > MC_(clo_freelist_vol));
139 tl_assert (freed_list_start[0] != NULL || freed_list_start[1] != NULL);
140
141 for (i = 0; i < 2; i++) {
142 while (VG_(free_queue_volume) > MC_(clo_freelist_vol)
143 && freed_list_start[i] != NULL) {
144 MC_Chunk* mc1;
145
146 tl_assert(freed_list_end[i] != NULL);
147
148 mc1 = freed_list_start[i];
149 VG_(free_queue_volume) -= (Long)mc1->szB;
150 VG_(free_queue_length)--;
151 if (show)
152 VG_(printf)("mc_freelist: discard: volume now %lld\n",
153 VG_(free_queue_volume));
154 tl_assert(VG_(free_queue_volume) >= 0);
155
156 if (freed_list_start[i] == freed_list_end[i]) {
157 freed_list_start[i] = freed_list_end[i] = NULL;
158 } else {
159 freed_list_start[i] = mc1->next;
160 }
161 mc1->next = NULL; /* just paranoia */
162
163 /* free MC_Chunk */
164 if (MC_AllocCustom != mc1->allockind)
165 VG_(cli_free) ( (void*)(mc1->data) );
philippe6643e962012-01-17 21:16:30 +0000166 delete_MC_Chunk ( mc1 );
sewardj403d8aa2011-10-22 19:48:57 +0000167 }
njn1d0825f2006-03-27 11:37:07 +0000168 }
169}

MC_Chunk* MC_(get_freed_block_bracketting) (Addr a)
{
   int i;
   for (i = 0; i < 2; i++) {
      MC_Chunk* mc;
      mc = freed_list_start[i];
      while (mc) {
         if (VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                    MC_(Malloc_Redzone_SzB) ))
            return mc;
         mc = mc->next;
      }
   }
   return NULL;
}

/* Allocate a shadow chunk, put it on the appropriate list.
   If needed, release oldest blocks from freed list. */
static
MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
                            MC_AllocKind kind)
{
   MC_Chunk* mc  = VG_(allocEltPA)(MC_(chunk_poolalloc));
   mc->data      = p;
   mc->szB       = szB;
   mc->allockind = kind;
   switch ( MC_(n_where_pointers)() ) {
      case 2: mc->where[1] = 0; // falls through to case 1
      case 1: mc->where[0] = 0; // falls through to case 0
      case 0: break;
      default: tl_assert(0);
   }
   MC_(set_allocated_at) (tid, mc);

   /* Each time a new MC_Chunk is created, release oldest blocks
      if the free list volume is exceeded. */
   if (VG_(free_queue_volume) > MC_(clo_freelist_vol))
      release_oldest_block();

   /* Paranoia ... ensure the MC_Chunk is off-limits to the client, so
      the mc->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by VG_(malloc)
      should be noaccess as far as the client is concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
      VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
   }
   return mc;
}

static inline
void delete_MC_Chunk (MC_Chunk* mc)
{
   VG_(freeEltPA) (MC_(chunk_poolalloc), mc);
}

// True if mc is in the given block list.
static Bool in_block_list (VgHashTable block_list, MC_Chunk* mc)
{
   MC_Chunk* found_mc = VG_(HT_lookup) ( block_list, (UWord)mc->data );
   if (found_mc) {
      tl_assert (found_mc->data == mc->data);
      /* If a user builds a pool from a malloc-ed superblock
         and uses VALGRIND_MALLOCLIKE_BLOCK to "mark"
         an address at the beginning of this superblock, then
         this address will appear twice in the block_list.
         We handle this case by checking size and allockind.
         Note: I suspect that having the same block
         twice in MC_(malloc_list) is a recipe for bugs.
         It might be better to create a "standard" mempool to
         handle all this more cleanly. */
      if (found_mc->szB != mc->szB
          || found_mc->allockind != mc->allockind)
         return False;
      tl_assert (found_mc == mc);
      return True;
   } else
      return False;
}
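/* A sketch of the double-registration scenario described above, as it
   might appear in client code (hypothetical pool built on malloc):

      char* super = malloc(65536);                // tracked in malloc_list
      VALGRIND_CREATE_MEMPOOL(super, 0, 0);
      VALGRIND_MEMPOOL_ALLOC(super, super, 256);  // same start address,
                                                  // now also a pool chunk

   Here the address 'super' is registered once as an MC_AllocMalloc
   block and once as an MC_AllocCustom chunk, which is why the size
   and allockind checks above are needed to tell the two apart. */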

// True if mc is a live block (not yet freed).
static Bool live_block (MC_Chunk* mc)
{
   if (mc->allockind == MC_AllocCustom) {
      MC_Mempool* mp;
      VG_(HT_ResetIter)(MC_(mempool_list));
      while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
         if ( in_block_list (mp->chunks, mc) )
            return True;
      }
   }
   /* Note: we fall through here for an MC_AllocCustom block that was
      not found above, as such a block can also be inserted in
      MC_(malloc_list) by VALGRIND_MALLOCLIKE_BLOCK. */
   return in_block_list ( MC_(malloc_list), mc );
}

ExeContext* MC_(allocated_at) (MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return VG_(null_ExeContext) ();
      case KS_alloc:           return mc->where[0];
      case KS_free:            return VG_(null_ExeContext) ();
      case KS_alloc_then_free: return (live_block(mc) ?
                                       mc->where[0] : VG_(null_ExeContext) ());
      case KS_alloc_and_free:  return mc->where[0];
      default: tl_assert (0);
   }
}

ExeContext* MC_(freed_at) (MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return VG_(null_ExeContext) ();
      case KS_alloc:           return VG_(null_ExeContext) ();
      case KS_free:            return (mc->where[0] ?
                                       mc->where[0] : VG_(null_ExeContext) ());
      case KS_alloc_then_free: return (live_block(mc) ?
                                       VG_(null_ExeContext) () : mc->where[0]);
      case KS_alloc_and_free:  return (mc->where[1] ?
                                       mc->where[1] : VG_(null_ExeContext) ());
      default: tl_assert (0);
   }
}

void MC_(set_allocated_at) (ThreadId tid, MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return;
      case KS_alloc:           break;
      case KS_free:            return;
      case KS_alloc_then_free: break;
      case KS_alloc_and_free:  break;
      default: tl_assert (0);
   }
   mc->where[0] = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
}

void MC_(set_freed_at) (ThreadId tid, MC_Chunk* mc)
{
   UInt pos;
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return;
      case KS_alloc:           return;
      case KS_free:            pos = 0; break;
      case KS_alloc_then_free: pos = 0; break;
      case KS_alloc_and_free:  pos = 1; break;
      default: tl_assert (0);
   }
   mc->where[pos] = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
}

UInt MC_(n_where_pointers) (void)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return 0;
      case KS_alloc:
      case KS_free:
      case KS_alloc_then_free: return 1;
      case KS_alloc_and_free:  return 2;
      default: tl_assert (0);
   }
}
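/* Summary of how --keep-stacktraces maps onto the where[] slots
   (derived from the four functions above):

      value            slots  where[0] holds        where[1] holds
      ---------------  -----  --------------------  --------------
      none               0    -                     -
      alloc              1    alloc stack           -
      free               1    free stack            -
      alloc-then-free    1    alloc, then free      -
      alloc-and-free     2    alloc stack           free stack

   With alloc-then-free, the free stack overwrites the alloc stack in
   slot 0, so allocated_at() only answers for still-live blocks. */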

/*------------------------------------------------------------*/
/*--- client_malloc(), etc                                 ---*/
/*------------------------------------------------------------*/

// XXX: should make this a proper error (bug #79311).
static Bool complain_about_silly_args(SizeT sizeB, const HChar* fn)
{
   // Cast to a signed type to catch any unexpectedly negative args.  We're
   // assuming here that the size asked for is not greater than 2^31 bytes
   // (for 32-bit platforms) or 2^63 bytes (for 64-bit platforms).
   if ((SSizeT)sizeB < 0) {
      if (!VG_(clo_xml))
         VG_(message)(Vg_UserMsg, "Warning: silly arg (%ld) to %s()\n",
                      (SSizeT)sizeB, fn );
      return True;
   }
   return False;
}

static Bool complain_about_silly_args2(SizeT n, SizeT sizeB)
{
   if ((SSizeT)n < 0 || (SSizeT)sizeB < 0) {
      if (!VG_(clo_xml))
         VG_(message)(Vg_UserMsg,
                      "Warning: silly args (%ld,%ld) to calloc()\n",
                      (SSizeT)n, (SSizeT)sizeB);
      return True;
   }
   return False;
}

/* Allocate memory and note change in memory available */
void* MC_(new_block) ( ThreadId tid,
                       Addr p, SizeT szB, SizeT alignB,
                       Bool is_zeroed, MC_AllocKind kind, VgHashTable table)
{
   MC_Chunk* mc;

   // Allocate and zero if necessary
   if (p) {
      tl_assert(MC_AllocCustom == kind);
   } else {
      tl_assert(MC_AllocCustom != kind);
      p = (Addr)VG_(cli_malloc)( alignB, szB );
      if (!p) {
         return NULL;
      }
      if (is_zeroed) {
         VG_(memset)((void*)p, 0, szB);
      } else
      if (MC_(clo_malloc_fill) != -1) {
         tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
         VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
      }
   }

   // Only update stats if allocation succeeded.
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)szB;
   mc = create_MC_Chunk (tid, p, szB, kind);
   VG_(HT_add_node)( table, mc );

   if (is_zeroed)
      MC_(make_mem_defined)( p, szB );
   else {
      UInt ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(mc));
      tl_assert(VG_(is_plausible_ECU)(ecu));
      MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
   }

   return (void*)p;
}
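/* Note on the two entry modes above: when p is non-zero the storage
   already exists and was registered by a client request (the
   MC_AllocCustom path, e.g. VALGRIND_MALLOCLIKE_BLOCK or
   VALGRIND_MEMPOOL_ALLOC), so only shadow state is set up; when p is
   zero the tool itself allocates client memory via VG_(cli_malloc).
   Either way the new payload ends up "defined" if zeroed, otherwise
   "undefined" and tagged with its allocation context. */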

void* MC_(malloc) ( ThreadId tid, SizeT n )
{
   if (complain_about_silly_args(n, "malloc")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
{
   if (complain_about_silly_args(n, "__builtin_new")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
   }
}

void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
{
   if (complain_about_silly_args(n, "__builtin_vec_new")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
   }
}

void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT n )
{
   if (complain_about_silly_args(n, "memalign")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, alignB,
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
{
   if (complain_about_silly_args2(nmemb, size1)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment),
         /*is_zeroed*/True, MC_AllocMalloc, MC_(malloc_list));
   }
}

static
void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
   /* Note: we do not free fill the custom allocs produced
      by MEMPOOL or by MALLOC/FREELIKE_BLOCK requests. */
   if (MC_(clo_free_fill) != -1 && MC_AllocCustom != mc->allockind ) {
      tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
      VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
   }

   /* Note: make redzones noaccess again -- just in case user made them
      accessible with a client request... */
   MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );

   /* Record where freed */
   MC_(set_freed_at) (tid, mc);
   /* Put it out of harm's way for a while */
   add_to_freed_queue ( mc );
   /* If the free list volume is bigger than MC_(clo_freelist_vol),
      we wait till the next block allocation to release blocks.
      This increases the chance of discovering dangling pointer usage,
      even for big blocks being freed by the client. */
}


static
void record_freemismatch_error (ThreadId tid, MC_Chunk* mc)
{
   /* MC_(record_freemismatch_error) reports errors for still-allocated
      blocks, but we are in the middle of freeing this one.  To
      report the error correctly, we re-insert the chunk (making it
      again a "clean allocated block"), report the error, and then
      re-remove the chunk.  This avoids doing a VG_(HT_lookup)
      followed by a VG_(HT_remove) in all "non-erroneous cases". */
   VG_(HT_add_node)( MC_(malloc_list), mc );
   MC_(record_freemismatch_error) ( tid, mc );
   if ((mc != VG_(HT_remove) ( MC_(malloc_list), (UWord)mc->data )))
      tl_assert(0);
}

void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
{
   MC_Chunk* mc;

   cmalloc_n_frees++;

   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, p );
   } else {
      /* check if it is a matching free() / delete / delete [] */
      if (kind != mc->allockind) {
         tl_assert(p == mc->data);
         record_freemismatch_error ( tid, mc );
      }
      die_and_free_mem ( tid, mc, rzB );
   }
}

void MC_(free) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocMalloc );
}

void MC_(__builtin_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew);
}

void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec);
}
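/* A minimal client-side sketch (hypothetical code) of what trips the
   kind check in MC_(handle_free) above: allocating with one family
   and freeing with another routes through different MC_AllocKind
   values, so Memcheck reports a mismatched-free error but still
   releases the block:

      int* a = new int[10];   // MC_AllocNewVec via __builtin_vec_new
      free(a);                // MC_AllocMalloc via MC_(free) -> mismatch
*/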

void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
   MC_Chunk* old_mc;
   MC_Chunk* new_mc;
   Addr      a_new;
   SizeT     old_szB;

   if (complain_about_silly_args(new_szB, "realloc"))
      return NULL;

   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)new_szB;

   /* Remove the old block */
   old_mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
   if (old_mc == NULL) {
      MC_(record_free_error) ( tid, (Addr)p_old );
      /* We return to the program regardless. */
      return NULL;
   }

   /* check if it's a matching free() / delete / delete [] */
   if (MC_AllocMalloc != old_mc->allockind) {
      /* can not realloc a range that was allocated with new or new [] */
      tl_assert((Addr)p_old == old_mc->data);
      record_freemismatch_error ( tid, old_mc );
      /* but keep going anyway */
   }

   old_szB = old_mc->szB;

   /* Get new memory */
   a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

   if (a_new) {
      /* In all cases, even when the new size is smaller or unchanged, we
         reallocate and copy the contents, and make the old block
         inaccessible.  This is so as to guarantee to catch all cases of
         accesses via the old address after reallocation, regardless of
         the change in size.  (Of course the ability to detect accesses
         to the old block also depends on the size of the freed blocks
         queue.) */

      // Allocate a new chunk.
      new_mc = create_MC_Chunk( tid, a_new, new_szB, MC_AllocMalloc );

      // Now insert the new mc (with a new 'data' field) into malloc_list.
      VG_(HT_add_node)( MC_(malloc_list), new_mc );

      /* Retained part is copied, red zones set as normal */

      /* Redzone at the front */
      MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB),
                              MC_(Malloc_Redzone_SzB) );

      /* payload */
      if (old_szB >= new_szB) {
         /* new size is smaller or the same */

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
         VG_(memcpy)((void*)a_new, p_old, new_szB);
      } else {
         /* new size is bigger */
         UInt ecu;

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, old_szB );
         VG_(memcpy)((void*)a_new, p_old, old_szB);

         // If the block has grown, we mark the grown area as undefined.
         // We have to do that after VG_(HT_add_node) to ensure the ecu
         // execontext is for a fully allocated block.
         ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(new_mc));
         tl_assert(VG_(is_plausible_ECU)(ecu));
         MC_(make_mem_undefined_w_otag)( a_new+old_szB,
                                         new_szB-old_szB,
                                         ecu | MC_OKIND_HEAP );

         /* Possibly fill new area with specified junk */
         if (MC_(clo_malloc_fill) != -1) {
            tl_assert(MC_(clo_malloc_fill) >= 0x00
                      && MC_(clo_malloc_fill) <= 0xFF);
            VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
                        new_szB-old_szB);
         }
      }

      /* Redzone at the back. */
      MC_(make_mem_noaccess) ( a_new+new_szB, MC_(Malloc_Redzone_SzB));

      /* Possibly fill freed area with specified junk. */
      if (MC_(clo_free_fill) != -1) {
         tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
         VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
      }

      /* Free old memory */
      /* Nb: we have to allocate a new MC_Chunk for the new memory rather
         than recycling the old one, so that any erroneous accesses to the
         old memory are reported. */
      die_and_free_mem ( tid, old_mc, MC_(Malloc_Redzone_SzB) );

   } else {
      /* Could not allocate new client memory.
         Re-insert the old_mc (with the old ptr) in the HT, as old_mc was
         unconditionally removed at the beginning of the function. */
      VG_(HT_add_node)( MC_(malloc_list), old_mc );
   }

   return (void*)a_new;
}
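/* Because the payload always moves (see the big comment above), a
   stale-pointer access after realloc is reliably flagged.  A sketch
   of hypothetical client code that this design catches:

      char* p = malloc(64);
      char* q = realloc(p, 128);  // always returns a fresh address
      p[0] = 'x';                 // invalid write: old block is noaccess
*/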

SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will be marked as addressable.
   return ( mc ? mc->szB : 0 );
}

/* This handles the in place resize of a block, as performed by the
   VALGRIND_RESIZEINPLACE_BLOCK client request.  It is unrelated to,
   and not used for, handling of the normal libc realloc()
   function. */
void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
                               SizeT oldSizeB, SizeT newSizeB, SizeT rzB)
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
   if (!mc || mc->szB != oldSizeB || newSizeB == 0) {
      /* Reject if: p is not found, or oldSizeB is wrong,
         or new block would be empty. */
      MC_(record_free_error) ( tid, p );
      return;
   }

   if (oldSizeB == newSizeB)
      return;

   mc->szB = newSizeB;
   if (newSizeB < oldSizeB) {
      MC_(make_mem_noaccess)( p + newSizeB, oldSizeB - newSizeB + rzB );
   } else {
      ExeContext* ec  = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
      UInt        ecu = VG_(get_ECU_from_ExeContext)(ec);
      MC_(make_mem_undefined_w_otag)( p + oldSizeB, newSizeB - oldSizeB,
                                      ecu | MC_OKIND_HEAP );
      if (rzB > 0)
         MC_(make_mem_noaccess)( p + newSizeB, rzB );
   }
}
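/* Sketch of the matching client request, for orientation (client code,
   not part of this file; the macro takes the address plus the old and
   new sizes and the redzone width):

      VALGRIND_RESIZEINPLACE_BLOCK(p, 128, 64, 0);

   Shrinking marks the tail [p+64, p+128) noaccess; growing would mark
   the new tail undefined, tagged with the resize stack trace. */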


/*------------------------------------------------------------*/
/*--- Memory pool stuff.                                   ---*/
/*------------------------------------------------------------*/

/* Set to 1 for intensive sanity checking.  Is very expensive though
   and should not be used in production scenarios.  See #255966. */
#define MP_DETAILED_SANITY_CHECKS 0

static void check_mempool_sane(MC_Mempool* mp); /*forward*/


void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "create_mempool(0x%lx, %d, %d)\n",
                               pool, rzB, is_zeroed);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp != NULL) {
      VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
   }

   mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
   mp->pool       = pool;
   mp->rzB        = rzB;
   mp->is_zeroed  = is_zeroed;
   mp->chunks     = VG_(HT_construct)( "MC_(create_mempool)" );
   check_mempool_sane(mp);

   /* Paranoia ... ensure this area is off-limits to the client, so
      the mp->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by
      VG_(malloc) should be noaccess as far as the client is
      concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
      VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
   }

   VG_(HT_add_node)( MC_(mempool_list), mp );
}
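/* For orientation, the client-request sequence that drives the pool
   functions in this file (hypothetical client code for a custom
   allocator built on a malloc'd or mmap'd superblock):

      VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed); // -> MC_(create_mempool)
      VALGRIND_MEMPOOL_ALLOC(pool, addr, size);      // -> MC_(mempool_alloc)
      VALGRIND_MEMPOOL_FREE(pool, addr);             // -> MC_(mempool_free)
      VALGRIND_DESTROY_MEMPOOL(pool);                // -> MC_(destroy_mempool)

   Trim, move, change and exists have matching requests too.  'pool'
   is just an address used as the key into MC_(mempool_list),
   typically the address of the pool's header structure. */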

void MC_(destroy_mempool)(Addr pool)
{
   MC_Chunk*   mc;
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }
   check_mempool_sane(mp);

   // Clean up the chunks, one by one
   VG_(HT_ResetIter)(mp->chunks);
   while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
      /* Note: make redzones noaccess again -- just in case user made them
         accessible with a client request... */
      MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
   }
   // Destroy the chunk table
   VG_(HT_destruct)(mp->chunks, (void (*)(void *))delete_MC_Chunk);

   VG_(free)(mp);
}

static Int
mp_compar(const void* n1, const void* n2)
{
   const MC_Chunk* mc1 = *(const MC_Chunk *const *)n1;
   const MC_Chunk* mc2 = *(const MC_Chunk *const *)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}

static void
check_mempool_sane(MC_Mempool* mp)
{
   UInt n_chunks, i, bad = 0;
   static UInt tick = 0;

   MC_Chunk **chunks = (MC_Chunk**) VG_(HT_to_array)( mp->chunks, &n_chunks );
   if (!chunks)
      return;

   if (VG_(clo_verbosity) > 1) {
      if (tick++ >= 10000)
      {
         UInt total_pools = 0, total_chunks = 0;
         MC_Mempool* mp2;

         VG_(HT_ResetIter)(MC_(mempool_list));
         while ( (mp2 = VG_(HT_Next)(MC_(mempool_list))) ) {
            total_pools++;
            VG_(HT_ResetIter)(mp2->chunks);
            while (VG_(HT_Next)(mp2->chunks)) {
               total_chunks++;
            }
         }

         VG_(message)(Vg_UserMsg,
                      "Total mempools active: %d pools, %d chunks\n",
                      total_pools, total_chunks);
         tick = 0;
      }
   }


   VG_(ssort)((void*)chunks, n_chunks, sizeof(VgHashNode*), mp_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data > chunks[i+1]->data) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d is out of order "
                      "wrt. its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d overlaps with its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   if (bad) {
      VG_(message)(Vg_UserMsg,
                   "Bad mempool (%d chunks), dumping chunks for inspection:\n",
                   n_chunks);
      for (i = 0; i < n_chunks; ++i) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d: %ld bytes "
                      "[%lx,%lx), allocated:\n",
                      i+1,
                      n_chunks,
                      chunks[i]->szB + 0UL,
                      chunks[i]->data,
                      chunks[i]->data + chunks[i]->szB);

         VG_(pp_ExeContext)(MC_(allocated_at)(chunks[i]));
      }
   }
   VG_(free)(chunks);
}

void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %ld)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
   if (mp == NULL) {
      MC_(record_illegal_mempool_error) ( tid, pool );
   } else {
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
      MC_(new_block)(tid, addr, szB, /*ignored*/0, mp->is_zeroed,
                     MC_AllocCustom, mp->chunks);
      if (mp->rzB > 0) {
         // This is not needed if the user application has properly
         // marked the superblock noaccess when defining the mempool.
         // However, we still mark the redzones noaccess, to catch
         // some bugs in case the user forgot.
         MC_(make_mem_noaccess) ( addr - mp->rzB, mp->rzB);
         MC_(make_mem_noaccess) ( addr + szB, mp->rzB);
      }
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   }
}

void MC_(mempool_free)(Addr pool, Addr addr)
{
   MC_Mempool* mp;
   MC_Chunk*   mc;
   ThreadId    tid = VG_(get_running_tid)();

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addr);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "mempool_free(0x%lx, 0x%lx) freed chunk of %ld bytes\n",
                   pool, addr, mc->szB + 0UL);
   }

   die_and_free_mem ( tid, mc, mp->rzB );
   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
}


void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();
   UInt         n_shadows, i;
   VgHashNode** chunks;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %ld)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);
   chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
   if (n_shadows == 0) {
      tl_assert(chunks == NULL);
      return;
   }

   tl_assert(chunks != NULL);
   for (i = 0; i < n_shadows; ++i) {

      Addr lo, hi, min, max;

      mc = (MC_Chunk*) chunks[i];

      lo = mc->data;
      hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;

#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))

      if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {

         /* The current chunk is entirely within the trim extent: keep
            it. */

         continue;

      } else if ( (! EXTENT_CONTAINS(lo)) &&
                  (! EXTENT_CONTAINS(hi)) ) {

         /* The current chunk is entirely outside the trim extent:
            delete it. */

         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }
         die_and_free_mem ( tid, mc, mp->rzB );

      } else {

         /* The current chunk intersects the trim extent: remove,
            trim, and reinsert it. */

         tl_assert(EXTENT_CONTAINS(lo) ||
                   EXTENT_CONTAINS(hi));
         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }

         if (mc->data < addr) {
            min = mc->data;
            lo = addr;
         } else {
            min = addr;
            lo = mc->data;
         }

         if (mc->data + szB > addr + szB) {
            max = mc->data + szB;
            hi = addr + szB;
         } else {
            max = addr + szB;
            hi = mc->data + szB;
         }

         tl_assert(min <= lo);
         tl_assert(lo < hi);
         tl_assert(hi <= max);

         if (min < lo && !EXTENT_CONTAINS(min)) {
            MC_(make_mem_noaccess)( min, lo - min);
         }

         if (hi < max && !EXTENT_CONTAINS(max)) {
            MC_(make_mem_noaccess)( hi, max - hi );
         }

         mc->data = lo;
         mc->szB = (UInt) (hi - lo);
         VG_(HT_add_node)( mp->chunks, mc );
      }

#undef EXTENT_CONTAINS

   }
   check_mempool_sane(mp);
   VG_(free)(chunks);
}
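/* Trim semantics in one picture (illustrative summary, derived from
   the three branches above): after MC_(mempool_trim)(pool, addr, szB),

      chunk entirely inside  [addr, addr+szB)  -> kept as-is
      chunk entirely outside [addr, addr+szB)  -> freed via die_and_free_mem
      chunk straddling a boundary              -> clipped to the overlap,
                                                  clipped-off part noaccess
*/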

void MC_(move_mempool)(Addr poolA, Addr poolB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)\n", poolA, poolB);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)poolA );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, poolA );
      return;
   }

   mp->pool = poolB;
   VG_(HT_add_node)( MC_(mempool_list), mp );
}

void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
{
   MC_Mempool* mp;
   MC_Chunk*   mc;
   ThreadId    tid = VG_(get_running_tid)();

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %ld)\n",
                   pool, addrA, addrB, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);

   mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addrA);
      return;
   }

   mc->data = addrB;
   mc->szB  = szB;
   VG_(HT_add_node)( mp->chunks, mc );

   check_mempool_sane(mp);
}

Bool MC_(mempool_exists)(Addr pool)
{
   MC_Mempool* mp;

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      return False;
   }
   return True;
}


/*------------------------------------------------------------*/
/*--- Statistics printing                                  ---*/
/*------------------------------------------------------------*/

void MC_(print_malloc_stats) ( void )
{
   MC_Chunk* mc;
   SizeT     nblocks = 0;
   ULong     nbytes  = 0;

   if (VG_(clo_verbosity) == 0)
      return;
   if (VG_(clo_xml))
      return;

   /* Count memory still in use. */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      nblocks++;
      nbytes += (ULong)mc->szB;
   }

   VG_(umsg)(
      "HEAP SUMMARY:\n"
      "     in use at exit: %'llu bytes in %'lu blocks\n"
      "   total heap usage: %'lu allocs, %'lu frees, %'llu bytes allocated\n"
      "\n",
      nbytes, nblocks,
      cmalloc_n_mallocs,
      cmalloc_n_frees, cmalloc_bs_mallocd
   );
}
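/* Illustrative output of the above (the numbers are invented for the
   example, not taken from any real run):

      HEAP SUMMARY:
          in use at exit: 2,048 bytes in 3 blocks
        total heap usage: 1,201 allocs, 1,198 frees, 528,432 bytes allocated
*/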

SizeT MC_(get_cmalloc_n_frees) ( void )
{
   return cmalloc_n_frees;
}


/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/