/*--------------------------------------------------------------------*/
/*--- malloc/free wrappers for detecting errors and updating bits. ---*/
/*---                                         mc_malloc_wrappers.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2011 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_execontext.h"
#include "pub_tool_poolalloc.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"     // Needed for mc_include.h
#include "pub_tool_stacktrace.h"    // For VG_(get_and_pp_StackTrace)

#include "mc_include.h"

/*------------------------------------------------------------*/
/*--- Defns                                                ---*/
/*------------------------------------------------------------*/

/* Stats ... */
static SizeT cmalloc_n_mallocs  = 0;
static SizeT cmalloc_n_frees    = 0;
static ULong cmalloc_bs_mallocd = 0;

/* For debug printing to do with mempools: what stack trace
   depth to show. */
#define MEMPOOL_DEBUG_STACKTRACE_DEPTH 16


/*------------------------------------------------------------*/
/*--- Tracking malloc'd and free'd blocks                  ---*/
/*------------------------------------------------------------*/

/* Record malloc'd blocks. */
VgHashTable MC_(malloc_list) = NULL;

/* Memory pools: a hash table of MC_Mempools.  Search key is
   MC_Mempool::pool. */
VgHashTable MC_(mempool_list) = NULL;

/* Pool allocator for MC_Chunk. */
PoolAlloc *MC_(chunk_poolalloc) = NULL;
static
MC_Chunk* create_MC_Chunk ( ExeContext* ec, Addr p, SizeT szB,
                            MC_AllocKind kind);
static inline
void delete_MC_Chunk (MC_Chunk* mc);

/* Records blocks after freeing. */
/* Blocks freed by the client are queued in one of two lists of
   freed blocks not yet physically freed:
   - the "big blocks" freed list
   - the "small blocks" freed list
   Blocks with a size >= MC_(clo_freelist_big_blocks) are linked
   in the big blocks freed list.
   This allows a client to allocate and free big blocks (e.g.
   bigger than VG_(clo_freelist_vol)) without immediately losing
   all protection against dangling pointers.
   Position [0] is for big blocks, [1] is for small blocks. */
static MC_Chunk* freed_list_start[2]  = {NULL, NULL};
static MC_Chunk* freed_list_end[2]    = {NULL, NULL};
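
/* Illustrative sketch (hypothetical client code): the freed queue is
   what keeps a use long after the free detectable:

      int* p = malloc(100 * sizeof(int));
      free(p);
      p[3] = 7;   // reported: write to a block free'd earlier

   The block stays queued (and marked noaccess) until the queued
   volume exceeds --freelist-vol. */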

/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( MC_Chunk* mc )
{
   const Bool show = False;
   const int  l    = (mc->szB >= MC_(clo_freelist_big_blocks) ? 0 : 1);

   /* Put it at the end of the freed list, unless the block would be
      released immediately in any case: then we put it at the head
      of the freed list. */
   if (freed_list_end[l] == NULL) {
      tl_assert(freed_list_start[l] == NULL);
      mc->next = NULL;
      freed_list_end[l] = freed_list_start[l] = mc;
   } else {
      tl_assert(freed_list_end[l]->next == NULL);
      if (mc->szB >= MC_(clo_freelist_vol)) {
         mc->next = freed_list_start[l];
         freed_list_start[l] = mc;
      } else {
         mc->next = NULL;
         freed_list_end[l]->next = mc;
         freed_list_end[l] = mc;
      }
   }
   VG_(free_queue_volume) += (Long)mc->szB;
   if (show)
      VG_(printf)("mc_freelist: acquire: volume now %lld\n",
                  VG_(free_queue_volume));
   VG_(free_queue_length)++;
}
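
/* Worked example (hypothetical values): with --freelist-vol=1000000,
   a freed block of 2000000 bytes can never fit under the volume cap,
   so it is queued at the head rather than the tail: the next release
   pass frees it first, instead of first flushing every older (and
   smaller) block in a futile attempt to make room for it. */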

/* Release enough of the oldest blocks to bring the free queue
   volume below MC_(clo_freelist_vol).
   Start with the big blocks list first.
   On entry, VG_(free_queue_volume) must be > MC_(clo_freelist_vol).
   On exit, VG_(free_queue_volume) will be <= MC_(clo_freelist_vol). */
static void release_oldest_block(void)
{
   const Bool show = False;
   int i;
   tl_assert (VG_(free_queue_volume) > MC_(clo_freelist_vol));
   tl_assert (freed_list_start[0] != NULL || freed_list_start[1] != NULL);

   for (i = 0; i < 2; i++) {
      while (VG_(free_queue_volume) > MC_(clo_freelist_vol)
             && freed_list_start[i] != NULL) {
         MC_Chunk* mc1;

         tl_assert(freed_list_end[i] != NULL);

         mc1 = freed_list_start[i];
         VG_(free_queue_volume) -= (Long)mc1->szB;
         VG_(free_queue_length)--;
         if (show)
            VG_(printf)("mc_freelist: discard: volume now %lld\n",
                        VG_(free_queue_volume));
         tl_assert(VG_(free_queue_volume) >= 0);

         if (freed_list_start[i] == freed_list_end[i]) {
            freed_list_start[i] = freed_list_end[i] = NULL;
         } else {
            freed_list_start[i] = mc1->next;
         }
         mc1->next = NULL; /* just paranoia */

         /* free MC_Chunk */
         if (MC_AllocCustom != mc1->allockind)
            VG_(cli_free) ( (void*)(mc1->data) );
         delete_MC_Chunk ( mc1 );
      }
   }
}

MC_Chunk* MC_(get_freed_block_bracketting) (Addr a)
{
   int i;
   for (i = 0; i < 2; i++) {
      MC_Chunk* mc;
      mc = freed_list_start[i];
      while (mc) {
         if (VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                    MC_MALLOC_REDZONE_SZB ))
            return mc;
         mc = mc->next;
      }
   }
   return NULL;
}
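
/* Illustrative sketch (hypothetical client code) of what the lookup
   above enables:

      char* p = malloc(100);
      free(p);
      p[16] = 'x';
      // => "Address ... is 16 bytes inside a block of size 100 free'd"

   The bracketing freed chunk found here supplies the "free'd at"
   stack trace for that error message. */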

/* Allocate a shadow chunk, put it on the appropriate list.
   If needed, release oldest blocks from freed list. */
static
MC_Chunk* create_MC_Chunk ( ExeContext* ec, Addr p, SizeT szB,
                            MC_AllocKind kind)
{
   MC_Chunk* mc  = VG_(allocEltPA)(MC_(chunk_poolalloc));
   mc->data      = p;
   mc->szB       = szB;
   mc->allockind = kind;
   mc->where     = ec;

   /* Each time a new MC_Chunk is created, release oldest blocks
      if the free list volume is exceeded. */
   if (VG_(free_queue_volume) > MC_(clo_freelist_vol))
      release_oldest_block();

   /* Paranoia ... ensure the MC_Chunk is off-limits to the client, so
      the mc->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by VG_(malloc)
      should be noaccess as far as the client is concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
      VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
   }
   return mc;
}

static inline
void delete_MC_Chunk (MC_Chunk* mc)
{
   VG_(freeEltPA) (MC_(chunk_poolalloc), mc);
}

/*------------------------------------------------------------*/
/*--- client_malloc(), etc                                 ---*/
/*------------------------------------------------------------*/

// XXX: should make this a proper error (bug #79311).
static Bool complain_about_silly_args(SizeT sizeB, Char* fn)
{
   // Cast to a signed type to catch any unexpectedly negative args.  We're
   // assuming here that the size asked for is not greater than 2^31 bytes
   // (for 32-bit platforms) or 2^63 bytes (for 64-bit platforms).
   if ((SSizeT)sizeB < 0) {
      if (!VG_(clo_xml))
         VG_(message)(Vg_UserMsg, "Warning: silly arg (%ld) to %s()\n",
                      (SSizeT)sizeB, fn );
      return True;
   }
   return False;
}

static Bool complain_about_silly_args2(SizeT n, SizeT sizeB)
{
   if ((SSizeT)n < 0 || (SSizeT)sizeB < 0) {
      if (!VG_(clo_xml))
         VG_(message)(Vg_UserMsg,
                      "Warning: silly args (%ld,%ld) to calloc()\n",
                      (SSizeT)n, (SSizeT)sizeB);
      return True;
   }
   return False;
}
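
/* Example (hypothetical client code) of what these checks catch:

      int n = -1;
      char* p = malloc(n);      // "Warning: silly arg (-1) to malloc()"
      char* q = calloc(n, 4);   // "Warning: silly args (-1,4) to calloc()"
      // both return NULL rather than attempting a huge allocation
*/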

/* Allocate memory and note change in memory available */
void* MC_(new_block) ( ThreadId tid,
                       Addr p, SizeT szB, SizeT alignB,
                       Bool is_zeroed, MC_AllocKind kind, VgHashTable table)
{
   ExeContext* ec;

   // Allocate and zero if necessary
   if (p) {
      tl_assert(MC_AllocCustom == kind);
   } else {
      tl_assert(MC_AllocCustom != kind);
      p = (Addr)VG_(cli_malloc)( alignB, szB );
      if (!p) {
         return NULL;
      }
      if (is_zeroed) {
         VG_(memset)((void*)p, 0, szB);
      } else
      if (MC_(clo_malloc_fill) != -1) {
         tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
         VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
      }
   }

   // Only update stats if allocation succeeded.
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)szB;

   ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   tl_assert(ec);

   VG_(HT_add_node)( table, create_MC_Chunk(ec, p, szB, kind) );

   if (is_zeroed)
      MC_(make_mem_defined)( p, szB );
   else {
      UInt ecu = VG_(get_ECU_from_ExeContext)(ec);
      tl_assert(VG_(is_plausible_ECU)(ecu));
      MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
   }

   return (void*)p;
}
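
/* Example (hypothetical client code) of the resulting V-bit state:

      int* a = malloc(sizeof *a);
      if (*a == 0) { }      // "use of uninitialised value": undefined
      int* b = calloc(1, sizeof *b);
      if (*b == 0) { }      // fine: is_zeroed => made defined above

   Note that --malloc-fill only changes the block's contents; the
   bytes remain undefined as far as the V bits are concerned. */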

void* MC_(malloc) ( ThreadId tid, SizeT n )
{
   if (complain_about_silly_args(n, "malloc")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
{
   if (complain_about_silly_args(n, "__builtin_new")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
   }
}

void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
{
   if (complain_about_silly_args(n, "__builtin_vec_new")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
   }
}

void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT n )
{
   if (complain_about_silly_args(n, "memalign")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, alignB,
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
{
   if (complain_about_silly_args2(nmemb, size1)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment),
         /*is_zeroed*/True, MC_AllocMalloc, MC_(malloc_list));
   }
}

static
void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
   /* Note: we do not free fill the custom allocs produced
      by MEMPOOL or by MALLOC/FREELIKE_BLOCK requests. */
   if (MC_(clo_free_fill) != -1 && MC_AllocCustom != mc->allockind ) {
      tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
      VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
   }

   /* Note: make redzones noaccess again -- just in case user made them
      accessible with a client request... */
   MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );

   /* Record where freed */
   mc->where = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
   /* Put it out of harm's way for a while */
   add_to_freed_queue ( mc );
   /* If the free list volume is bigger than MC_(clo_freelist_vol),
      we wait until the next block allocation to release blocks.
      This increases the chance of discovering dangling pointer usage,
      even for big blocks being freed by the client. */
}

void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
{
   MC_Chunk* mc;

   cmalloc_n_frees++;

   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, p );
   } else {
      /* check if it is a matching free() / delete / delete [] */
      if (kind != mc->allockind) {
         tl_assert(p == mc->data);
         MC_(record_freemismatch_error) ( tid, mc );
      }
      die_and_free_mem ( tid, mc, rzB );
   }
}

void MC_(free) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocMalloc );
}

void MC_(__builtin_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNew);
}

void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNewVec);
}

void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
   MC_Chunk* mc;
   void*     p_new;
   SizeT     old_szB;

   if (complain_about_silly_args(new_szB, "realloc"))
      return NULL;

   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)new_szB;

   /* Remove the old block */
   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, (Addr)p_old );
      /* We return to the program regardless. */
      return NULL;
   }

   /* check if it is a matching free() / delete / delete [] */
   if (MC_AllocMalloc != mc->allockind) {
      /* cannot realloc a range that was allocated with new or new [] */
      tl_assert((Addr)p_old == mc->data);
      MC_(record_freemismatch_error) ( tid, mc );
      /* but keep going anyway */
   }

   old_szB = mc->szB;

   /* In all cases, even when the new size is smaller or unchanged, we
      reallocate and copy the contents, and make the old block
      inaccessible.  This is so as to guarantee to catch all cases of
      accesses via the old address after reallocation, regardless of
      the change in size.  (Of course the ability to detect accesses
      to the old block also depends on the size of the freed blocks
      queue.) */
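
   /* Example (hypothetical client code) of why the old block must be
      retired even on a shrinking realloc:

         char* p = malloc(100);
         char* q = realloc(p, 50);   // here q != p, by design
         p[0] = 'x';                 // caught: p points at freed memory
   */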

   if (new_szB <= old_szB) {
      /* new size is smaller or the same */
      Addr a_new;
      /* Get new memory */
      a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

      if (a_new) {
         ExeContext* ec;

         ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
         tl_assert(ec);

         /* Retained part is copied, red zones set as normal */
         MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB,
                                 MC_MALLOC_REDZONE_SZB );
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
         MC_(make_mem_noaccess) ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );

         /* Copy from old to new */
         VG_(memcpy)((void*)a_new, p_old, new_szB);

         /* Possibly fill freed area with specified junk. */
         if (MC_(clo_free_fill) != -1) {
            tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
            VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
         }

         /* Free old memory */
         /* Nb: we have to allocate a new MC_Chunk for the new memory rather
            than recycling the old one, so that any erroneous accesses to the
            old memory are reported. */
         die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );

         // Allocate a new chunk.
         mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
      }

      p_new = (void*)a_new;

   } else {
      /* new size is bigger */
      Addr a_new;
      tl_assert(old_szB < new_szB);
      /* Get new memory */
      a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

      if (a_new) {
         UInt ecu;
         ExeContext* ec;

         ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
         tl_assert(ec);
         ecu = VG_(get_ECU_from_ExeContext)(ec);
         tl_assert(VG_(is_plausible_ECU)(ecu));

         /* First half kept and copied, second half new, red zones as normal */
         MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB,
                                 MC_MALLOC_REDZONE_SZB );
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, mc->szB );
         MC_(make_mem_undefined_w_otag)( a_new+mc->szB, new_szB-mc->szB,
                                         ecu | MC_OKIND_HEAP );
         MC_(make_mem_noaccess) ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );

         /* Possibly fill new area with specified junk */
         if (MC_(clo_malloc_fill) != -1) {
            tl_assert(MC_(clo_malloc_fill) >= 0x00
                      && MC_(clo_malloc_fill) <= 0xFF);
            VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
                        new_szB-old_szB);
         }

         /* Copy from old to new */
         VG_(memcpy)((void*)a_new, p_old, mc->szB);

         /* Possibly fill freed area with specified junk. */
         if (MC_(clo_free_fill) != -1) {
            tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
            VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
         }

         /* Free old memory */
         /* Nb: we have to allocate a new MC_Chunk for the new memory rather
            than recycling the old one, so that any erroneous accesses to the
            old memory are reported. */
         die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );

         // Allocate a new chunk.
         mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
      }

      p_new = (void*)a_new;
   }

   // Now insert the new mc (with a possibly new 'data' field) into
   // malloc_list.  If this realloc() did not increase the memory size, we
   // will have removed and then re-added mc unnecessarily.  But that's ok
   // because shrinking a block with realloc() is (presumably) much rarer
   // than growing it, and this way simplifies the growing case.
   VG_(HT_add_node)( MC_(malloc_list), mc );

   return p_new;
}

SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will be marked as addressable.
   return ( mc ? mc->szB : 0 );
}

/* This handles the in place resize of a block, as performed by the
   VALGRIND_RESIZEINPLACE_BLOCK client request.  It is unrelated to,
   and not used for, handling of the normal libc realloc()
   function. */
void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
                               SizeT oldSizeB, SizeT newSizeB, SizeT rzB)
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
   if (!mc || mc->szB != oldSizeB || newSizeB == 0) {
      /* Reject if: p is not found, or oldSizeB is wrong,
         or new block would be empty. */
      MC_(record_free_error) ( tid, p );
      return;
   }

   if (oldSizeB == newSizeB)
      return;

   mc->szB = newSizeB;
   if (newSizeB < oldSizeB) {
      MC_(make_mem_noaccess)( p + newSizeB, oldSizeB - newSizeB + rzB );
   } else {
      ExeContext* ec  = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
      UInt        ecu = VG_(get_ECU_from_ExeContext)(ec);
      MC_(make_mem_undefined_w_otag)( p + oldSizeB, newSizeB - oldSizeB,
                                      ecu | MC_OKIND_HEAP );
      if (rzB > 0)
         MC_(make_mem_noaccess)( p + newSizeB, rzB );
   }
}
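
/* Example (hypothetical client code) using the client request this
   implements (macro from memcheck.h):

      char* p = malloc(100);
      VALGRIND_RESIZEINPLACE_BLOCK(p, 100, 40, 0);   // rzB == 0
      p[50] = 'x';   // now reported: beyond the (shrunk) 40-byte block
*/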


/*------------------------------------------------------------*/
/*--- Memory pool stuff.                                   ---*/
/*------------------------------------------------------------*/
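
/* Sketch of the client-side protocol these functions implement
   (macros from valgrind.h; hypothetical client code):

      static char arena[4096];
      VALGRIND_CREATE_MEMPOOL(arena, 0, 0);           // rzB 0, not zeroed
      VALGRIND_MEMPOOL_ALLOC(arena, arena + 16, 100);
      VALGRIND_MEMPOOL_FREE(arena, arena + 16);
      VALGRIND_DESTROY_MEMPOOL(arena);

   The pool handle ('arena') is just an address used as a lookup key
   in MC_(mempool_list); each pool tracks its chunks in mp->chunks. */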

/* Set to 1 for intensive sanity checking.  Is very expensive though
   and should not be used in production scenarios.  See #255966. */
#define MP_DETAILED_SANITY_CHECKS 0

static void check_mempool_sane(MC_Mempool* mp); /*forward*/


void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "create_mempool(0x%lx, %d, %d)\n",
                   pool, rzB, is_zeroed);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp != NULL) {
      VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
   }

   mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
   mp->pool      = pool;
   mp->rzB       = rzB;
   mp->is_zeroed = is_zeroed;
   mp->chunks    = VG_(HT_construct)( "MC_(create_mempool)" );
   check_mempool_sane(mp);

   /* Paranoia ... ensure this area is off-limits to the client, so
      the mp->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by
      VG_(malloc) should be noaccess as far as the client is
      concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
      VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
   }

   VG_(HT_add_node)( MC_(mempool_list), mp );
}

void MC_(destroy_mempool)(Addr pool)
{
   MC_Chunk*   mc;
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }
   check_mempool_sane(mp);

   // Clean up the chunks, one by one
   VG_(HT_ResetIter)(mp->chunks);
   while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
      /* Note: make redzones noaccess again -- just in case user made them
         accessible with a client request... */
      MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
   }
   // Destroy the chunk table
   VG_(HT_destruct)(mp->chunks, (void (*)(void *))delete_MC_Chunk);

   VG_(free)(mp);
}

static Int
mp_compar(void* n1, void* n2)
{
   MC_Chunk* mc1 = *(MC_Chunk**)n1;
   MC_Chunk* mc2 = *(MC_Chunk**)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}

static void
check_mempool_sane(MC_Mempool* mp)
{
   UInt n_chunks, i, bad = 0;
   static UInt tick = 0;

   MC_Chunk **chunks = (MC_Chunk**) VG_(HT_to_array)( mp->chunks, &n_chunks );
   if (!chunks)
      return;

   if (VG_(clo_verbosity) > 1) {
      if (tick++ >= 10000) {
         UInt total_pools = 0, total_chunks = 0;
         MC_Mempool* mp2;

         VG_(HT_ResetIter)(MC_(mempool_list));
         while ( (mp2 = VG_(HT_Next)(MC_(mempool_list))) ) {
            total_pools++;
            VG_(HT_ResetIter)(mp2->chunks);
            while (VG_(HT_Next)(mp2->chunks)) {
               total_chunks++;
            }
         }

         VG_(message)(Vg_UserMsg,
                      "Total mempools active: %d pools, %d chunks\n",
                      total_pools, total_chunks);
         tick = 0;
      }
   }

   VG_(ssort)((void*)chunks, n_chunks, sizeof(VgHashNode*), mp_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data > chunks[i+1]->data) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d is out of order "
                      "wrt. its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d overlaps with its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   if (bad) {
      VG_(message)(Vg_UserMsg,
                   "Bad mempool (%d chunks), dumping chunks for inspection:\n",
                   n_chunks);
      for (i = 0; i < n_chunks; ++i) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d: %ld bytes "
                      "[%lx,%lx), allocated:\n",
                      i+1,
                      n_chunks,
                      chunks[i]->szB + 0UL,
                      chunks[i]->data,
                      chunks[i]->data + chunks[i]->szB);

         VG_(pp_ExeContext)(chunks[i]->where);
      }
   }
   VG_(free)(chunks);
}

void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %ld)\n",
                   pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
   if (mp == NULL) {
      MC_(record_illegal_mempool_error) ( tid, pool );
   } else {
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
      MC_(new_block)(tid, addr, szB, /*ignored*/0, mp->is_zeroed,
                     MC_AllocCustom, mp->chunks);
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   }
}

void MC_(mempool_free)(Addr pool, Addr addr)
{
   MC_Mempool* mp;
   MC_Chunk*   mc;
   ThreadId    tid = VG_(get_running_tid)();

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addr);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "mempool_free(0x%lx, 0x%lx) freed chunk of %ld bytes\n",
                   pool, addr, mc->szB + 0UL);
   }

   die_and_free_mem ( tid, mc, mp->rzB );
   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
}


void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();
   UInt         n_shadows, i;
   VgHashNode** chunks;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %ld)\n",
                   pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);
   chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
   if (n_shadows == 0) {
      tl_assert(chunks == NULL);
      return;
   }

   tl_assert(chunks != NULL);
   for (i = 0; i < n_shadows; ++i) {

      Addr lo, hi, min, max;

      mc = (MC_Chunk*) chunks[i];

      lo = mc->data;
      hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;

#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))

      if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {

         /* The current chunk is entirely within the trim extent: keep
            it. */

         continue;

      } else if ( (! EXTENT_CONTAINS(lo)) &&
                  (! EXTENT_CONTAINS(hi)) ) {

         /* The current chunk is entirely outside the trim extent:
            delete it. */

         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }
         die_and_free_mem ( tid, mc, mp->rzB );

      } else {

         /* The current chunk intersects the trim extent: remove,
            trim, and reinsert it. */

         tl_assert(EXTENT_CONTAINS(lo) ||
                   EXTENT_CONTAINS(hi));
         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }

         if (mc->data < addr) {
            min = mc->data;
            lo  = addr;
         } else {
            min = addr;
            lo  = mc->data;
         }

         /* Note: the chunk's extent ends at mc->data + mc->szB;
            szB is the size of the trim extent, not of the chunk. */
         if (mc->data + mc->szB > addr + szB) {
            max = mc->data + mc->szB;
            hi  = addr + szB;
         } else {
            max = addr + szB;
            hi  = mc->data + mc->szB;
         }

         tl_assert(min <= lo);
         tl_assert(lo < hi);
         tl_assert(hi <= max);

         if (min < lo && !EXTENT_CONTAINS(min)) {
            MC_(make_mem_noaccess)( min, lo - min);
         }

         if (hi < max && !EXTENT_CONTAINS(max)) {
            MC_(make_mem_noaccess)( hi, max - hi );
         }

         mc->data = lo;
         mc->szB  = (UInt) (hi - lo);
         VG_(HT_add_node)( mp->chunks, mc );
      }

#undef EXTENT_CONTAINS

   }
   check_mempool_sane(mp);
   VG_(free)(chunks);
}
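
/* Worked example (hypothetical addresses): after
      VALGRIND_MEMPOOL_TRIM(pool, 0x1000, 0x100)
   a chunk [0x1000,0x1040) lies inside the trim extent and is kept,
   a chunk [0x2000,0x2040) lies outside and is freed, and a chunk
   [0x0ff0,0x1020) straddles the boundary: it is trimmed to
   [0x1000,0x1020) and [0x0ff0,0x1000) is marked noaccess. */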

void MC_(move_mempool)(Addr poolA, Addr poolB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)\n", poolA, poolB);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)poolA );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, poolA );
      return;
   }

   mp->pool = poolB;
   VG_(HT_add_node)( MC_(mempool_list), mp );
}

void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
{
   MC_Mempool* mp;
   MC_Chunk*   mc;
   ThreadId    tid = VG_(get_running_tid)();

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %ld)\n",
                   pool, addrA, addrB, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);

   mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addrA);
      return;
   }

   mc->data = addrB;
   mc->szB  = szB;
   VG_(HT_add_node)( mp->chunks, mc );

   check_mempool_sane(mp);
}
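
/* Example (hypothetical client code): a pool that relocates a live
   chunk (e.g. a compacting allocator) reports the move like this:

      VALGRIND_MEMPOOL_CHANGE(pool, old_addr, new_addr, 100);

   so the shadow chunk follows the data to its new address and size. */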

Bool MC_(mempool_exists)(Addr pool)
{
   MC_Mempool* mp;

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      return False;
   }
   return True;
}


/*------------------------------------------------------------*/
/*--- Statistics printing                                  ---*/
/*------------------------------------------------------------*/

void MC_(print_malloc_stats) ( void )
{
   MC_Chunk* mc;
   SizeT     nblocks = 0;
   ULong     nbytes  = 0;

   if (VG_(clo_verbosity) == 0)
      return;
   if (VG_(clo_xml))
      return;

   /* Count memory still in use. */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      nblocks++;
      nbytes += (ULong)mc->szB;
   }

   VG_(umsg)(
      "HEAP SUMMARY:\n"
      "     in use at exit: %'llu bytes in %'lu blocks\n"
      "   total heap usage: %'lu allocs, %'lu frees, %'llu bytes allocated\n"
      "\n",
      nbytes, nblocks,
      cmalloc_n_mallocs,
      cmalloc_n_frees, cmalloc_bs_mallocd
   );
}
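
/* Illustrative output (hypothetical numbers):

      HEAP SUMMARY:
           in use at exit: 1,200 bytes in 3 blocks
         total heap usage: 10 allocs, 7 frees, 8,192 bytes allocated
*/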

SizeT MC_(get_cmalloc_n_frees) ( void )
{
   return cmalloc_n_frees;
}


/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/