blob: 4466aa5256c1f38421c4c5901b39bad271a07746 [file] [log] [blame]
njn1d0825f2006-03-27 11:37:07 +00001
2/*--------------------------------------------------------------------*/
3/*--- malloc/free wrappers for detecting errors and updating bits. ---*/
4/*--- mc_malloc_wrappers.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of MemCheck, a heavyweight Valgrind tool for
njne2656362007-03-10 02:27:44 +00009 detecting memory errors.
njn1d0825f2006-03-27 11:37:07 +000010
sewardj9eecbbb2010-05-03 21:37:12 +000011 Copyright (C) 2000-2010 Julian Seward
njn1d0825f2006-03-27 11:37:07 +000012 jseward@acm.org
13
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
29 The GNU General Public License is contained in the file COPYING.
30*/
31
32#include "pub_tool_basics.h"
33#include "pub_tool_execontext.h"
34#include "pub_tool_hashtable.h"
35#include "pub_tool_libcbase.h"
36#include "pub_tool_libcassert.h"
37#include "pub_tool_libcprint.h"
38#include "pub_tool_mallocfree.h"
39#include "pub_tool_options.h"
40#include "pub_tool_replacemalloc.h"
41#include "pub_tool_threadstate.h"
42#include "pub_tool_tooliface.h" // Needed for mc_include.h
sewardjc740d762006-10-05 17:59:23 +000043#include "pub_tool_stacktrace.h" // For VG_(get_and_pp_StackTrace)
njn1d0825f2006-03-27 11:37:07 +000044
45#include "mc_include.h"
46
47/*------------------------------------------------------------*/
48/*--- Defns ---*/
49/*------------------------------------------------------------*/
50
/* Stats: running totals for the allocation wrappers, reported by
   MC_(print_malloc_stats) at exit. */
static SizeT cmalloc_n_mallocs  = 0;   /* number of client allocations      */
static SizeT cmalloc_n_frees    = 0;   /* number of client frees            */
static ULong cmalloc_bs_mallocd = 0;   /* total bytes requested (64-bit to
                                          avoid wrap on long-running runs)  */

/* For debug printing to do with mempools: what stack trace
   depth to show. */
#define MEMPOOL_DEBUG_STACKTRACE_DEPTH 16
59
njn1d0825f2006-03-27 11:37:07 +000060
61/*------------------------------------------------------------*/
62/*--- Tracking malloc'd and free'd blocks ---*/
63/*------------------------------------------------------------*/
64
/* Record malloc'd blocks.  Hash table keyed on the block's client
   address (MC_Chunk.data). */
VgHashTable MC_(malloc_list) = NULL;

/* Memory pools: one MC_Mempool per client pool, keyed on pool anchor
   address. */
VgHashTable MC_(mempool_list) = NULL;

/* Records blocks after freeing: a FIFO queue (oldest at the head) that
   delays reuse of freed memory so use-after-free can be detected. */
static MC_Chunk* freed_list_start = NULL;
static MC_Chunk* freed_list_end   = NULL;
njn1d0825f2006-03-27 11:37:07 +000074
/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time.  The queue's
   total payload volume is kept below MC_(clo_freelist_vol) by discarding
   (really freeing) blocks from the head. */
static void add_to_freed_queue ( MC_Chunk* mc )
{
   const Bool show = False;   /* flip to True for freelist debug traces */

   /* Put it at the end of the freed list */
   if (freed_list_end == NULL) {
      /* Queue is empty: this chunk becomes both head and tail. */
      tl_assert(freed_list_start == NULL);
      freed_list_end = freed_list_start = mc;
      VG_(free_queue_volume) = (Long)mc->szB;
   } else {
      tl_assert(freed_list_end->next == NULL);
      freed_list_end->next = mc;
      freed_list_end = mc;
      VG_(free_queue_volume) += (Long)mc->szB;
      if (show)
         VG_(printf)("mc_freelist: acquire: volume now %lld\n",
                     VG_(free_queue_volume));
   }
   VG_(free_queue_length)++;
   mc->next = NULL;

   /* Release enough of the oldest blocks to bring the free queue
      volume below vg_clo_freelist_vol. */

   while (VG_(free_queue_volume) > MC_(clo_freelist_vol)) {
      MC_Chunk* mc1;

      tl_assert(freed_list_start != NULL);
      tl_assert(freed_list_end != NULL);

      mc1 = freed_list_start;
      VG_(free_queue_volume) -= (Long)mc1->szB;
      VG_(free_queue_length)--;
      if (show)
         VG_(printf)("mc_freelist: discard: volume now %lld\n",
                     VG_(free_queue_volume));
      tl_assert(VG_(free_queue_volume) >= 0);

      /* Unlink the head; collapse to empty if it was the only entry. */
      if (freed_list_start == freed_list_end) {
         freed_list_start = freed_list_end = NULL;
      } else {
         freed_list_start = mc1->next;
      }
      mc1->next = NULL; /* just paranoia */

      /* free MC_Chunk: release both the client payload and the shadow. */
      VG_(cli_free) ( (void*)(mc1->data) );
      VG_(free) ( mc1 );
   }
}
127
/* Expose the head (oldest entry) of the freed-blocks queue, e.g. for
   the leak checker to scan still-quarantined blocks. */
MC_Chunk* MC_(get_freed_list_head)(void)
{
   return freed_list_start;
}
132
/* Allocate its shadow chunk, put it on the appropriate list.
   'ec' is the allocation context recorded in the chunk; 'p' is the
   client address, 'szB' the client-requested size.  Caller owns the
   returned chunk (usually handed straight to VG_(HT_add_node)). */
static
MC_Chunk* create_MC_Chunk ( ExeContext* ec, Addr p, SizeT szB,
                            MC_AllocKind kind)
{
   MC_Chunk* mc  = VG_(malloc)("mc.cMC.1 (a MC_Chunk)", sizeof(MC_Chunk));
   mc->data      = p;
   mc->szB       = szB;
   mc->allockind = kind;
   mc->where     = ec;

   /* Paranoia ... ensure the MC_Chunk is off-limits to the client, so
      the mc->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by VG_(malloc)
      should be noaccess as far as the client is concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
      VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
   }
   return mc;
}
153
154/*------------------------------------------------------------*/
155/*--- client_malloc(), etc ---*/
156/*------------------------------------------------------------*/
157
njn017d3772009-05-19 02:10:26 +0000158// XXX: should make this a proper error (bug #79311).
njn1d0825f2006-03-27 11:37:07 +0000159static Bool complain_about_silly_args(SizeT sizeB, Char* fn)
160{
161 // Cast to a signed type to catch any unexpectedly negative args. We're
162 // assuming here that the size asked for is not greater than 2^31 bytes
163 // (for 32-bit platforms) or 2^63 bytes (for 64-bit platforms).
164 if ((SSizeT)sizeB < 0) {
sewardj22faf712007-11-09 11:33:02 +0000165 if (!VG_(clo_xml))
sewardj6b523cd2009-07-15 14:49:40 +0000166 VG_(message)(Vg_UserMsg, "Warning: silly arg (%ld) to %s()\n",
sewardj22faf712007-11-09 11:33:02 +0000167 (SSizeT)sizeB, fn );
njn1d0825f2006-03-27 11:37:07 +0000168 return True;
169 }
170 return False;
171}
172
173static Bool complain_about_silly_args2(SizeT n, SizeT sizeB)
174{
175 if ((SSizeT)n < 0 || (SSizeT)sizeB < 0) {
sewardj22faf712007-11-09 11:33:02 +0000176 if (!VG_(clo_xml))
177 VG_(message)(Vg_UserMsg,
sewardj6b523cd2009-07-15 14:49:40 +0000178 "Warning: silly args (%ld,%ld) to calloc()\n",
sewardj22faf712007-11-09 11:33:02 +0000179 (SSizeT)n, (SSizeT)sizeB);
njn1d0825f2006-03-27 11:37:07 +0000180 return True;
181 }
182 return False;
183}
184
/* Allocate memory and note change in memory available.
   Central allocation path used by all the wrappers and by mempools.
   If 'p' is non-zero the caller already allocated the storage (custom
   allocators only); otherwise we allocate 'szB' bytes at alignment
   'alignB' from the client heap.  The new block's shadow chunk is
   inserted into 'table', and its V-bits are set to defined (if zeroed)
   or undefined-with-origin.  Returns the client pointer, or NULL on
   allocation failure. */
void* MC_(new_block) ( ThreadId tid,
                       Addr p, SizeT szB, SizeT alignB,
                       Bool is_zeroed, MC_AllocKind kind, VgHashTable table)
{
   ExeContext* ec;

   cmalloc_n_mallocs ++;

   // Allocate and zero if necessary
   if (p) {
      /* Caller-supplied storage: only legal for custom allocators. */
      tl_assert(MC_AllocCustom == kind);
   } else {
      tl_assert(MC_AllocCustom != kind);
      p = (Addr)VG_(cli_malloc)( alignB, szB );
      if (!p) {
         return NULL;
      }
      if (is_zeroed) {
         VG_(memset)((void*)p, 0, szB);
      } else
      if (MC_(clo_malloc_fill) != -1) {
         /* --malloc-fill=<byte>: poison fresh memory with a known value. */
         tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
         VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
      }
   }

   // Only update this stat if allocation succeeded.
   cmalloc_bs_mallocd += (ULong)szB;

   ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   tl_assert(ec);

   VG_(HT_add_node)( table, create_MC_Chunk(ec, p, szB, kind) );

   /* Set the V-bit state of the new payload. */
   if (is_zeroed)
      MC_(make_mem_defined)( p, szB );
   else {
      /* Tag undefined memory with the allocation context as its origin. */
      UInt ecu = VG_(get_ECU_from_ExeContext)(ec);
      tl_assert(VG_(is_plausible_ECU)(ecu));
      MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
   }

   return (void*)p;
}
230
231void* MC_(malloc) ( ThreadId tid, SizeT n )
232{
233 if (complain_about_silly_args(n, "malloc")) {
234 return NULL;
235 } else {
236 return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
njn1dcee092009-02-24 03:07:37 +0000237 /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
njn1d0825f2006-03-27 11:37:07 +0000238 }
239}
240
241void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
242{
243 if (complain_about_silly_args(n, "__builtin_new")) {
244 return NULL;
245 } else {
246 return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
njn1dcee092009-02-24 03:07:37 +0000247 /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
njn1d0825f2006-03-27 11:37:07 +0000248 }
249}
250
251void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
252{
253 if (complain_about_silly_args(n, "__builtin_vec_new")) {
254 return NULL;
255 } else {
256 return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
njn1dcee092009-02-24 03:07:37 +0000257 /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
njn1d0825f2006-03-27 11:37:07 +0000258 }
259}
260
njn718d3b12006-12-16 00:54:12 +0000261void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT n )
njn1d0825f2006-03-27 11:37:07 +0000262{
263 if (complain_about_silly_args(n, "memalign")) {
264 return NULL;
265 } else {
njn718d3b12006-12-16 00:54:12 +0000266 return MC_(new_block) ( tid, 0, n, alignB,
njn1dcee092009-02-24 03:07:37 +0000267 /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
njn1d0825f2006-03-27 11:37:07 +0000268 }
269}
270
271void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
272{
273 if (complain_about_silly_args2(nmemb, size1)) {
274 return NULL;
275 } else {
276 return MC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment),
njn1dcee092009-02-24 03:07:37 +0000277 /*is_zeroed*/True, MC_AllocMalloc, MC_(malloc_list));
njn1d0825f2006-03-27 11:37:07 +0000278 }
279}
280
/* Retire a block: optionally scribble over its contents
   (--free-fill), make the payload and both redzones inaccessible, and
   either quarantine the chunk on the freed queue (normal allocators)
   or release it immediately (custom/mempool allocations, which the
   client manages itself). */
static
void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
   if (MC_(clo_free_fill) != -1) {
      tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
      VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
   }

   /* Note: make redzones noaccess again -- just in case user made them
      accessible with a client request... */
   MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );

   /* Put it out of harm's way for a while, if not from a client request */
   if (MC_AllocCustom != mc->allockind) {
      /* Record where freed */
      mc->where = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
      add_to_freed_queue ( mc );
   } else {
      VG_(free) ( mc );
   }
}
302
njn1d0825f2006-03-27 11:37:07 +0000303void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
304{
305 MC_Chunk* mc;
306
307 cmalloc_n_frees++;
308
309 mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
310 if (mc == NULL) {
311 MC_(record_free_error) ( tid, p );
312 } else {
313 /* check if it is a matching free() / delete / delete [] */
314 if (kind != mc->allockind) {
njn718d3b12006-12-16 00:54:12 +0000315 tl_assert(p == mc->data);
316 MC_(record_freemismatch_error) ( tid, mc );
njn1d0825f2006-03-27 11:37:07 +0000317 }
318 die_and_free_mem ( tid, mc, rzB );
319 }
320}
321
322void MC_(free) ( ThreadId tid, void* p )
323{
324 MC_(handle_free)(
325 tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocMalloc );
326}
327
328void MC_(__builtin_delete) ( ThreadId tid, void* p )
329{
330 MC_(handle_free)(
331 tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNew);
332}
333
334void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
335{
336 MC_(handle_free)(
337 tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNewVec);
338}
339
/* Replacement for realloc().  Always moves the block (even when
   shrinking) so that stale accesses through the old pointer are
   caught; the old block goes onto the freed queue.  Returns NULL and
   reports an error if p_old is not an active block; returns NULL
   (leaving the old block registered) if the new allocation fails. */
void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
   MC_Chunk* mc;
   void*     p_new;
   SizeT     old_szB;

   /* A realloc counts as one free plus one malloc. */
   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)new_szB;

   if (complain_about_silly_args(new_szB, "realloc"))
      return NULL;

   /* Remove the old block */
   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, (Addr)p_old );
      /* We return to the program regardless. */
      return NULL;
   }

   /* check if its a matching free() / delete / delete [] */
   if (MC_AllocMalloc != mc->allockind) {
      /* can not realloc a range that was allocated with new or new [] */
      tl_assert((Addr)p_old == mc->data);
      MC_(record_freemismatch_error) ( tid, mc );
      /* but keep going anyway */
   }

   old_szB = mc->szB;

   /* In all cases, even when the new size is smaller or unchanged, we
      reallocate and copy the contents, and make the old block
      inaccessible.  This is so as to guarantee to catch all cases of
      accesses via the old address after reallocation, regardless of
      the change in size.  (Of course the ability to detect accesses
      to the old block also depends on the size of the freed blocks
      queue). */

   if (new_szB <= old_szB) {
      /* new size is smaller or the same */
      Addr a_new;
      /* Get new memory */
      a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

      if (a_new) {
         ExeContext* ec;

         ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
         tl_assert(ec);

         /* Retained part is copied, red zones set as normal */
         MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB,
                                 MC_MALLOC_REDZONE_SZB );
         /* Carry the old V-bits over for the retained prefix. */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
         MC_(make_mem_noaccess) ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );

         /* Copy from old to new */
         VG_(memcpy)((void*)a_new, p_old, new_szB);

         /* Possibly fill freed area with specified junk. */
         if (MC_(clo_free_fill) != -1) {
            tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
            VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
         }

         /* Free old memory */
         /* Nb: we have to allocate a new MC_Chunk for the new memory rather
            than recycling the old one, so that any erroneous accesses to the
            old memory are reported. */
         die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );

         // Allocate a new chunk.
         mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
      }

      p_new = (void*)a_new;

   } else {
      /* new size is bigger */
      Addr a_new;
      tl_assert(old_szB < new_szB);
      /* Get new memory */
      a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

      if (a_new) {
         UInt ecu;
         ExeContext* ec;

         ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
         tl_assert(ec);
         ecu = VG_(get_ECU_from_ExeContext)(ec);
         tl_assert(VG_(is_plausible_ECU)(ecu));

         /* First half kept and copied, second half new, red zones as normal */
         MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB,
                                 MC_MALLOC_REDZONE_SZB );
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, mc->szB );
         /* The grown tail is fresh, undefined memory with this context
            as its origin. */
         MC_(make_mem_undefined_w_otag)( a_new+mc->szB, new_szB-mc->szB,
                                         ecu | MC_OKIND_HEAP );
         MC_(make_mem_noaccess) ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );

         /* Possibly fill new area with specified junk */
         if (MC_(clo_malloc_fill) != -1) {
            tl_assert(MC_(clo_malloc_fill) >= 0x00
                      && MC_(clo_malloc_fill) <= 0xFF);
            VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
                        new_szB-old_szB);
         }

         /* Copy from old to new */
         VG_(memcpy)((void*)a_new, p_old, mc->szB);

         /* Possibly fill freed area with specified junk. */
         if (MC_(clo_free_fill) != -1) {
            tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
            VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
         }

         /* Free old memory */
         /* Nb: we have to allocate a new MC_Chunk for the new memory rather
            than recycling the old one, so that any erroneous accesses to the
            old memory are reported. */
         die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );

         // Allocate a new chunk.
         mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
      }

      p_new = (void*)a_new;
   }

   // Now insert the new mc (with a possibly new 'data' field) into
   // malloc_list.  If this realloc() did not increase the memory size, we
   // will have removed and then re-added mc unnecessarily.  But that's ok
   // because shrinking a block with realloc() is (presumably) much rarer
   // than growing it, and this way simplifies the growing case.
   VG_(HT_add_node)( MC_(malloc_list), mc );

   return p_new;
}
481
njn8b140de2009-02-17 04:31:18 +0000482SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
483{
484 MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
485
486 // There may be slop, but pretend there isn't because only the asked-for
487 // area will be marked as addressable.
488 return ( mc ? mc->szB : 0 );
489}
490
njn017d3772009-05-19 02:10:26 +0000491
njn1d0825f2006-03-27 11:37:07 +0000492/* Memory pool stuff. */
493
/* Client request: register a new memory pool anchored at 'pool', with
   redzone size 'rzB' and a flag saying whether pool allocations come
   back zeroed.  Panics on duplicate registration. */
void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "create_mempool(0x%lx, %d, %d)\n",
                               pool, rzB, is_zeroed);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp != NULL) {
     VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
   }

   mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
   mp->pool       = pool;
   mp->rzB        = rzB;
   mp->is_zeroed  = is_zeroed;
   /* Per-pool table of the chunks currently allocated from this pool. */
   mp->chunks     = VG_(HT_construct)( "MC_(create_mempool)" );

   /* Paranoia ... ensure this area is off-limits to the client, so
      the mp->data field isn't visible to the leak checker.  If memory
      management is working correctly, anything pointer returned by
      VG_(malloc) should be noaccess as far as the client is
      concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
      VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
   }

   VG_(HT_add_node)( MC_(mempool_list), mp );
}
527
/* Client request: tear down a pool.  All of its outstanding chunks are
   made inaccessible (including redzones) and their shadows destroyed
   along with the per-pool table.  Reports an error if 'pool' is not a
   registered pool. */
void MC_(destroy_mempool)(Addr pool)
{
   MC_Chunk*   mc;
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }

   // Clean up the chunks, one by one
   VG_(HT_ResetIter)(mp->chunks);
   while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
      /* Note: make redzones noaccess again -- just in case user made them
         accessible with a client request... */
      MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
   }
   // Destroy the chunk table (frees the MC_Chunk nodes too)
   VG_(HT_destruct)(mp->chunks);

   VG_(free)(mp);
}
559
sewardjc740d762006-10-05 17:59:23 +0000560static Int
561mp_compar(void* n1, void* n2)
562{
563 MC_Chunk* mc1 = *(MC_Chunk**)n1;
564 MC_Chunk* mc2 = *(MC_Chunk**)n2;
sewardjb8b79ad2008-03-03 01:35:41 +0000565 if (mc1->data < mc2->data) return -1;
566 if (mc1->data > mc2->data) return 1;
567 return 0;
sewardjc740d762006-10-05 17:59:23 +0000568}
569
/* Sanity-check one pool: sort its chunks by address and verify they
   are ordered and non-overlapping; on failure, dump every chunk with
   its allocation context.  Also, at higher verbosity, periodically
   (every ~10000 calls) print totals across all pools.  Diagnostic
   only -- it never modifies the pool. */
static void
check_mempool_sane(MC_Mempool* mp)
{
   UInt n_chunks, i, bad = 0;
   static UInt tick = 0;   /* call counter for the periodic summary */

   MC_Chunk **chunks = (MC_Chunk**) VG_(HT_to_array)( mp->chunks, &n_chunks );
   if (!chunks)
      return;               /* empty pool: nothing to check */

   if (VG_(clo_verbosity) > 1) {
      if (tick++ >= 10000)
      {
         UInt total_pools = 0, total_chunks = 0;
         MC_Mempool* mp2;

         VG_(HT_ResetIter)(MC_(mempool_list));
         while ( (mp2 = VG_(HT_Next)(MC_(mempool_list))) ) {
            total_pools++;
            VG_(HT_ResetIter)(mp2->chunks);
            while (VG_(HT_Next)(mp2->chunks)) {
               total_chunks++;
            }
         }

         VG_(message)(Vg_UserMsg,
                      "Total mempools active: %d pools, %d chunks\n",
                      total_pools, total_chunks);
         tick = 0;
      }
   }


   VG_(ssort)((void*)chunks, n_chunks, sizeof(VgHashNode*), mp_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data > chunks[i+1]->data) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d is out of order "
                      "wrt. its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d overlaps with its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   if (bad) {
         VG_(message)(Vg_UserMsg,
                "Bad mempool (%d chunks), dumping chunks for inspection:\n",
                n_chunks);
         for (i = 0; i < n_chunks; ++i) {
            VG_(message)(Vg_UserMsg,
                         "Mempool chunk %d / %d: %ld bytes "
                         "[%lx,%lx), allocated:\n",
                         i+1,
                         n_chunks,
                         chunks[i]->szB + 0UL,
                         chunks[i]->data,
                         chunks[i]->data + chunks[i]->szB);

            VG_(pp_ExeContext)(chunks[i]->where);
         }
   }
   VG_(free)(chunks);
}
645
njn718d3b12006-12-16 00:54:12 +0000646void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
njn1d0825f2006-03-27 11:37:07 +0000647{
sewardjc740d762006-10-05 17:59:23 +0000648 MC_Mempool* mp;
njn1d0825f2006-03-27 11:37:07 +0000649
sewardjc740d762006-10-05 17:59:23 +0000650 if (VG_(clo_verbosity) > 2) {
sewardj6b523cd2009-07-15 14:49:40 +0000651 VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %ld)\n",
652 pool, addr, szB);
sewardjc740d762006-10-05 17:59:23 +0000653 VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
654 }
655
656 mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
njn1d0825f2006-03-27 11:37:07 +0000657 if (mp == NULL) {
658 MC_(record_illegal_mempool_error) ( tid, pool );
659 } else {
sewardjc740d762006-10-05 17:59:23 +0000660 check_mempool_sane(mp);
njn1dcee092009-02-24 03:07:37 +0000661 MC_(new_block)(tid, addr, szB, /*ignored*/0, mp->is_zeroed,
njn1d0825f2006-03-27 11:37:07 +0000662 MC_AllocCustom, mp->chunks);
sewardjc740d762006-10-05 17:59:23 +0000663 check_mempool_sane(mp);
njn1d0825f2006-03-27 11:37:07 +0000664 }
665}
666
/* Client request: record the freeing of the pool chunk at 'addr'.
   Reports an illegal-mempool error if the pool is unknown, and a free
   error if 'addr' is not a live chunk of that pool. */
void MC_(mempool_free)(Addr pool, Addr addr)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   check_mempool_sane(mp);
   mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addr);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
		   "mempool_free(0x%lx, 0x%lx) freed chunk of %ld bytes\n",
		   pool, addr, mc->szB + 0UL);
   }

   /* Custom chunks bypass the freed queue -- freed immediately. */
   die_and_free_mem ( tid, mc, mp->rzB );
   check_mempool_sane(mp);
}
700
sewardj2c1c9df2006-07-28 00:06:37 +0000701
njn718d3b12006-12-16 00:54:12 +0000702void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
sewardj2c1c9df2006-07-28 00:06:37 +0000703{
704 MC_Mempool* mp;
705 MC_Chunk* mc;
706 ThreadId tid = VG_(get_running_tid)();
707 UInt n_shadows, i;
708 VgHashNode** chunks;
709
sewardjc740d762006-10-05 17:59:23 +0000710 if (VG_(clo_verbosity) > 2) {
sewardj6b523cd2009-07-15 14:49:40 +0000711 VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %ld)\n",
712 pool, addr, szB);
sewardjc740d762006-10-05 17:59:23 +0000713 VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
714 }
715
sewardj2c1c9df2006-07-28 00:06:37 +0000716 mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
717 if (mp == NULL) {
718 MC_(record_illegal_mempool_error)(tid, pool);
719 return;
720 }
721
sewardjc740d762006-10-05 17:59:23 +0000722 check_mempool_sane(mp);
sewardj2c1c9df2006-07-28 00:06:37 +0000723 chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
724 if (n_shadows == 0) {
725 tl_assert(chunks == NULL);
726 return;
727 }
728
729 tl_assert(chunks != NULL);
730 for (i = 0; i < n_shadows; ++i) {
sewardj8aeeaa92006-08-16 17:51:28 +0000731
sewardjc740d762006-10-05 17:59:23 +0000732 Addr lo, hi, min, max;
sewardj8aeeaa92006-08-16 17:51:28 +0000733
sewardj2c1c9df2006-07-28 00:06:37 +0000734 mc = (MC_Chunk*) chunks[i];
735
sewardj8aeeaa92006-08-16 17:51:28 +0000736 lo = mc->data;
njn718d3b12006-12-16 00:54:12 +0000737 hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;
sewardj2c1c9df2006-07-28 00:06:37 +0000738
njn718d3b12006-12-16 00:54:12 +0000739#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))
sewardj2c1c9df2006-07-28 00:06:37 +0000740
sewardj8aeeaa92006-08-16 17:51:28 +0000741 if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {
sewardj2c1c9df2006-07-28 00:06:37 +0000742
743 /* The current chunk is entirely within the trim extent: keep
744 it. */
745
746 continue;
747
sewardj8aeeaa92006-08-16 17:51:28 +0000748 } else if ( (! EXTENT_CONTAINS(lo)) &&
749 (! EXTENT_CONTAINS(hi)) ) {
sewardj2c1c9df2006-07-28 00:06:37 +0000750
751 /* The current chunk is entirely outside the trim extent:
752 delete it. */
753
754 if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
755 MC_(record_free_error)(tid, (Addr)mc->data);
756 VG_(free)(chunks);
sewardjc740d762006-10-05 17:59:23 +0000757 check_mempool_sane(mp);
sewardj2c1c9df2006-07-28 00:06:37 +0000758 return;
759 }
760 die_and_free_mem ( tid, mc, mp->rzB );
761
762 } else {
763
764 /* The current chunk intersects the trim extent: remove,
765 trim, and reinsert it. */
766
sewardj8aeeaa92006-08-16 17:51:28 +0000767 tl_assert(EXTENT_CONTAINS(lo) ||
768 EXTENT_CONTAINS(hi));
sewardj2c1c9df2006-07-28 00:06:37 +0000769 if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
770 MC_(record_free_error)(tid, (Addr)mc->data);
771 VG_(free)(chunks);
sewardjc740d762006-10-05 17:59:23 +0000772 check_mempool_sane(mp);
sewardj2c1c9df2006-07-28 00:06:37 +0000773 return;
774 }
775
sewardjc740d762006-10-05 17:59:23 +0000776 if (mc->data < addr) {
777 min = mc->data;
778 lo = addr;
779 } else {
780 min = addr;
781 lo = mc->data;
782 }
sewardj2c1c9df2006-07-28 00:06:37 +0000783
njn718d3b12006-12-16 00:54:12 +0000784 if (mc->data + szB > addr + szB) {
785 max = mc->data + szB;
786 hi = addr + szB;
sewardjc740d762006-10-05 17:59:23 +0000787 } else {
njn718d3b12006-12-16 00:54:12 +0000788 max = addr + szB;
789 hi = mc->data + szB;
sewardjc740d762006-10-05 17:59:23 +0000790 }
791
792 tl_assert(min <= lo);
sewardj2c1c9df2006-07-28 00:06:37 +0000793 tl_assert(lo < hi);
sewardjc740d762006-10-05 17:59:23 +0000794 tl_assert(hi <= max);
795
796 if (min < lo && !EXTENT_CONTAINS(min)) {
797 MC_(make_mem_noaccess)( min, lo - min);
798 }
799
800 if (hi < max && !EXTENT_CONTAINS(max)) {
801 MC_(make_mem_noaccess)( hi, max - hi );
802 }
803
sewardj2c1c9df2006-07-28 00:06:37 +0000804 mc->data = lo;
njn718d3b12006-12-16 00:54:12 +0000805 mc->szB = (UInt) (hi - lo);
sewardj2c1c9df2006-07-28 00:06:37 +0000806 VG_(HT_add_node)( mp->chunks, mc );
807 }
808
809#undef EXTENT_CONTAINS
810
811 }
sewardjc740d762006-10-05 17:59:23 +0000812 check_mempool_sane(mp);
sewardj2c1c9df2006-07-28 00:06:37 +0000813 VG_(free)(chunks);
814}
815
sewardjc740d762006-10-05 17:59:23 +0000816void MC_(move_mempool)(Addr poolA, Addr poolB)
817{
818 MC_Mempool* mp;
819
820 if (VG_(clo_verbosity) > 2) {
sewardj6b523cd2009-07-15 14:49:40 +0000821 VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)\n", poolA, poolB);
sewardjc740d762006-10-05 17:59:23 +0000822 VG_(get_and_pp_StackTrace)
823 (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
824 }
825
826 mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)poolA );
827
828 if (mp == NULL) {
829 ThreadId tid = VG_(get_running_tid)();
830 MC_(record_illegal_mempool_error) ( tid, poolA );
831 return;
832 }
833
834 mp->pool = poolB;
835 VG_(HT_add_node)( MC_(mempool_list), mp );
836}
837
/* Client request: a pool chunk moved from addrA to addrB and/or changed
   size to szB.  Re-key the chunk in the pool's table with its new
   address and size.  Note: this updates bookkeeping only; it does not
   touch the V-bits of either location. */
void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %ld)\n",
                   pool, addrA, addrB, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);

   mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addrA);
      return;
   }

   mc->data = addrB;
   mc->szB  = szB;
   VG_(HT_add_node)( mp->chunks, mc );

   check_mempool_sane(mp);
}
870
871Bool MC_(mempool_exists)(Addr pool)
872{
873 MC_Mempool* mp;
874
875 mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
876 if (mp == NULL) {
877 return False;
878 }
879 return True;
880}
881
882
njn1d0825f2006-03-27 11:37:07 +0000883/*------------------------------------------------------------*/
884/*--- Statistics printing ---*/
885/*------------------------------------------------------------*/
886
/* Print the end-of-run heap summary: bytes/blocks still in use plus the
   cumulative malloc/free counters.  Suppressed at verbosity 0 and in
   XML output mode. */
void MC_(print_malloc_stats) ( void )
{
   MC_Chunk* mc;
   SizeT     nblocks = 0;
   ULong     nbytes  = 0;

   if (VG_(clo_verbosity) == 0)
      return;
   if (VG_(clo_xml))
      return;

   /* Count memory still in use. */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      nblocks++;
      nbytes += (ULong)mc->szB;
   }

   VG_(umsg)(
      "HEAP SUMMARY:\n"
      "     in use at exit: %'llu bytes in %'lu blocks\n"
      "   total heap usage: %'lu allocs, %'lu frees, %'llu bytes allocated\n"
      "\n",
      nbytes, nblocks,
      cmalloc_n_mallocs,
      cmalloc_n_frees, cmalloc_bs_mallocd
   );
}
915
916/*--------------------------------------------------------------------*/
917/*--- end ---*/
918/*--------------------------------------------------------------------*/