blob: 58a5832398befb44f448aa2e6fed353430834940 [file] [log] [blame]
njn1d0825f2006-03-27 11:37:07 +00001
2/*--------------------------------------------------------------------*/
3/*--- malloc/free wrappers for detecting errors and updating bits. ---*/
4/*--- mc_malloc_wrappers.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of MemCheck, a heavyweight Valgrind tool for
njne2656362007-03-10 02:27:44 +00009 detecting memory errors.
njn1d0825f2006-03-27 11:37:07 +000010
njn9f207462009-03-10 22:02:09 +000011 Copyright (C) 2000-2009 Julian Seward
njn1d0825f2006-03-27 11:37:07 +000012 jseward@acm.org
13
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
29 The GNU General Public License is contained in the file COPYING.
30*/
31
32#include "pub_tool_basics.h"
33#include "pub_tool_execontext.h"
34#include "pub_tool_hashtable.h"
35#include "pub_tool_libcbase.h"
36#include "pub_tool_libcassert.h"
37#include "pub_tool_libcprint.h"
38#include "pub_tool_mallocfree.h"
39#include "pub_tool_options.h"
40#include "pub_tool_replacemalloc.h"
41#include "pub_tool_threadstate.h"
42#include "pub_tool_tooliface.h" // Needed for mc_include.h
sewardjc740d762006-10-05 17:59:23 +000043#include "pub_tool_stacktrace.h" // For VG_(get_and_pp_StackTrace)
njn1d0825f2006-03-27 11:37:07 +000044
45#include "mc_include.h"
46
47/*------------------------------------------------------------*/
48/*--- Defns ---*/
49/*------------------------------------------------------------*/
50
/* Stats ... */
// Count of client-side allocations (malloc/new/new[]/custom pools) seen.
static SizeT cmalloc_n_mallocs  = 0;
// Count of client-side deallocations (free/delete/delete[]) seen.
static SizeT cmalloc_n_frees    = 0;
// Total bytes the client asked for, summed over all allocations
// (ULong so it cannot wrap on 32-bit hosts).
static ULong cmalloc_bs_mallocd = 0;

/* For debug printing to do with mempools: what stack trace
   depth to show. */
#define MEMPOOL_DEBUG_STACKTRACE_DEPTH 16


/*------------------------------------------------------------*/
/*--- Tracking malloc'd and free'd blocks                  ---*/
/*------------------------------------------------------------*/

/* Record malloc'd blocks.  Keyed by client address (MC_Chunk.data). */
VgHashTable MC_(malloc_list) = NULL;

/* Memory pools.  Keyed by pool handle (MC_Mempool.pool). */
VgHashTable MC_(mempool_list) = NULL;

/* Records blocks after freeing. */
// Singly-linked FIFO quarantine of freed blocks: 'start' is the oldest
// entry (next to be evicted), 'end' the most recently freed.
static MC_Chunk* freed_list_start = NULL;
static MC_Chunk* freed_list_end   = NULL;
75/* Put a shadow chunk on the freed blocks queue, possibly freeing up
76 some of the oldest blocks in the queue at the same time. */
77static void add_to_freed_queue ( MC_Chunk* mc )
78{
sewardjfa4ca3b2007-11-30 17:19:36 +000079 const Bool show = False;
80
njn1d0825f2006-03-27 11:37:07 +000081 /* Put it at the end of the freed list */
82 if (freed_list_end == NULL) {
83 tl_assert(freed_list_start == NULL);
84 freed_list_end = freed_list_start = mc;
bart545380e2008-04-21 17:28:50 +000085 VG_(free_queue_volume) = (Long)mc->szB;
njn1d0825f2006-03-27 11:37:07 +000086 } else {
87 tl_assert(freed_list_end->next == NULL);
88 freed_list_end->next = mc;
89 freed_list_end = mc;
bart545380e2008-04-21 17:28:50 +000090 VG_(free_queue_volume) += (Long)mc->szB;
sewardjfa4ca3b2007-11-30 17:19:36 +000091 if (show)
92 VG_(printf)("mc_freelist: acquire: volume now %lld\n",
bart545380e2008-04-21 17:28:50 +000093 VG_(free_queue_volume));
njn1d0825f2006-03-27 11:37:07 +000094 }
bart545380e2008-04-21 17:28:50 +000095 VG_(free_queue_length)++;
njn1d0825f2006-03-27 11:37:07 +000096 mc->next = NULL;
97
98 /* Release enough of the oldest blocks to bring the free queue
99 volume below vg_clo_freelist_vol. */
100
bart545380e2008-04-21 17:28:50 +0000101 while (VG_(free_queue_volume) > MC_(clo_freelist_vol)) {
njn1d0825f2006-03-27 11:37:07 +0000102 MC_Chunk* mc1;
103
104 tl_assert(freed_list_start != NULL);
105 tl_assert(freed_list_end != NULL);
106
107 mc1 = freed_list_start;
bart545380e2008-04-21 17:28:50 +0000108 VG_(free_queue_volume) -= (Long)mc1->szB;
109 VG_(free_queue_length)--;
sewardjfa4ca3b2007-11-30 17:19:36 +0000110 if (show)
111 VG_(printf)("mc_freelist: discard: volume now %lld\n",
bart545380e2008-04-21 17:28:50 +0000112 VG_(free_queue_volume));
113 tl_assert(VG_(free_queue_volume) >= 0);
njn1d0825f2006-03-27 11:37:07 +0000114
115 if (freed_list_start == freed_list_end) {
116 freed_list_start = freed_list_end = NULL;
117 } else {
118 freed_list_start = mc1->next;
119 }
120 mc1->next = NULL; /* just paranoia */
121
122 /* free MC_Chunk */
123 VG_(cli_free) ( (void*)(mc1->data) );
124 VG_(free) ( mc1 );
125 }
126}
127
/* Return the oldest entry of the freed-blocks quarantine queue, or
   NULL if the queue is empty.  Callers can walk the whole queue via
   the chunks' ->next links. */
MC_Chunk* MC_(get_freed_list_head)(void)
{
   return freed_list_start;
}
132
/* Allocate its shadow chunk, put it on the appropriate list. */
/* Build the MC_Chunk shadow descriptor for a client block starting at
   'p', of 'szB' bytes, allocated at stack context 'ec' with allocation
   family 'kind'.  The descriptor lives in the tool's heap, not the
   client's.  Panics if the descriptor is visible to the client. */
static
MC_Chunk* create_MC_Chunk ( ExeContext* ec, Addr p, SizeT szB,
                            MC_AllocKind kind)
{
   MC_Chunk* mc  = VG_(malloc)("mc.cMC.1 (a MC_Chunk)", sizeof(MC_Chunk));
   mc->data      = p;     // client block start address
   mc->szB       = szB;   // client-requested size
   mc->allockind = kind;  // malloc vs new vs new[] vs custom
   mc->where     = ec;    // where allocated (later: where freed)

   /* Paranoia ... ensure the MC_Chunk is off-limits to the client, so
      the mc->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by VG_(malloc)
      should be noaccess as far as the client is concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
      VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
   }
   return mc;
}
153
154/*------------------------------------------------------------*/
155/*--- client_malloc(), etc ---*/
156/*------------------------------------------------------------*/
157
njn017d3772009-05-19 02:10:26 +0000158// XXX: should make this a proper error (bug #79311).
njn1d0825f2006-03-27 11:37:07 +0000159static Bool complain_about_silly_args(SizeT sizeB, Char* fn)
160{
161 // Cast to a signed type to catch any unexpectedly negative args. We're
162 // assuming here that the size asked for is not greater than 2^31 bytes
163 // (for 32-bit platforms) or 2^63 bytes (for 64-bit platforms).
164 if ((SSizeT)sizeB < 0) {
sewardj22faf712007-11-09 11:33:02 +0000165 if (!VG_(clo_xml))
166 VG_(message)(Vg_UserMsg, "Warning: silly arg (%ld) to %s()",
167 (SSizeT)sizeB, fn );
njn1d0825f2006-03-27 11:37:07 +0000168 return True;
169 }
170 return False;
171}
172
173static Bool complain_about_silly_args2(SizeT n, SizeT sizeB)
174{
175 if ((SSizeT)n < 0 || (SSizeT)sizeB < 0) {
sewardj22faf712007-11-09 11:33:02 +0000176 if (!VG_(clo_xml))
177 VG_(message)(Vg_UserMsg,
178 "Warning: silly args (%ld,%ld) to calloc()",
179 (SSizeT)n, (SSizeT)sizeB);
njn1d0825f2006-03-27 11:37:07 +0000180 return True;
181 }
182 return False;
183}
184
/* Allocate memory and note change in memory available */
/* Core allocation path shared by all allocator entry points.
   If p == 0, memory is obtained from the client heap (alignB, szB)
   and optionally zeroed or junk-filled; if p != 0 the caller (a
   custom mempool) already owns the memory and kind must be
   MC_AllocCustom.  A shadow MC_Chunk is recorded in 'table', and the
   block's definedness state is set (defined if zeroed, otherwise
   undefined with an origin tag).  Returns the block address, or NULL
   if client allocation failed. */
void* MC_(new_block) ( ThreadId tid,
                       Addr p, SizeT szB, SizeT alignB,
                       Bool is_zeroed, MC_AllocKind kind, VgHashTable table)
{
   ExeContext* ec;

   cmalloc_n_mallocs ++;

   // Allocate and zero if necessary
   if (p) {
      /* Caller supplied the memory: only legal for custom pools. */
      tl_assert(MC_AllocCustom == kind);
   } else {
      tl_assert(MC_AllocCustom != kind);
      p = (Addr)VG_(cli_malloc)( alignB, szB );
      if (!p) {
         return NULL;
      }
      if (is_zeroed) {
         VG_(memset)((void*)p, 0, szB);
      } else
      if (MC_(clo_malloc_fill) != -1) {
         /* --malloc-fill: poison fresh memory with a known byte. */
         tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
         VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
      }
   }

   // Only update this stat if allocation succeeded.
   cmalloc_bs_mallocd += (ULong)szB;

   ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   tl_assert(ec);

   VG_(HT_add_node)( table, create_MC_Chunk(ec, p, szB, kind) );

   if (is_zeroed)
      MC_(make_mem_defined)( p, szB );
   else {
      /* Tag the undefined range with the allocation context so
         origin-tracking can report where the memory came from. */
      UInt ecu = VG_(get_ECU_from_ExeContext)(ec);
      tl_assert(VG_(is_plausible_ECU)(ecu));
      MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
   }

   return (void*)p;
}
230
231void* MC_(malloc) ( ThreadId tid, SizeT n )
232{
233 if (complain_about_silly_args(n, "malloc")) {
234 return NULL;
235 } else {
236 return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
njn1dcee092009-02-24 03:07:37 +0000237 /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
njn1d0825f2006-03-27 11:37:07 +0000238 }
239}
240
241void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
242{
243 if (complain_about_silly_args(n, "__builtin_new")) {
244 return NULL;
245 } else {
246 return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
njn1dcee092009-02-24 03:07:37 +0000247 /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
njn1d0825f2006-03-27 11:37:07 +0000248 }
249}
250
251void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
252{
253 if (complain_about_silly_args(n, "__builtin_vec_new")) {
254 return NULL;
255 } else {
256 return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
njn1dcee092009-02-24 03:07:37 +0000257 /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
njn1d0825f2006-03-27 11:37:07 +0000258 }
259}
260
njn718d3b12006-12-16 00:54:12 +0000261void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT n )
njn1d0825f2006-03-27 11:37:07 +0000262{
263 if (complain_about_silly_args(n, "memalign")) {
264 return NULL;
265 } else {
njn718d3b12006-12-16 00:54:12 +0000266 return MC_(new_block) ( tid, 0, n, alignB,
njn1dcee092009-02-24 03:07:37 +0000267 /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
njn1d0825f2006-03-27 11:37:07 +0000268 }
269}
270
271void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
272{
273 if (complain_about_silly_args2(nmemb, size1)) {
274 return NULL;
275 } else {
276 return MC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment),
njn1dcee092009-02-24 03:07:37 +0000277 /*is_zeroed*/True, MC_AllocMalloc, MC_(malloc_list));
njn1d0825f2006-03-27 11:37:07 +0000278 }
279}
280
/* Retire a block: optionally junk-fill it (--free-fill), make the
   whole range including its redzones (rzB bytes each side)
   unaddressable, and either quarantine the shadow chunk on the freed
   queue (normal allocations -- so use-after-free can be detected) or
   free the shadow immediately (custom-pool allocations). */
static
void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
   if (MC_(clo_free_fill) != -1) {
      tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
      VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
   }

   /* Note: make redzones noaccess again -- just in case user made them
      accessible with a client request... */
   MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );

   /* Put it out of harm's way for a while, if not from a client request */
   if (MC_AllocCustom != mc->allockind) {
      /* Record where freed (overwrites the where-allocated context). */
      mc->where = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
      add_to_freed_queue ( mc );
   } else {
      VG_(free) ( mc );
   }
}
302
njn1d0825f2006-03-27 11:37:07 +0000303void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
304{
305 MC_Chunk* mc;
306
307 cmalloc_n_frees++;
308
309 mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
310 if (mc == NULL) {
311 MC_(record_free_error) ( tid, p );
312 } else {
313 /* check if it is a matching free() / delete / delete [] */
314 if (kind != mc->allockind) {
njn718d3b12006-12-16 00:54:12 +0000315 tl_assert(p == mc->data);
316 MC_(record_freemismatch_error) ( tid, mc );
njn1d0825f2006-03-27 11:37:07 +0000317 }
318 die_and_free_mem ( tid, mc, rzB );
319 }
320}
321
/* Client free() replacement: block must have come from the
   malloc/calloc/realloc family, else a mismatch error is reported. */
void MC_(free) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocMalloc );
}
327
/* Client operator delete replacement: block must have come from
   operator new, else a mismatch error is reported. */
void MC_(__builtin_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNew);
}
333
/* Client operator delete[] replacement: block must have come from
   operator new[], else a mismatch error is reported. */
void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNewVec);
}
339
/* Client realloc() replacement.  The block is ALWAYS moved, even when
   shrinking, so that stale accesses through the old pointer can be
   caught (subject to the freed-queue volume).  The overlapping prefix
   of both the contents and the definedness (V-bit) state is copied to
   the new block; the old block is retired via die_and_free_mem.  On
   allocation failure, NULL is returned and the old block is left
   intact (its chunk is re-inserted below), per realloc() semantics. */
void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
   MC_Chunk* mc;
   void*     p_new;
   SizeT     old_szB;

   /* A realloc counts as one free plus one malloc. */
   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   /* NOTE(review): unlike MC_(new_block), this stat is bumped before
      we know the allocation succeeded -- confirm intended. */
   cmalloc_bs_mallocd += (ULong)new_szB;

   if (complain_about_silly_args(new_szB, "realloc"))
      return NULL;

   /* Remove the old block */
   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, (Addr)p_old );
      /* We return to the program regardless. */
      return NULL;
   }

   /* check if its a matching free() / delete / delete [] */
   if (MC_AllocMalloc != mc->allockind) {
      /* can not realloc a range that was allocated with new or new [] */
      tl_assert((Addr)p_old == mc->data);
      MC_(record_freemismatch_error) ( tid, mc );
      /* but keep going anyway */
   }

   old_szB = mc->szB;

   /* In all cases, even when the new size is smaller or unchanged, we
      reallocate and copy the contents, and make the old block
      inaccessible.  This is so as to guarantee to catch all cases of
      accesses via the old address after reallocation, regardless of
      the change in size.  (Of course the ability to detect accesses
      to the old block also depends on the size of the freed blocks
      queue). */

   if (new_szB <= old_szB) {
      /* new size is smaller or the same */
      Addr a_new;
      /* Get new memory */
      a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

      if (a_new) {
         ExeContext* ec;

         ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
         tl_assert(ec);

         /* Retained part is copied, red zones set as normal */
         MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB,
                                 MC_MALLOC_REDZONE_SZB );
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
         MC_(make_mem_noaccess) ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );

         /* Copy from old to new */
         VG_(memcpy)((void*)a_new, p_old, new_szB);

         /* Possibly fill freed area with specified junk. */
         if (MC_(clo_free_fill) != -1) {
            tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
            VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
         }

         /* Free old memory */
         /* Nb: we have to allocate a new MC_Chunk for the new memory rather
            than recycling the old one, so that any erroneous accesses to the
            old memory are reported. */
         die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );

         // Allocate a new chunk.
         mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
      }

      p_new = (void*)a_new;

   } else {
      /* new size is bigger */
      Addr a_new;
      tl_assert(old_szB < new_szB);
      /* Get new memory */
      a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

      if (a_new) {
         UInt        ecu;
         ExeContext* ec;

         ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
         tl_assert(ec);
         ecu = VG_(get_ECU_from_ExeContext)(ec);
         tl_assert(VG_(is_plausible_ECU)(ecu));

         /* First half kept and copied, second half new, red zones as normal */
         MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB,
                                 MC_MALLOC_REDZONE_SZB );
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, mc->szB );
         /* Grown tail is fresh, undefined memory, tagged with this
            allocation context for origin tracking. */
         MC_(make_mem_undefined_w_otag)( a_new+mc->szB, new_szB-mc->szB,
                                         ecu | MC_OKIND_HEAP );
         MC_(make_mem_noaccess) ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );

         /* Possibly fill new area with specified junk */
         if (MC_(clo_malloc_fill) != -1) {
            tl_assert(MC_(clo_malloc_fill) >= 0x00
                      && MC_(clo_malloc_fill) <= 0xFF);
            VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
                        new_szB-old_szB);
         }

         /* Copy from old to new */
         VG_(memcpy)((void*)a_new, p_old, mc->szB);

         /* Possibly fill freed area with specified junk. */
         if (MC_(clo_free_fill) != -1) {
            tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
            VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
         }

         /* Free old memory */
         /* Nb: we have to allocate a new MC_Chunk for the new memory rather
            than recycling the old one, so that any erroneous accesses to the
            old memory are reported. */
         die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );

         // Allocate a new chunk.
         mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
      }

      p_new = (void*)a_new;
   }

   // Now insert the new mc (with a possibly new 'data' field) into
   // malloc_list.  If this realloc() did not increase the memory size, we
   // will have removed and then re-added mc unnecessarily.  But that's ok
   // because shrinking a block with realloc() is (presumably) much rarer
   // than growing it, and this way simplifies the growing case.
   VG_(HT_add_node)( MC_(malloc_list), mc );

   return p_new;
}
481
njn8b140de2009-02-17 04:31:18 +0000482SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
483{
484 MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
485
486 // There may be slop, but pretend there isn't because only the asked-for
487 // area will be marked as addressable.
488 return ( mc ? mc->szB : 0 );
489}
490
njn017d3772009-05-19 02:10:26 +0000491
/* Memory pool stuff. */

/* Register a new client memory pool with handle 'pool', per-chunk
   redzone size 'rzB', and a flag saying whether the pool hands out
   zeroed memory.  Panics on duplicate registration. */
void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "create_mempool(0x%lx, %d, %d)",
                               pool, rzB, is_zeroed);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp != NULL) {
      VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
   }

   mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
   mp->pool      = pool;
   mp->rzB       = rzB;
   mp->is_zeroed = is_zeroed;
   mp->chunks    = VG_(HT_construct)( "MC_(create_mempool)" );

   /* Paranoia ... ensure this area is off-limits to the client, so
      the mp->data field isn't visible to the leak checker.  If memory
      management is working correctly, anything pointer returned by
      VG_(malloc) should be noaccess as far as the client is
      concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
      VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
   }

   VG_(HT_add_node)( MC_(mempool_list), mp );
}
527
/* Unregister a memory pool: mark every remaining chunk (and its
   redzones) unaddressable, then destroy the pool's chunk table and
   the pool descriptor itself.  Reports an error if 'pool' was never
   registered. */
void MC_(destroy_mempool)(Addr pool)
{
   MC_Chunk*   mc;
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)", pool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }

   // Clean up the chunks, one by one
   VG_(HT_ResetIter)(mp->chunks);
   while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
      /* Note: make redzones noaccess again -- just in case user made them
         accessible with a client request... */
      MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
   }
   // Destroy the chunk table
   VG_(HT_destruct)(mp->chunks);

   VG_(free)(mp);
}
559
sewardjc740d762006-10-05 17:59:23 +0000560static Int
561mp_compar(void* n1, void* n2)
562{
563 MC_Chunk* mc1 = *(MC_Chunk**)n1;
564 MC_Chunk* mc2 = *(MC_Chunk**)n2;
sewardjb8b79ad2008-03-03 01:35:41 +0000565 if (mc1->data < mc2->data) return -1;
566 if (mc1->data > mc2->data) return 1;
567 return 0;
sewardjc740d762006-10-05 17:59:23 +0000568}
569
/* Debug/self-check helper: verify that a pool's chunks, sorted by
   start address, are strictly ordered and non-overlapping.  On
   failure, dump every chunk with its allocation stack.  Also, at
   verbosity > 1, periodically (every 10000 calls) print totals for
   all pools.  Note: this sorts a snapshot array, not the table
   itself, and frees the snapshot before returning. */
static void
check_mempool_sane(MC_Mempool* mp)
{
   UInt n_chunks, i, bad = 0;
   static UInt tick = 0;   // call counter shared across all pools

   MC_Chunk **chunks = (MC_Chunk**) VG_(HT_to_array)( mp->chunks, &n_chunks );
   if (!chunks)
      return;

   if (VG_(clo_verbosity) > 1) {
      if (tick++ >= 10000)
         {
            UInt total_pools = 0, total_chunks = 0;
            MC_Mempool* mp2;

            /* Walk every pool and count all chunks, for the summary. */
            VG_(HT_ResetIter)(MC_(mempool_list));
            while ( (mp2 = VG_(HT_Next)(MC_(mempool_list))) ) {
               total_pools++;
               VG_(HT_ResetIter)(mp2->chunks);
               while (VG_(HT_Next)(mp2->chunks)) {
                  total_chunks++;
               }
            }

            VG_(message)(Vg_UserMsg,
                         "Total mempools active: %d pools, %d chunks\n",
                         total_pools, total_chunks);
            tick = 0;
         }
   }


   VG_(ssort)((void*)chunks, n_chunks, sizeof(VgHashNode*), mp_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data > chunks[i+1]->data) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d is out of order "
                      "wrt. its successor",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d overlaps with its successor",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   if (bad) {
         VG_(message)(Vg_UserMsg,
                "Bad mempool (%d chunks), dumping chunks for inspection:",
                n_chunks);
         for (i = 0; i < n_chunks; ++i) {
            VG_(message)(Vg_UserMsg,
                         "Mempool chunk %d / %d: %ld bytes [%lx,%lx), allocated:",
                         i+1,
                         n_chunks,
                         chunks[i]->szB + 0UL,
                         chunks[i]->data,
                         chunks[i]->data + chunks[i]->szB);

            VG_(pp_ExeContext)(chunks[i]->where);
         }
   }
   VG_(free)(chunks);
}
644
njn718d3b12006-12-16 00:54:12 +0000645void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
njn1d0825f2006-03-27 11:37:07 +0000646{
sewardjc740d762006-10-05 17:59:23 +0000647 MC_Mempool* mp;
njn1d0825f2006-03-27 11:37:07 +0000648
sewardjc740d762006-10-05 17:59:23 +0000649 if (VG_(clo_verbosity) > 2) {
barta0b6b2c2008-07-07 06:49:24 +0000650 VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %ld)", pool, addr, szB);
sewardjc740d762006-10-05 17:59:23 +0000651 VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
652 }
653
654 mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
njn1d0825f2006-03-27 11:37:07 +0000655 if (mp == NULL) {
656 MC_(record_illegal_mempool_error) ( tid, pool );
657 } else {
sewardjc740d762006-10-05 17:59:23 +0000658 check_mempool_sane(mp);
njn1dcee092009-02-24 03:07:37 +0000659 MC_(new_block)(tid, addr, szB, /*ignored*/0, mp->is_zeroed,
njn1d0825f2006-03-27 11:37:07 +0000660 MC_AllocCustom, mp->chunks);
sewardjc740d762006-10-05 17:59:23 +0000661 check_mempool_sane(mp);
njn1d0825f2006-03-27 11:37:07 +0000662 }
663}
664
/* Record the freeing of pool chunk 'addr' from pool 'pool'.  Reports
   an error if the pool or the chunk is unknown; otherwise retires the
   chunk with the pool's redzone size. */
void MC_(mempool_free)(Addr pool, Addr addr)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)", pool, addr);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   check_mempool_sane(mp);
   mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
   if (mc == NULL) {
      /* No chunk starts at 'addr': treat as an invalid free. */
      MC_(record_free_error)(tid, (Addr)addr);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "mempool_free(0x%lx, 0x%lx) freed chunk of %ld bytes",
                   pool, addr, mc->szB + 0UL);
   }

   die_and_free_mem ( tid, mc, mp->rzB );
   check_mempool_sane(mp);
}
698
sewardj2c1c9df2006-07-28 00:06:37 +0000699
njn718d3b12006-12-16 00:54:12 +0000700void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
sewardj2c1c9df2006-07-28 00:06:37 +0000701{
702 MC_Mempool* mp;
703 MC_Chunk* mc;
704 ThreadId tid = VG_(get_running_tid)();
705 UInt n_shadows, i;
706 VgHashNode** chunks;
707
sewardjc740d762006-10-05 17:59:23 +0000708 if (VG_(clo_verbosity) > 2) {
barta0b6b2c2008-07-07 06:49:24 +0000709 VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %ld)", pool, addr, szB);
sewardjc740d762006-10-05 17:59:23 +0000710 VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
711 }
712
sewardj2c1c9df2006-07-28 00:06:37 +0000713 mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
714 if (mp == NULL) {
715 MC_(record_illegal_mempool_error)(tid, pool);
716 return;
717 }
718
sewardjc740d762006-10-05 17:59:23 +0000719 check_mempool_sane(mp);
sewardj2c1c9df2006-07-28 00:06:37 +0000720 chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
721 if (n_shadows == 0) {
722 tl_assert(chunks == NULL);
723 return;
724 }
725
726 tl_assert(chunks != NULL);
727 for (i = 0; i < n_shadows; ++i) {
sewardj8aeeaa92006-08-16 17:51:28 +0000728
sewardjc740d762006-10-05 17:59:23 +0000729 Addr lo, hi, min, max;
sewardj8aeeaa92006-08-16 17:51:28 +0000730
sewardj2c1c9df2006-07-28 00:06:37 +0000731 mc = (MC_Chunk*) chunks[i];
732
sewardj8aeeaa92006-08-16 17:51:28 +0000733 lo = mc->data;
njn718d3b12006-12-16 00:54:12 +0000734 hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;
sewardj2c1c9df2006-07-28 00:06:37 +0000735
njn718d3b12006-12-16 00:54:12 +0000736#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))
sewardj2c1c9df2006-07-28 00:06:37 +0000737
sewardj8aeeaa92006-08-16 17:51:28 +0000738 if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {
sewardj2c1c9df2006-07-28 00:06:37 +0000739
740 /* The current chunk is entirely within the trim extent: keep
741 it. */
742
743 continue;
744
sewardj8aeeaa92006-08-16 17:51:28 +0000745 } else if ( (! EXTENT_CONTAINS(lo)) &&
746 (! EXTENT_CONTAINS(hi)) ) {
sewardj2c1c9df2006-07-28 00:06:37 +0000747
748 /* The current chunk is entirely outside the trim extent:
749 delete it. */
750
751 if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
752 MC_(record_free_error)(tid, (Addr)mc->data);
753 VG_(free)(chunks);
sewardjc740d762006-10-05 17:59:23 +0000754 check_mempool_sane(mp);
sewardj2c1c9df2006-07-28 00:06:37 +0000755 return;
756 }
757 die_and_free_mem ( tid, mc, mp->rzB );
758
759 } else {
760
761 /* The current chunk intersects the trim extent: remove,
762 trim, and reinsert it. */
763
sewardj8aeeaa92006-08-16 17:51:28 +0000764 tl_assert(EXTENT_CONTAINS(lo) ||
765 EXTENT_CONTAINS(hi));
sewardj2c1c9df2006-07-28 00:06:37 +0000766 if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
767 MC_(record_free_error)(tid, (Addr)mc->data);
768 VG_(free)(chunks);
sewardjc740d762006-10-05 17:59:23 +0000769 check_mempool_sane(mp);
sewardj2c1c9df2006-07-28 00:06:37 +0000770 return;
771 }
772
sewardjc740d762006-10-05 17:59:23 +0000773 if (mc->data < addr) {
774 min = mc->data;
775 lo = addr;
776 } else {
777 min = addr;
778 lo = mc->data;
779 }
sewardj2c1c9df2006-07-28 00:06:37 +0000780
njn718d3b12006-12-16 00:54:12 +0000781 if (mc->data + szB > addr + szB) {
782 max = mc->data + szB;
783 hi = addr + szB;
sewardjc740d762006-10-05 17:59:23 +0000784 } else {
njn718d3b12006-12-16 00:54:12 +0000785 max = addr + szB;
786 hi = mc->data + szB;
sewardjc740d762006-10-05 17:59:23 +0000787 }
788
789 tl_assert(min <= lo);
sewardj2c1c9df2006-07-28 00:06:37 +0000790 tl_assert(lo < hi);
sewardjc740d762006-10-05 17:59:23 +0000791 tl_assert(hi <= max);
792
793 if (min < lo && !EXTENT_CONTAINS(min)) {
794 MC_(make_mem_noaccess)( min, lo - min);
795 }
796
797 if (hi < max && !EXTENT_CONTAINS(max)) {
798 MC_(make_mem_noaccess)( hi, max - hi );
799 }
800
sewardj2c1c9df2006-07-28 00:06:37 +0000801 mc->data = lo;
njn718d3b12006-12-16 00:54:12 +0000802 mc->szB = (UInt) (hi - lo);
sewardj2c1c9df2006-07-28 00:06:37 +0000803 VG_(HT_add_node)( mp->chunks, mc );
804 }
805
806#undef EXTENT_CONTAINS
807
808 }
sewardjc740d762006-10-05 17:59:23 +0000809 check_mempool_sane(mp);
sewardj2c1c9df2006-07-28 00:06:37 +0000810 VG_(free)(chunks);
811}
812
/* Re-key a pool: the pool registered under handle 'poolA' is now
   known by handle 'poolB' (its chunks are untouched).  Reports an
   error if 'poolA' is not a registered pool. */
void MC_(move_mempool)(Addr poolA, Addr poolB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)", poolA, poolB);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)poolA );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, poolA );
      return;
   }

   /* Re-insert under the new handle. */
   mp->pool = poolB;
   VG_(HT_add_node)( MC_(mempool_list), mp );
}
834
/* Update a pool chunk in place: the chunk previously at 'addrA' in
   pool 'pool' now starts at 'addrB' and is 'szB' bytes.  Note: only
   the chunk record is changed; the shadow (definedness) state of the
   memory is NOT adjusted here. */
void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %ld)",
                   pool, addrA, addrB, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);

   mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
   if (mc == NULL) {
      /* No chunk starts at addrA: treat as an invalid free. */
      MC_(record_free_error)(tid, (Addr)addrA);
      return;
   }

   /* Re-insert the chunk keyed by its new start address. */
   mc->data = addrB;
   mc->szB  = szB;
   VG_(HT_add_node)( mp->chunks, mc );

   check_mempool_sane(mp);
}
867
868Bool MC_(mempool_exists)(Addr pool)
869{
870 MC_Mempool* mp;
871
872 mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
873 if (mp == NULL) {
874 return False;
875 }
876 return True;
877}
878
879
njn1d0825f2006-03-27 11:37:07 +0000880/*------------------------------------------------------------*/
881/*--- Statistics printing ---*/
882/*------------------------------------------------------------*/
883
/* Print end-of-run heap statistics: bytes/blocks still in use, and
   lifetime totals of allocs, frees, and bytes allocated.  Suppressed
   at verbosity 0 and in XML output mode. */
void MC_(print_malloc_stats) ( void )
{
   MC_Chunk* mc;
   SizeT     nblocks = 0;
   ULong     nbytes  = 0;

   if (VG_(clo_verbosity) == 0)
      return;
   if (VG_(clo_xml))
      return;

   /* Count memory still in use. */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      nblocks++;
      nbytes += (ULong)mc->szB;
   }

   VG_(message)(Vg_UserMsg,
                "malloc/free: in use at exit: %'llu bytes in %'lu blocks.",
                nbytes, nblocks);
   VG_(message)(Vg_UserMsg,
                "malloc/free: %'lu allocs, %'lu frees, %'llu bytes allocated.",
                cmalloc_n_mallocs,
                cmalloc_n_frees, cmalloc_bs_mallocd);
   if (VG_(clo_verbosity) > 1)
      VG_(message)(Vg_UserMsg, "");
}
912
913/*--------------------------------------------------------------------*/
914/*--- end ---*/
915/*--------------------------------------------------------------------*/