
/*--------------------------------------------------------------------*/
/*--- malloc/free wrappers for detecting errors and updating bits. ---*/
/*---                                        mac_malloc_wrappers.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors, and AddrCheck, a lightweight Valgrind tool
   for detecting memory errors.

   Copyright (C) 2000-2004 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "mac_shared.h"

/*------------------------------------------------------------*/
/*--- Defns                                                ---*/
/*------------------------------------------------------------*/

/* Stats ... */
static SizeT cmalloc_n_mallocs  = 0;
static SizeT cmalloc_n_frees    = 0;
static SizeT cmalloc_bs_mallocd = 0;

/* We want a 16B redzone on heap blocks for Addrcheck and Memcheck */
UInt VG_(vg_malloc_redzone_szB) = 16;

/* Function pointers for the two tools to track interesting events. */
void (*MAC_(new_mem_heap)) ( Addr a, SizeT len, Bool is_inited ) = NULL;
void (*MAC_(ban_mem_heap)) ( Addr a, SizeT len ) = NULL;
void (*MAC_(die_mem_heap)) ( Addr a, SizeT len ) = NULL;
void (*MAC_(copy_mem_heap))( Addr from, Addr to, SizeT len ) = NULL;

/* Function pointers for internal sanity checking. */
Bool (*MAC_(check_noaccess))( Addr a, SizeT len, Addr* bad_addr ) = NULL;


/*------------------------------------------------------------*/
/*--- Tracking malloc'd and free'd blocks                  ---*/
/*------------------------------------------------------------*/

/* Record malloc'd blocks.  Nb: Addrcheck and Memcheck construct this
   separately in their respective initialisation functions. */
VgHashTable MAC_(malloc_list) = NULL;

/* Memory pools.  Nb: Addrcheck and Memcheck construct this separately
   in their respective initialisation functions. */
VgHashTable MAC_(mempool_list) = NULL;

/* Records blocks after freeing. */
static MAC_Chunk* freed_list_start  = NULL;
static MAC_Chunk* freed_list_end    = NULL;
static Int        freed_list_volume = 0;

/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( MAC_Chunk* mc )
{
   MAC_Chunk* sc1;

   /* Put it at the end of the freed list */
   if (freed_list_end == NULL) {
      sk_assert(freed_list_start == NULL);
      freed_list_end = freed_list_start = mc;
      freed_list_volume = mc->size;
   } else {
      sk_assert(freed_list_end->next == NULL);
      freed_list_end->next = mc;
      freed_list_end = mc;
      freed_list_volume += mc->size;
   }
   mc->next = NULL;

   /* Release enough of the oldest blocks to bring the free queue
      volume below MAC_(clo_freelist_vol). */

   while (freed_list_volume > MAC_(clo_freelist_vol)) {
      sk_assert(freed_list_start != NULL);
      sk_assert(freed_list_end != NULL);

      sc1 = freed_list_start;
      freed_list_volume -= sc1->size;
      /* VG_(printf)("volume now %d\n", freed_list_volume); */
      sk_assert(freed_list_volume >= 0);

      if (freed_list_start == freed_list_end) {
         freed_list_start = freed_list_end = NULL;
      } else {
         freed_list_start = sc1->next;
      }
      sc1->next = NULL; /* just paranoia */

      /* Free the client block and its shadow MAC_Chunk. */
      VG_(cli_free) ( (void*)(sc1->data) );
      VG_(free) ( sc1 );
   }
}
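
/* Design note: freed blocks are not handed straight back to the
   low-level allocator; they sit in this FIFO quarantine until its
   total volume exceeds MAC_(clo_freelist_vol) (the --freelist-vol
   command-line option).  Keeping recently-freed blocks inaccessible
   for a while is what allows use-after-free accesses to be detected
   and attributed to the block's free site. */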

/* Return the first shadow chunk satisfying the predicate p. */
MAC_Chunk* MAC_(first_matching_freed_MAC_Chunk) ( Bool (*p)(MAC_Chunk*, void*),
                                                  void* d )
{
   MAC_Chunk* mc;

   /* No point looking through freed blocks if we're not keeping
      them around for a while... */
   for (mc = freed_list_start; mc != NULL; mc = mc->next)
      if (p(mc, d))
         return mc;

   return NULL;
}
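
/* Illustrative predicate for the function above (a sketch, not part
   of this file): find the freed chunk, if any, containing a given
   address, with 'd' carrying the address to look for:

      static Bool addr_is_in_chunk ( MAC_Chunk* mc, void* d )
      {
         Addr a = *(Addr*)d;
         return (mc->data <= a && a < mc->data + mc->size);
      }

      ...
      mc = MAC_(first_matching_freed_MAC_Chunk)( addr_is_in_chunk, &a );
*/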

/* Allocate a shadow chunk for a client block, and put it on the
   appropriate list. */
static
void add_MAC_Chunk ( Addr p, SizeT size, MAC_AllocKind kind, VgHashTable table)
{
   MAC_Chunk* mc;

   mc            = VG_(malloc)(sizeof(MAC_Chunk));
   mc->data      = p;
   mc->size      = size;
   mc->allockind = kind;
   mc->where     = VG_(get_ExeContext)(VG_(get_current_or_recent_tid)());

   /* Paranoia ... ensure this area is off-limits to the client, so
      the mc->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by
      VG_(malloc) should be noaccess as far as the client is
      concerned. */
   if (!MAC_(check_noaccess)( (Addr)mc, sizeof(MAC_Chunk), NULL )) {
      VG_(skin_panic)("add_MAC_chunk: shadow area is accessible");
   }

   VG_(HT_add_node)( table, (VgHashNode*)mc );
}

/*------------------------------------------------------------*/
/*--- client_malloc(), etc                                 ---*/
/*------------------------------------------------------------*/

static Bool complain_about_silly_args(SizeT sizeB, Char* fn)
{
   // Cast to a signed type to catch any unexpectedly negative args.  We're
   // assuming here that the size asked for is not greater than 2^31 bytes
   // (for 32-bit platforms) or 2^63 bytes (for 64-bit platforms).
   if ((SSizeT)sizeB < 0) {
      VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to %s()", sizeB, fn );
      return True;
   }
   return False;
}

static Bool complain_about_silly_args2(SizeT n, SizeT sizeB)
{
   if ((SSizeT)n < 0 || (SSizeT)sizeB < 0) {
      VG_(message)(Vg_UserMsg, "Warning: silly args (%d,%d) to calloc()",
                   n, sizeB);
      return True;
   }
   return False;
}
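
/* Worked example of the check above: on a 32-bit platform a client
   call such as malloc(-1) arrives here as sizeB == 0xFFFFFFFF.
   Reinterpreted as SSizeT that is -1, which is < 0, so the request
   is flagged as silly and the wrapper returns NULL rather than
   attempting a ~4GB allocation. */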

/* Allocate memory and note change in memory available */
__inline__
void* MAC_(new_block) ( Addr p, SizeT size, SizeT align, UInt rzB,
                        Bool is_zeroed, MAC_AllocKind kind, VgHashTable table)
{
   VGP_PUSHCC(VgpCliMalloc);
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += size;

   // Allocate and zero if necessary.  A non-zero 'p' means the client
   // has already allocated the memory itself (custom allocations only).
   if (p) {
      sk_assert(MAC_AllocCustom == kind);
   } else {
      sk_assert(MAC_AllocCustom != kind);
      p = (Addr)VG_(cli_malloc)( align, size );
      if (!p) {
         VGP_POPCC(VgpCliMalloc);
         return NULL;
      }
      if (is_zeroed) VG_(memset)((void*)p, 0, size);
   }

   add_MAC_Chunk( p, size, kind, table );

   MAC_(ban_mem_heap)( p-rzB, rzB );
   MAC_(new_mem_heap)( p, size, is_zeroed );
   MAC_(ban_mem_heap)( p+size, rzB );

   VGP_POPCC(VgpCliMalloc);

   return (void*)p;
}
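
/* Resulting layout of a tracked block (addresses increase to the
   right); both redzones are banned (noaccess), and the block itself
   is marked according to 'is_zeroed':

      p-rzB           p                p+size          p+size+rzB
        |  redzone rzB  |  client block  |  redzone rzB  |
*/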

void* SK_(malloc) ( SizeT n )
{
   if (complain_about_silly_args(n, "malloc")) {
      return NULL;
   } else {
      return MAC_(new_block) ( 0, n, VG_(clo_alignment),
         VG_(vg_malloc_redzone_szB), /*is_zeroed*/False, MAC_AllocMalloc,
         MAC_(malloc_list));
   }
}

void* SK_(__builtin_new) ( SizeT n )
{
   if (complain_about_silly_args(n, "__builtin_new")) {
      return NULL;
   } else {
      return MAC_(new_block) ( 0, n, VG_(clo_alignment),
         VG_(vg_malloc_redzone_szB), /*is_zeroed*/False, MAC_AllocNew,
         MAC_(malloc_list));
   }
}

void* SK_(__builtin_vec_new) ( SizeT n )
{
   if (complain_about_silly_args(n, "__builtin_vec_new")) {
      return NULL;
   } else {
      return MAC_(new_block) ( 0, n, VG_(clo_alignment),
         VG_(vg_malloc_redzone_szB), /*is_zeroed*/False, MAC_AllocNewVec,
         MAC_(malloc_list));
   }
}

void* SK_(memalign) ( SizeT align, SizeT n )
{
   if (complain_about_silly_args(n, "memalign")) {
      return NULL;
   } else {
      return MAC_(new_block) ( 0, n, align,
         VG_(vg_malloc_redzone_szB), /*is_zeroed*/False, MAC_AllocMalloc,
         MAC_(malloc_list));
   }
}

void* SK_(calloc) ( SizeT nmemb, SizeT size1 )
{
   if (complain_about_silly_args2(nmemb, size1)) {
      return NULL;
   } else {
      return MAC_(new_block) ( 0, nmemb*size1, VG_(clo_alignment),
         VG_(vg_malloc_redzone_szB), /*is_zeroed*/True, MAC_AllocMalloc,
         MAC_(malloc_list));
   }
}
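
/* Note: the product nmemb*size1 in SK_(calloc) is computed in
   unsigned SizeT arithmetic and is not checked for overflow;
   complain_about_silly_args2 only rejects operands that are negative
   when viewed as SSizeT.  E.g. on a 32-bit platform
   calloc(0x10000, 0x10000) wraps the product to 0 and quietly
   allocates an empty block. */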

static
void die_and_free_mem ( MAC_Chunk* mc,
                        MAC_Chunk** prev_chunks_next_ptr, SizeT rzB )
{
   /* Note: ban redzones again -- just in case user de-banned them
      with a client request... */
   MAC_(ban_mem_heap)( mc->data-rzB, rzB );
   MAC_(die_mem_heap)( mc->data, mc->size );
   MAC_(ban_mem_heap)( mc->data+mc->size, rzB );

   /* Remove mc from the malloclist using prev_chunks_next_ptr to
      avoid repeating the hash table lookup.  Can't remove until at least
      after free and free_mismatch errors are done because they use
      describe_addr() which looks for it in malloclist. */
   *prev_chunks_next_ptr = mc->next;

   /* Put it out of harm's way for a while, if not from a client request */
   if (MAC_AllocCustom != mc->allockind) {
      /* Record where freed */
      mc->where = VG_(get_ExeContext) ( VG_(get_current_or_recent_tid)() );
      add_to_freed_queue ( mc );
   } else
      VG_(free) ( mc );
}

__inline__
void MAC_(handle_free) ( Addr p, UInt rzB, MAC_AllocKind kind )
{
   MAC_Chunk*  mc;
   MAC_Chunk** prev_chunks_next_ptr;
   ThreadId    tid = VG_(get_current_or_recent_tid)();

   VGP_PUSHCC(VgpCliMalloc);

   cmalloc_n_frees++;

   mc = (MAC_Chunk*)VG_(HT_get_node) ( MAC_(malloc_list), (UWord)p,
                                       (void*)&prev_chunks_next_ptr );
   if (mc == NULL) {
      MAC_(record_free_error) ( tid, p );
      VGP_POPCC(VgpCliMalloc);
      return;
   }

   /* Check if it's a matching free() / delete / delete []. */
   if (kind != mc->allockind) {
      MAC_(record_freemismatch_error) ( tid, p );
   }

   die_and_free_mem ( mc, prev_chunks_next_ptr, rzB );
   VGP_POPCC(VgpCliMalloc);
}

void SK_(free) ( void* p )
{
   MAC_(handle_free)((Addr)p, VG_(vg_malloc_redzone_szB), MAC_AllocMalloc);
}

void SK_(__builtin_delete) ( void* p )
{
   MAC_(handle_free)((Addr)p, VG_(vg_malloc_redzone_szB), MAC_AllocNew);
}

void SK_(__builtin_vec_delete) ( void* p )
{
   MAC_(handle_free)((Addr)p, VG_(vg_malloc_redzone_szB), MAC_AllocNewVec);
}

void* SK_(realloc) ( void* p, SizeT new_size )
{
   MAC_Chunk  *mc;
   MAC_Chunk **prev_chunks_next_ptr;
   UInt        i;
   ThreadId    tid = VG_(get_current_or_recent_tid)();

   VGP_PUSHCC(VgpCliMalloc);

   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += new_size;

   if (complain_about_silly_args(new_size, "realloc")) {
      VGP_POPCC(VgpCliMalloc);
      return NULL;
   }

   /* First try and find the block. */
   mc = (MAC_Chunk*)VG_(HT_get_node) ( MAC_(malloc_list), (UWord)p,
                                       (void*)&prev_chunks_next_ptr );

   if (mc == NULL) {
      MAC_(record_free_error) ( tid, (Addr)p );
      /* Perhaps we should return to the program regardless. */
      VGP_POPCC(VgpCliMalloc);
      return NULL;
   }

   /* Check if it's a matching free() / delete / delete []. */
   if (MAC_AllocMalloc != mc->allockind) {
      /* cannot realloc a range that was allocated with new or new [] */
      MAC_(record_freemismatch_error) ( tid, (Addr)p );
      /* but keep going anyway */
   }

   if (mc->size == new_size) {
      /* size unchanged */
      mc->where = VG_(get_ExeContext)(tid);
      VGP_POPCC(VgpCliMalloc);
      return p;

   } else if (mc->size > new_size) {
      /* new size is smaller */
      MAC_(die_mem_heap)( mc->data+new_size, mc->size-new_size );
      mc->size = new_size;
      mc->where = VG_(get_ExeContext)(tid);
      VGP_POPCC(VgpCliMalloc);
      return p;

   } else {
      /* new size is bigger */
      Addr p_new;

      /* Get new memory */
      p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
      if (!p_new) {
         /* Bail out if the allocator can't satisfy the request; the
            old block is left untouched, as per realloc() semantics. */
         VGP_POPCC(VgpCliMalloc);
         return NULL;
      }

      /* First half kept and copied, second half new,
         red zones as normal */
      MAC_(ban_mem_heap) ( p_new-VG_(vg_malloc_redzone_szB),
                           VG_(vg_malloc_redzone_szB) );
      MAC_(copy_mem_heap)( (Addr)p, p_new, mc->size );
      MAC_(new_mem_heap) ( p_new+mc->size, new_size-mc->size, /*inited*/False );
      MAC_(ban_mem_heap) ( p_new+new_size, VG_(vg_malloc_redzone_szB) );

      /* Copy from old to new */
      for (i = 0; i < mc->size; i++)
         ((UChar*)p_new)[i] = ((UChar*)p)[i];

      /* Free old memory */
      die_and_free_mem ( mc, prev_chunks_next_ptr,
                         VG_(vg_malloc_redzone_szB) );

      /* This has to be after die_and_free_mem, otherwise the
         former succeeds in shorting out the new block, not the
         old, in the case when both are on the same list. */
      add_MAC_Chunk ( p_new, new_size, MAC_AllocMalloc, MAC_(malloc_list) );

      VGP_POPCC(VgpCliMalloc);
      return (void*)p_new;
   }
}
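
/* Client-visible summary of the above: realloc() on a block that came
   from new/new[] is reported as a mismatch but still performed;
   shrinking marks the tail of the block as dead; growing allocates a
   fresh block, copies the old contents, marks the extension as
   uninitialised, and retires the old block through the same
   quarantine as free(). */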

/* Memory pool stuff. */

void MAC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed)
{
   MAC_Mempool* mp;

   mp            = VG_(malloc)(sizeof(MAC_Mempool));
   mp->pool      = pool;
   mp->rzB       = rzB;
   mp->is_zeroed = is_zeroed;
   mp->chunks    = VG_(HT_construct)();

   /* Paranoia ... ensure this area is off-limits to the client, so
      the mp->pool field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by
      VG_(malloc) should be noaccess as far as the client is
      concerned. */
   if (!MAC_(check_noaccess)( (Addr)mp, sizeof(MAC_Mempool), NULL )) {
      VG_(skin_panic)("MAC_(create_mempool): shadow area is accessible");
   }

   VG_(HT_add_node)( MAC_(mempool_list), (VgHashNode*)mp );
}
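
/* Note that pool chunks live in the per-pool hash table mp->chunks,
   keyed by the chunk's client address, rather than in
   MAC_(malloc_list); MAC_(mempool_alloc) below passes mp->chunks as
   the 'table' argument of MAC_(new_block). */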

static void destroy_mempool_nuke_chunk(VgHashNode *node, void *d)
{
   MAC_Chunk   *mc = (MAC_Chunk *)node;
   MAC_Mempool *mp = (MAC_Mempool *)d;

   /* Note: ban redzones again -- just in case user de-banned them
      with a client request... */
   MAC_(ban_mem_heap)(mc->data-mp->rzB, mp->rzB );
   MAC_(die_mem_heap)(mc->data, mc->size );
   MAC_(ban_mem_heap)(mc->data+mc->size, mp->rzB );
}

void MAC_(destroy_mempool)(Addr pool)
{
   MAC_Mempool*  mp;
   MAC_Mempool** prev_next;

   mp = (MAC_Mempool*)VG_(HT_get_node) ( MAC_(mempool_list),
                                         (UWord)pool,
                                         (void*)&prev_next );

   if (mp == NULL) {
      ThreadId tid = VG_(get_current_or_recent_tid)();

      MAC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }

   *prev_next = mp->next;
   VG_(HT_apply_to_all_nodes)(mp->chunks, destroy_mempool_nuke_chunk, mp);
   VG_(HT_destruct)(mp->chunks);

   VG_(free)(mp);
}

void MAC_(mempool_alloc)(Addr pool, Addr addr, SizeT size)
{
   MAC_Mempool*  mp;
   MAC_Mempool** prev_next;

   mp = (MAC_Mempool*)VG_(HT_get_node) ( MAC_(mempool_list), (UWord)pool,
                                         (void*)&prev_next );

   if (mp == NULL) {
      ThreadId tid = VG_(get_current_or_recent_tid)();

      MAC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }

   MAC_(new_block)(addr, size, /*ignored*/0, mp->rzB, mp->is_zeroed,
                   MAC_AllocCustom, mp->chunks);
}

void MAC_(mempool_free)(Addr pool, Addr addr)
{
   MAC_Mempool*  mp;
   MAC_Mempool** prev_pool;
   MAC_Chunk*    mc;
   MAC_Chunk**   prev_chunk;
   ThreadId      tid = VG_(get_current_or_recent_tid)();

   mp = (MAC_Mempool*)VG_(HT_get_node)(MAC_(mempool_list), (UWord)pool,
                                       (void*)&prev_pool);

   if (mp == NULL) {
      MAC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   mc = (MAC_Chunk*)VG_(HT_get_node)(mp->chunks, (UWord)addr,
                                     (void*)&prev_chunk);

   if (mc == NULL) {
      MAC_(record_free_error)(tid, (Addr)addr);
      return;
   }

   die_and_free_mem(mc, prev_chunk, mp->rzB);
}
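
/* From the client's side, this machinery is normally driven by the
   mempool client requests in valgrind.h.  Sketch of how a pool
   allocator would instrument itself (assuming the standard request
   macros; pool handle, redzone size and zeroing are its choice):

      VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed);
      VALGRIND_MEMPOOL_ALLOC(pool, addr, size);   // after carving a block
      VALGRIND_MEMPOOL_FREE(pool, addr);          // before recycling it
      VALGRIND_DESTROY_MEMPOOL(pool);             // when the pool dies
*/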

typedef
   struct {
      UInt  nblocks;
      SizeT nbytes;
   }
   MallocStats;

static void malloc_stats_count_chunk(VgHashNode* node, void* d) {
   MAC_Chunk* mc = (MAC_Chunk*)node;
   MallocStats *ms = (MallocStats *)d;

   ms->nblocks ++;
   ms->nbytes += mc->size;
}
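
/* The MallocStats struct plus callback stands in for a closure:
   VG_(HT_apply_to_all_nodes) calls malloc_stats_count_chunk once per
   live chunk, threading &ms through the 'd' argument so the totals
   accumulate in the caller's frame. */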

void MAC_(print_malloc_stats) ( void )
{
   MallocStats ms;

   ms.nblocks = 0;
   ms.nbytes  = 0;

   if (VG_(clo_verbosity) == 0)
      return;

   /* Count memory still in use. */
   VG_(HT_apply_to_all_nodes)(MAC_(malloc_list), malloc_stats_count_chunk, &ms);

   VG_(message)(Vg_UserMsg,
                "malloc/free: in use at exit: %d bytes in %d blocks.",
                ms.nbytes, ms.nblocks);
   VG_(message)(Vg_UserMsg,
                "malloc/free: %d allocs, %d frees, %u bytes allocated.",
                cmalloc_n_mallocs,
                cmalloc_n_frees, cmalloc_bs_mallocd);
   if (VG_(clo_verbosity) > 1)
      VG_(message)(Vg_UserMsg, "");
}

/*--------------------------------------------------------------------*/
/*--- end                                    mac_malloc_wrappers.c ---*/
/*--------------------------------------------------------------------*/