/*--------------------------------------------------------------------*/
/*--- malloc/free wrappers for detecting errors and updating bits. ---*/
/*---                                        mac_malloc_wrappers.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors, and AddrCheck, a lightweight Valgrind tool
   for detecting memory errors.

   Copyright (C) 2000-2003 Julian Seward
   jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "mac_shared.h"

/*------------------------------------------------------------*/
/*--- Defns                                                ---*/
/*------------------------------------------------------------*/

/* Stats ... */
static UInt cmalloc_n_mallocs  = 0;
static UInt cmalloc_n_frees    = 0;
static UInt cmalloc_bs_mallocd = 0;

/* We want a 16B redzone on heap blocks for Addrcheck and Memcheck */
UInt VG_(vg_malloc_redzone_szB) = 16;

/* Function pointers for the two skins to track interesting events. */
void (*MAC_(new_mem_heap)) ( Addr a, UInt len, Bool is_inited )  = NULL;
void (*MAC_(ban_mem_heap)) ( Addr a, UInt len )                  = NULL;
void (*MAC_(die_mem_heap)) ( Addr a, UInt len )                  = NULL;
void (*MAC_(copy_mem_heap))( Addr from, Addr to, UInt len )      = NULL;

/* Function pointers for internal sanity checking. */
Bool (*MAC_(check_noaccess))( Addr a, UInt len, Addr* bad_addr ) = NULL;


/*------------------------------------------------------------*/
/*--- Tracking malloc'd and free'd blocks                  ---*/
/*------------------------------------------------------------*/

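/* Design note: freed blocks are not handed back to the low-level
   allocator straight away.  die_and_free_mem() below marks the memory
   dead, and add_to_freed_queue() parks the chunk on a bounded FIFO
   queue, so the space stays off-limits for a while; this helps the
   tools catch accesses to recently freed memory. */
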
/* Record malloc'd blocks.  Nb: Addrcheck and Memcheck construct this
   separately in their respective initialisation functions. */
VgHashTable MAC_(malloc_list) = NULL;

/* Records blocks after freeing. */
static MAC_Chunk* freed_list_start  = NULL;
static MAC_Chunk* freed_list_end    = NULL;
static Int        freed_list_volume = 0;

/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( MAC_Chunk* mc )
{
   MAC_Chunk* sc1;

   /* Put it at the end of the freed list */
   if (freed_list_end == NULL) {
      sk_assert(freed_list_start == NULL);
      freed_list_end = freed_list_start = mc;
      freed_list_volume = mc->size;
   } else {
      sk_assert(freed_list_end->next == NULL);
      freed_list_end->next = mc;
      freed_list_end = mc;
      freed_list_volume += mc->size;
   }
   mc->next = NULL;

   /* Release enough of the oldest blocks to bring the free queue
      volume below MAC_(clo_freelist_vol). */

   while (freed_list_volume > MAC_(clo_freelist_vol)) {
      sk_assert(freed_list_start != NULL);
      sk_assert(freed_list_end != NULL);

      sc1 = freed_list_start;
      freed_list_volume -= sc1->size;
      /* VG_(printf)("volume now %d\n", freed_list_volume); */
      sk_assert(freed_list_volume >= 0);

      if (freed_list_start == freed_list_end) {
         freed_list_start = freed_list_end = NULL;
      } else {
         freed_list_start = sc1->next;
      }
      sc1->next = NULL; /* just paranoia */

      /* free MAC_Chunk */
      VG_(cli_free) ( (void*)(sc1->data) );
      VG_(free) ( sc1 );
   }
}

/* Return the first shadow chunk satisfying the predicate p. */
MAC_Chunk* MAC_(first_matching_freed_MAC_Chunk) ( Bool (*p)(MAC_Chunk*) )
{
   MAC_Chunk* mc;

   /* No point looking through freed blocks if we're not keeping
      them around for a while... */
   for (mc = freed_list_start; mc != NULL; mc = mc->next)
      if (p(mc))
         return mc;

   return NULL;
}

/* Allocate a shadow chunk for a client block and put it on the
   appropriate list. */
static
void add_MAC_Chunk ( Addr p, UInt size, MAC_AllocKind kind )
{
   MAC_Chunk* mc;

   mc            = VG_(malloc)(sizeof(MAC_Chunk));
   mc->data      = p;
   mc->size      = size;
   mc->allockind = kind;
   mc->where     = VG_(get_ExeContext)(VG_(get_current_or_recent_tid)());

   /* Paranoia ... ensure this area is off-limits to the client, so
      the mc->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by
      VG_(malloc) should be noaccess as far as the client is
      concerned. */
   if (!MAC_(check_noaccess)( (Addr)mc, sizeof(MAC_Chunk), NULL )) {
      VG_(skin_panic)("add_MAC_chunk: shadow area is accessible");
   }

   VG_(HT_add_node)( MAC_(malloc_list), (VgHashNode*)mc );
}

/*------------------------------------------------------------*/
/*--- client_malloc(), etc                                 ---*/
/*------------------------------------------------------------*/

/* Allocate memory and note change in memory available */
__inline__
void MAC_(new_block) ( Addr p, UInt size,
                       UInt rzB, Bool is_zeroed, MAC_AllocKind kind )
{
   VGP_PUSHCC(VgpCliMalloc);

   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += size;

   add_MAC_Chunk( p, size, kind );

   MAC_(ban_mem_heap)( p-rzB, rzB );
   MAC_(new_mem_heap)( p, size, is_zeroed );
   MAC_(ban_mem_heap)( p+size, rzB );

   VGP_POPCC(VgpCliMalloc);
}

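/* Tool-side replacement for the client's malloc(): grab memory from
   the client heap and record the new block. */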
void* SK_(malloc) ( Int n )
{
   if (n < 0) {
      VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to malloc()", n );
      return NULL;
   } else {
      Addr p = (Addr)VG_(cli_malloc)( VG_(clo_alignment), n );
      MAC_(new_block) ( p, n, VG_(vg_malloc_redzone_szB),
                        /*is_zeroed*/False, MAC_AllocMalloc );
      return (void*)p;
   }
}

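/* Replacement for C++ operator new; identical to malloc except the
   block is recorded as MAC_AllocNew so mismatched frees can be
   reported. */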
void* SK_(__builtin_new) ( Int n )
{
   if (n < 0) {
      VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to __builtin_new()", n);
      return NULL;
   } else {
      Addr p = (Addr)VG_(cli_malloc)( VG_(clo_alignment), n );
      MAC_(new_block) ( p, n, VG_(vg_malloc_redzone_szB),
                        /*is_zeroed*/False, MAC_AllocNew );
      return (void*)p;
   }
}

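/* Replacement for C++ operator new[]; the block is recorded as
   MAC_AllocNewVec. */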
void* SK_(__builtin_vec_new) ( Int n )
{
   if (n < 0) {
      VG_(message)(Vg_UserMsg,
                   "Warning: silly arg (%d) to __builtin_vec_new()", n );
      return NULL;
   } else {
      Addr p = (Addr)VG_(cli_malloc)( VG_(clo_alignment), n );
      MAC_(new_block) ( p, n, VG_(vg_malloc_redzone_szB),
                        /*is_zeroed*/False, MAC_AllocNewVec );
      return (void*)p;
   }
}

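/* Replacement for memalign(): like malloc, but honours the
   caller-supplied alignment instead of the default. */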
void* SK_(memalign) ( Int align, Int n )
{
   if (n < 0) {
      VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to memalign()", n);
      return NULL;
   } else {
      Addr p = (Addr)VG_(cli_malloc)( align, n );
      MAC_(new_block) ( p, n, VG_(vg_malloc_redzone_szB),
                        /*is_zeroed*/False, MAC_AllocMalloc );
      return (void*)p;
   }
}

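/* Replacement for calloc(): allocates nmemb * size1 bytes, zeroes
   them, and records the block as already initialised. */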
void* SK_(calloc) ( Int nmemb, Int size1 )
{
   Int n, i;

   n = nmemb * size1;

   if (nmemb < 0 || size1 < 0) {
      VG_(message)(Vg_UserMsg, "Warning: silly args (%d,%d) to calloc()",
                               nmemb, size1 );
      return NULL;
   } else {
      Addr p = (Addr)VG_(cli_malloc)( VG_(clo_alignment), n );
      MAC_(new_block) ( p, n, VG_(vg_malloc_redzone_szB),
                        /*is_zeroed*/True, MAC_AllocMalloc );
      for (i = 0; i < n; i++)
         ((UChar*)p)[i] = 0;
      return (void*)p;
   }
}

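/* Mark a block's memory and redzones as off-limits, unlink its shadow
   chunk from the malloc'd list, and either queue the chunk for delayed
   reuse or, for client-request allocations, free it immediately. */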
static
void die_and_free_mem ( MAC_Chunk* mc,
                        MAC_Chunk** prev_chunks_next_ptr, UInt rzB )
{
   /* Note: ban redzones again -- just in case user de-banned them
      with a client request... */
   MAC_(ban_mem_heap)( mc->data-rzB, rzB );
   MAC_(die_mem_heap)( mc->data, mc->size );
   MAC_(ban_mem_heap)( mc->data+mc->size, rzB );

   /* Remove mc from the malloclist using prev_chunks_next_ptr to
      avoid repeating the hash table lookup.  Can't remove until at least
      after free and free_mismatch errors are done because they use
      describe_addr() which looks for it in malloclist. */
   *prev_chunks_next_ptr = mc->next;

   /* Record where freed */
   mc->where = VG_(get_ExeContext) ( VG_(get_current_or_recent_tid)() );

   /* Put it out of harm's way for a while, if not from a client request */
   if (MAC_AllocCustom != mc->allockind)
      add_to_freed_queue ( mc );
   else
      VG_(free) ( mc );
}


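/* Common path for free() / delete / delete []: look up the block,
   complain if it is unknown or was allocated by a different allocator
   family, then retire it via die_and_free_mem(). */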
__inline__
void MAC_(handle_free) ( Addr p, UInt rzB, MAC_AllocKind kind )
{
   MAC_Chunk*  mc;
   MAC_Chunk** prev_chunks_next_ptr;
   ThreadId    tid = VG_(get_current_or_recent_tid)();

   VGP_PUSHCC(VgpCliMalloc);

   cmalloc_n_frees++;

   mc = (MAC_Chunk*)VG_(HT_get_node) ( MAC_(malloc_list), (UInt)p,
                                       (VgHashNode***)&prev_chunks_next_ptr );
   if (mc == NULL) {
      MAC_(record_free_error) ( tid, p );
      VGP_POPCC(VgpCliMalloc);
      return;
   }

   /* check if it's a matching free() / delete / delete [] */
   if (kind != mc->allockind) {
      MAC_(record_freemismatch_error) ( tid, p );
   }

   die_and_free_mem ( mc, prev_chunks_next_ptr, rzB );
   VGP_POPCC(VgpCliMalloc);
}

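/* Client-visible replacements for free(), delete and delete [];
   each simply calls MAC_(handle_free) with the matching allocation
   kind. */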
void SK_(free) ( void* p )
{
   MAC_(handle_free)((Addr)p, VG_(vg_malloc_redzone_szB), MAC_AllocMalloc);
}

void SK_(__builtin_delete) ( void* p )
{
   MAC_(handle_free)((Addr)p, VG_(vg_malloc_redzone_szB), MAC_AllocNew);
}

void SK_(__builtin_vec_delete) ( void* p )
{
   MAC_(handle_free)((Addr)p, VG_(vg_malloc_redzone_szB), MAC_AllocNewVec);
}

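/* Replacement for realloc().  Shrinking marks the tail of the block
   dead in place; growing allocates a fresh block, copies the old
   contents across, and retires the old block as if it were freed. */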
void* SK_(realloc) ( void* p, Int new_size )
{
   MAC_Chunk*  mc;
   MAC_Chunk** prev_chunks_next_ptr;
   UInt        i;
   ThreadId    tid = VG_(get_current_or_recent_tid)();

   VGP_PUSHCC(VgpCliMalloc);

   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += new_size;

   if (new_size < 0) {
      VG_(message)(Vg_UserMsg,
                   "Warning: silly arg (%d) to realloc()", new_size );
      VGP_POPCC(VgpCliMalloc);   /* keep the cost-centre stack balanced */
      return NULL;
   }

   /* First try and find the block. */
   mc = (MAC_Chunk*)VG_(HT_get_node) ( MAC_(malloc_list), (UInt)p,
                                       (VgHashNode***)&prev_chunks_next_ptr );

   if (mc == NULL) {
      MAC_(record_free_error) ( tid, (Addr)p );
      /* Perhaps we should return to the program regardless. */
      VGP_POPCC(VgpCliMalloc);
      return NULL;
   }

   /* check if it's a matching free() / delete / delete [] */
   if (MAC_AllocMalloc != mc->allockind) {
      /* cannot realloc a range that was allocated with new or new [] */
      MAC_(record_freemismatch_error) ( tid, (Addr)p );
      /* but keep going anyway */
   }

   if (mc->size == new_size) {
      /* size unchanged */
      mc->where = VG_(get_ExeContext)(tid);
      VGP_POPCC(VgpCliMalloc);
      return p;

   } else if (mc->size > new_size) {
      /* new size is smaller */
      MAC_(die_mem_heap)( mc->data+new_size, mc->size-new_size );
      mc->size = new_size;
      mc->where = VG_(get_ExeContext)(tid);
      VGP_POPCC(VgpCliMalloc);
      return p;

   } else {
      /* new size is bigger */
      Addr p_new;

      /* Get new memory */
      p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);

      /* First half kept and copied, second half new,
         red zones as normal */
      MAC_(ban_mem_heap) ( p_new-VG_(vg_malloc_redzone_szB),
                           VG_(vg_malloc_redzone_szB) );
      MAC_(copy_mem_heap)( (Addr)p, p_new, mc->size );
      MAC_(new_mem_heap) ( p_new+mc->size, new_size-mc->size, /*inited*/False );
      MAC_(ban_mem_heap) ( p_new+new_size, VG_(vg_malloc_redzone_szB) );

      /* Copy from old to new */
      for (i = 0; i < mc->size; i++)
         ((UChar*)p_new)[i] = ((UChar*)p)[i];

      /* Free old memory */
      die_and_free_mem ( mc, prev_chunks_next_ptr,
                         VG_(vg_malloc_redzone_szB) );

      /* This has to be after die_and_free_mem, otherwise the former
         would succeed in unlinking the new block rather than the old
         one, in the case when both hash to the same list. */
      add_MAC_Chunk ( p_new, new_size, MAC_AllocMalloc );

      VGP_POPCC(VgpCliMalloc);
      return (void*)p_new;
   }
}

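/* Walk the live-block hash table and report heap usage at exit,
   together with the running allocation/free counters. */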
void MAC_(print_malloc_stats) ( void )
{
   UInt nblocks = 0, nbytes = 0;

   /* Mmm... more lexical scoping */
   void count_one_chunk(VgHashNode* node) {
      MAC_Chunk* mc = (MAC_Chunk*)node;
      nblocks ++;
      nbytes  += mc->size;
   }

   if (VG_(clo_verbosity) == 0)
      return;

   /* Count memory still in use. */
   VG_(HT_apply_to_all_nodes)(MAC_(malloc_list), count_one_chunk);

   VG_(message)(Vg_UserMsg,
                "malloc/free: in use at exit: %d bytes in %d blocks.",
                nbytes, nblocks);
   VG_(message)(Vg_UserMsg,
                "malloc/free: %d allocs, %d frees, %u bytes allocated.",
                cmalloc_n_mallocs,
                cmalloc_n_frees, cmalloc_bs_mallocd);
   if (VG_(clo_verbosity) > 1)
      VG_(message)(Vg_UserMsg, "");
}


/*--------------------------------------------------------------------*/
/*--- end                                    mac_malloc_wrappers.c ---*/
/*--------------------------------------------------------------------*/