/*--------------------------------------------------------------------*/
/*--- malloc/free wrappers for detecting errors and updating bits. ---*/
/*---                                        mac_malloc_wrappers.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind skin for
   detecting memory errors, and AddrCheck, a lightweight Valgrind skin
   for detecting memory errors.

   Copyright (C) 2000-2002 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "mac_shared.h"

/*------------------------------------------------------------*/
/*--- Defns                                                ---*/
/*------------------------------------------------------------*/

/* Stats ... */
static UInt cmalloc_n_mallocs  = 0;
static UInt cmalloc_n_frees    = 0;
static UInt cmalloc_bs_mallocd = 0;

/* We want a 16B redzone on heap blocks for Addrcheck and Memcheck */
UInt VG_(vg_malloc_redzone_szB) = 16;

/*------------------------------------------------------------*/
/*--- Tracking malloc'd and free'd blocks                  ---*/
/*------------------------------------------------------------*/

/* Record malloc'd blocks.  Nb: Addrcheck and Memcheck construct this
   separately in their respective initialisation functions. */
VgHashTable MAC_(malloc_list) = NULL;

/* Records blocks after freeing. */
static MAC_Chunk* freed_list_start  = NULL;
static MAC_Chunk* freed_list_end    = NULL;
static Int        freed_list_volume = 0;

/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( MAC_Chunk* mc )
{
   MAC_Chunk* sc1;

   /* Put it at the end of the freed list */
   if (freed_list_end == NULL) {
      sk_assert(freed_list_start == NULL);
      freed_list_end    = freed_list_start = mc;
      freed_list_volume = mc->size;
   } else {
      sk_assert(freed_list_end->next == NULL);
      freed_list_end->next = mc;
      freed_list_end       = mc;
      freed_list_volume   += mc->size;
   }
   mc->next = NULL;

   /* Release enough of the oldest blocks to bring the free queue
      volume below MAC_(clo_freelist_vol). */

   while (freed_list_volume > MAC_(clo_freelist_vol)) {
      sk_assert(freed_list_start != NULL);
      sk_assert(freed_list_end != NULL);

      sc1 = freed_list_start;
      freed_list_volume -= sc1->size;
      /* VG_(printf)("volume now %d\n", freed_list_volume); */
      sk_assert(freed_list_volume >= 0);

      if (freed_list_start == freed_list_end) {
         freed_list_start = freed_list_end = NULL;
      } else {
         freed_list_start = sc1->next;
      }
      sc1->next = NULL; /* just paranoia */

      /* free MAC_Chunk */
      VG_(cli_free) ( (void*)(sc1->data) );
      VG_(free) ( sc1 );
   }
}

/* Return the first shadow chunk satisfying the predicate p. */
MAC_Chunk* MAC_(first_matching_freed_MAC_Chunk) ( Bool (*p)(MAC_Chunk*) )
{
   MAC_Chunk* mc;

   /* No point looking through freed blocks if we're not keeping
      them around for a while... */
   for (mc = freed_list_start; mc != NULL; mc = mc->next)
      if (p(mc))
         return mc;

   return NULL;
}

/* Allocate and fill in a shadow chunk for a user block of the given
   size at address p: make the shadow chunk point at the user block,
   record the allocation context and kind, and put the shadow chunk on
   the malloc'd blocks list.  The user block itself, and the protection
   of its redzones, are handled by the callers. */

static void add_MAC_Chunk ( ThreadState* tst,
                            Addr p, UInt size, MAC_AllocKind kind )
{
   MAC_Chunk* mc;

   mc            = VG_(malloc)(sizeof(MAC_Chunk));
   mc->data      = p;
   mc->size      = size;
   mc->allockind = kind;
   mc->where     = VG_(get_ExeContext)(tst);

   VG_(HT_add_node)( MAC_(malloc_list), (VgHashNode*)mc );
}

/*------------------------------------------------------------*/
/*--- client_malloc(), etc                                 ---*/
/*------------------------------------------------------------*/

/* Function pointers for the two skins to track interesting events. */
void (*MAC_(new_mem_heap)) ( Addr a, UInt len, Bool is_inited );
void (*MAC_(ban_mem_heap)) ( Addr a, UInt len );
void (*MAC_(die_mem_heap)) ( Addr a, UInt len );
void (*MAC_(copy_mem_heap))( Addr from, Addr to, UInt len );

/* Allocate memory and note change in memory available */
static __inline__
void* alloc_and_new_mem ( ThreadState* tst, UInt size, UInt alignment,
                          Bool is_zeroed, MAC_AllocKind kind )
{
   Addr p;

   VGP_PUSHCC(VgpCliMalloc);

   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += size;

   p = (Addr)VG_(cli_malloc)(alignment, size);

   add_MAC_Chunk ( tst, p, size, kind );

   MAC_(ban_mem_heap)( p-VG_(vg_malloc_redzone_szB),
                       VG_(vg_malloc_redzone_szB) );
   MAC_(new_mem_heap)( p, size, is_zeroed );
   MAC_(ban_mem_heap)( p+size, VG_(vg_malloc_redzone_szB) );

   VGP_POPCC(VgpCliMalloc);
   return (void*)p;
}

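/* The individual allocation entry points (malloc, new, new[], memalign,
   calloc).  Each one rejects a negative size, then hands off to
   alloc_and_new_mem, recording which kind of allocation was used so that
   mismatched deallocations can be reported later. */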
void* SK_(malloc) ( ThreadState* tst, Int n )
{
   if (n < 0) {
      VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to malloc()", n );
      return NULL;
   } else {
      return alloc_and_new_mem ( tst, n, VG_(clo_alignment),
                                 /*is_zeroed*/False, MAC_AllocMalloc );
   }
}

void* SK_(__builtin_new) ( ThreadState* tst, Int n )
{
   if (n < 0) {
      VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to __builtin_new()", n);
      return NULL;
   } else {
      return alloc_and_new_mem ( tst, n, VG_(clo_alignment),
                                 /*is_zeroed*/False, MAC_AllocNew );
   }
}

void* SK_(__builtin_vec_new) ( ThreadState* tst, Int n )
{
   if (n < 0) {
      VG_(message)(Vg_UserMsg,
                   "Warning: silly arg (%d) to __builtin_vec_new()", n );
      return NULL;
   } else {
      return alloc_and_new_mem ( tst, n, VG_(clo_alignment),
                                 /*is_zeroed*/False, MAC_AllocNewVec );
   }
}

void* SK_(memalign) ( ThreadState* tst, Int align, Int n )
{
   if (n < 0) {
      VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to memalign()", n);
      return NULL;
   } else {
      return alloc_and_new_mem ( tst, n, align, /*is_zeroed*/False,
                                 MAC_AllocMalloc );
   }
}

void* SK_(calloc) ( ThreadState* tst, Int nmemb, Int size1 )
{
   void* p;
   Int   size, i;

   size = nmemb * size1;

   if (nmemb < 0 || size1 < 0) {
      VG_(message)(Vg_UserMsg, "Warning: silly args (%d,%d) to calloc()",
                   nmemb, size1 );
      return NULL;
   } else {
      p = alloc_and_new_mem ( tst, size, VG_(clo_alignment),
                              /*is_zeroed*/True, MAC_AllocMalloc );
      for (i = 0; i < size; i++)
         ((UChar*)p)[i] = 0;
      return p;
   }
}

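/* Retire a block: mark its contents dead, re-ban its redzones, unlink
   the shadow chunk from the malloc'd blocks list and park it on the
   freed queue, recording where the free happened. */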
static
void die_and_free_mem ( ThreadState* tst, MAC_Chunk* mc,
                        MAC_Chunk** prev_chunks_next_ptr )
{
   /* Note: ban redzones again -- just in case user de-banned them
      with a client request... */
   MAC_(ban_mem_heap)( mc->data-VG_(vg_malloc_redzone_szB),
                       VG_(vg_malloc_redzone_szB) );
   MAC_(die_mem_heap)( mc->data, mc->size );
   MAC_(ban_mem_heap)( mc->data+mc->size, VG_(vg_malloc_redzone_szB) );

   /* Remove mc from the malloclist using prev_chunks_next_ptr to
      avoid repeating the hash table lookup.  Can't remove until at least
      after free and free_mismatch errors are done because they use
      describe_addr() which looks for it in malloclist. */
   *prev_chunks_next_ptr = mc->next;

   /* Record where freed */
   mc->where = VG_(get_ExeContext) ( tst );

   /* Put it out of harm's way for a while. */
   add_to_freed_queue ( mc );
}


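/* Common code for free(), delete and delete[]: look up the shadow chunk
   for p, report an error if there is none or if the deallocation kind
   does not match the allocation kind, then retire the block. */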
static __inline__
void handle_free ( ThreadState* tst, void* p, MAC_AllocKind kind )
{
   MAC_Chunk*  mc;
   MAC_Chunk** prev_chunks_next_ptr;

   VGP_PUSHCC(VgpCliMalloc);

   cmalloc_n_frees++;

   mc = (MAC_Chunk*)VG_(HT_get_node) ( MAC_(malloc_list), (UInt)p,
                                       (VgHashNode***)&prev_chunks_next_ptr );

   if (mc == NULL) {
      MAC_(record_free_error) ( tst, (Addr)p );
      VGP_POPCC(VgpCliMalloc);
      return;
   }

   /* Check if it's a matching free() / delete / delete [] */
   if (kind != mc->allockind) {
      MAC_(record_freemismatch_error) ( tst, (Addr)p );
   }

   die_and_free_mem ( tst, mc, prev_chunks_next_ptr );
   VGP_POPCC(VgpCliMalloc);
}

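/* The three deallocation entry points differ only in the allocation kind
   they expect, which is how free/delete/delete[] mismatches are caught. */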
void SK_(free) ( ThreadState* tst, void* p )
{
   handle_free(tst, p, MAC_AllocMalloc);
}

void SK_(__builtin_delete) ( ThreadState* tst, void* p )
{
   handle_free(tst, p, MAC_AllocNew);
}

void SK_(__builtin_vec_delete) ( ThreadState* tst, void* p )
{
   handle_free(tst, p, MAC_AllocNewVec);
}

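/* realloc: look up the existing chunk; if the size shrinks, just mark the
   tail as dead; if it grows, allocate a fresh block, copy the old contents
   across, and retire the old block. */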
void* SK_(realloc) ( ThreadState* tst, void* p, Int new_size )
{
   MAC_Chunk  *mc;
   MAC_Chunk **prev_chunks_next_ptr;
   UInt        i;

   VGP_PUSHCC(VgpCliMalloc);

   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += new_size;

   if (new_size < 0) {
      VG_(message)(Vg_UserMsg,
                   "Warning: silly arg (%d) to realloc()", new_size );
      VGP_POPCC(VgpCliMalloc);   /* balance the push above before bailing out */
      return NULL;
   }

   /* First, try to find the block. */
   mc = (MAC_Chunk*)VG_(HT_get_node) ( MAC_(malloc_list), (UInt)p,
                                       (VgHashNode***)&prev_chunks_next_ptr );

   if (mc == NULL) {
      MAC_(record_free_error) ( tst, (Addr)p );
      /* Perhaps we should return to the program regardless. */
      VGP_POPCC(VgpCliMalloc);
      return NULL;
   }

   /* Check if it's a matching free() / delete / delete [] */
   if (MAC_AllocMalloc != mc->allockind) {
      /* cannot realloc a range that was allocated with new or new [] */
      MAC_(record_freemismatch_error) ( tst, (Addr)p );
      /* but keep going anyway */
   }

   if (mc->size == new_size) {
      /* size unchanged */
      VGP_POPCC(VgpCliMalloc);
      return p;

   } else if (mc->size > new_size) {
      /* new size is smaller */
      MAC_(die_mem_heap)( mc->data+new_size, mc->size-new_size );
      mc->size = new_size;
      VGP_POPCC(VgpCliMalloc);
      return p;

   } else {
      /* new size is bigger */
      Addr p_new;

      /* Get new memory */
      p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);

      /* First half kept and copied, second half new,
         red zones as normal */
      MAC_(ban_mem_heap) ( p_new-VG_(vg_malloc_redzone_szB),
                           VG_(vg_malloc_redzone_szB) );
      MAC_(copy_mem_heap)( (Addr)p, p_new, mc->size );
      MAC_(new_mem_heap) ( p_new+mc->size, new_size-mc->size, /*inited*/False );
      MAC_(ban_mem_heap) ( p_new+new_size, VG_(vg_malloc_redzone_szB) );

      /* Copy from old to new */
      for (i = 0; i < mc->size; i++)
         ((UChar*)p_new)[i] = ((UChar*)p)[i];

      /* Free old memory */
      die_and_free_mem ( tst, mc, prev_chunks_next_ptr );

      /* This has to be done after die_and_free_mem, otherwise
         die_and_free_mem would unlink the new block rather than the
         old one, in the case when both are on the same hash chain. */
      add_MAC_Chunk ( tst, p_new, new_size, MAC_AllocMalloc );

      VGP_POPCC(VgpCliMalloc);
      return (void*)p_new;
   }
}

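/* Print heap statistics at exit: bytes and blocks still in use, plus the
   running allocation/free counts gathered above. */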
void MAC_(print_malloc_stats) ( void )
{
   UInt nblocks = 0, nbytes = 0;

   /* Mmm... more lexical scoping */
   void count_one_chunk(VgHashNode* node) {
      MAC_Chunk* mc = (MAC_Chunk*)node;
      nblocks ++;
      nbytes  += mc->size;
   }

   if (VG_(clo_verbosity) == 0)
      return;

   /* Count memory still in use. */
   VG_(HT_apply_to_all_nodes)(MAC_(malloc_list), count_one_chunk);

   VG_(message)(Vg_UserMsg,
                "malloc/free: in use at exit: %d bytes in %d blocks.",
                nbytes, nblocks);
   VG_(message)(Vg_UserMsg,
                "malloc/free: %d allocs, %d frees, %u bytes allocated.",
                cmalloc_n_mallocs,
                cmalloc_n_frees, cmalloc_bs_mallocd);
   if (VG_(clo_verbosity) > 1)
      VG_(message)(Vg_UserMsg, "");
}

/*--------------------------------------------------------------------*/
/*--- end                                   mac_malloc_wrappers.c ---*/
/*--------------------------------------------------------------------*/