blob: 653dc06edb94a23b2e3090534b2282309490dc96 [file] [log] [blame]
sewardjde4a1d02002-03-22 01:27:54 +00001
2/*--------------------------------------------------------------------*/
3/*--- An implementation of malloc/free for the client. ---*/
4/*--- vg_clientmalloc.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
njnc9539842002-10-02 13:26:35 +00008 This file is part of Valgrind, an extensible x86 protected-mode
9 emulator for monitoring program execution on x86-Unixes.
sewardjde4a1d02002-03-22 01:27:54 +000010
11 Copyright (C) 2000-2002 Julian Seward
12 jseward@acm.org
sewardjde4a1d02002-03-22 01:27:54 +000013
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
njn25e49d8e72002-09-23 09:36:25 +000029 The GNU General Public License is contained in the file COPYING.
sewardjde4a1d02002-03-22 01:27:54 +000030*/
31
32#include "vg_include.h"
33
34
/*------------------------------------------------------------*/
/*--- Defns                                                 ---*/
/*------------------------------------------------------------*/

/* #define DEBUG_CLIENTMALLOC */

/* Holds malloc'd but not freed blocks.  Static, so zero-inited by default. */
/* Hash an address onto one of the VG_N_MALLOCLISTS chained lists. */
#define VG_MALLOCLIST_NO(aa) (((UInt)(aa)) % VG_N_MALLOCLISTS)
static ShadowChunk* vg_malloclist[VG_N_MALLOCLISTS];

/* Stats ... */
static UInt vg_cmalloc_n_mallocs  = 0;   /* number of allocation calls */
static UInt vg_cmalloc_n_frees    = 0;   /* number of free calls */
static UInt vg_cmalloc_bs_mallocd = 0;   /* total bytes requested */

/* List-search counters; only reported from a disabled (if (0)) branch
   in VG_(print_malloc_stats) -- effectively debug leftovers. */
static UInt vg_mlist_frees = 0;
static UInt vg_mlist_tries = 0;
53
54/*------------------------------------------------------------*/
55/*--- Fns ---*/
56/*------------------------------------------------------------*/
57
njn25e49d8e72002-09-23 09:36:25 +000058static __inline__
59Bool needs_shadow_chunks ( void )
60{
61 return VG_(needs).core_errors ||
62 VG_(needs).alternative_free ||
63 VG_(needs).sizeof_shadow_block > 0 ||
64 VG_(track_events).bad_free ||
65 VG_(track_events).mismatched_free ||
66 VG_(track_events).copy_mem_heap ||
67 VG_(track_events).die_mem_heap;
68}
69
#ifdef DEBUG_CLIENTMALLOC
/* Debug-only: total number of shadow chunks across all malloc lists. */
static
Int count_malloclists ( void )
{
   Int          total = 0;
   UInt         list_no;
   ShadowChunk* curr;

   for (list_no = 0; list_no < VG_N_MALLOCLISTS; list_no++) {
      curr = vg_malloclist[list_no];
      while (curr != NULL) {
         total++;
         curr = curr->next;
      }
   }
   return total;
}
#endif
84
85/*------------------------------------------------------------*/
86/*--- Shadow chunks, etc ---*/
87/*------------------------------------------------------------*/
88
89/* Allocate a user-chunk of size bytes. Also allocate its shadow
90 block, make the shadow block point at the user block. Put the
91 shadow chunk on the appropriate list, and set all memory
92 protections correctly. */
93static void addShadowChunk ( ThreadState* tst,
94 Addr p, UInt size, VgAllocKind kind )
95{
96 ShadowChunk* sc;
97 UInt ml_no = VG_MALLOCLIST_NO(p);
98
99# ifdef DEBUG_CLIENTMALLOC
100 VG_(printf)("[m %d, f %d (%d)] addShadowChunk "
101 "( sz %d, addr %p, list %d )\n",
102 count_malloclists(),
103 0/*count_freelist()*/, 0/*vg_freed_list_volume*/,
104 size, p, ml_no );
105# endif
106
107 sc = VG_(arena_malloc)(VG_AR_CORE,
108 sizeof(ShadowChunk)
109 + VG_(needs).sizeof_shadow_block);
110 sc->size = size;
111 sc->allockind = kind;
112 sc->data = p;
113 /* Fill in any skin-specific shadow chunk stuff */
114 if (VG_(needs).sizeof_shadow_block > 0)
115 SK_(complete_shadow_chunk) ( sc, tst );
116
117 sc->next = vg_malloclist[ml_no];
118 vg_malloclist[ml_no] = sc;
119}
120
121/* Get the sc, and return the address of the previous node's next pointer
122 which allows sc to be removed from the list later without having to look
123 it up again. */
124static ShadowChunk* getShadowChunk ( Addr a, /*OUT*/ShadowChunk*** next_ptr )
125{
126 ShadowChunk *prev, *curr;
127 Int ml_no;
128
129 ml_no = VG_MALLOCLIST_NO(a);
130
131 prev = NULL;
132 curr = vg_malloclist[ml_no];
133 while (True) {
134 if (curr == NULL)
135 break;
136 if (a == curr->data)
137 break;
138 prev = curr;
139 curr = curr->next;
140 }
141
142 if (NULL == prev)
143 *next_ptr = &vg_malloclist[ml_no];
144 else
145 *next_ptr = &prev->next;
146
147 return curr;
148}
149
njn4ba5a792002-09-30 10:23:54 +0000150void VG_(free_ShadowChunk) ( ShadowChunk* sc )
njn25e49d8e72002-09-23 09:36:25 +0000151{
152 VG_(arena_free) ( VG_AR_CLIENT, (void*)sc->data );
153 VG_(arena_free) ( VG_AR_CORE, sc );
154}
155
156
sewardjde4a1d02002-03-22 01:27:54 +0000157/* Allocate a suitably-sized array, copy all the malloc-d block
158 shadows into it, and return both the array and the size of it.
159 This is used by the memory-leak detector.
160*/
161ShadowChunk** VG_(get_malloc_shadows) ( /*OUT*/ UInt* n_shadows )
162{
163 UInt i, scn;
164 ShadowChunk** arr;
165 ShadowChunk* sc;
166 *n_shadows = 0;
167 for (scn = 0; scn < VG_N_MALLOCLISTS; scn++) {
168 for (sc = vg_malloclist[scn]; sc != NULL; sc = sc->next) {
169 (*n_shadows)++;
170 }
171 }
172 if (*n_shadows == 0) return NULL;
173
njn25e49d8e72002-09-23 09:36:25 +0000174 arr = VG_(malloc)( *n_shadows * sizeof(ShadowChunk*) );
sewardjde4a1d02002-03-22 01:27:54 +0000175
176 i = 0;
177 for (scn = 0; scn < VG_N_MALLOCLISTS; scn++) {
178 for (sc = vg_malloclist[scn]; sc != NULL; sc = sc->next) {
179 arr[i++] = sc;
180 }
181 }
182 vg_assert(i == *n_shadows);
183 return arr;
184}
185
njn25e49d8e72002-09-23 09:36:25 +0000186Bool VG_(addr_is_in_block)( Addr a, Addr start, UInt size )
187{
188 return (start - VG_AR_CLIENT_REDZONE_SZB <= a
189 && a < start + size + VG_AR_CLIENT_REDZONE_SZB);
190}
191
192/* Return the first shadow chunk satisfying the predicate p. */
193ShadowChunk* VG_(any_matching_mallocd_ShadowChunks)
194 ( Bool (*p) ( ShadowChunk* ))
sewardjde4a1d02002-03-22 01:27:54 +0000195{
196 UInt ml_no;
njn25e49d8e72002-09-23 09:36:25 +0000197 ShadowChunk* sc;
198
sewardjde4a1d02002-03-22 01:27:54 +0000199 for (ml_no = 0; ml_no < VG_N_MALLOCLISTS; ml_no++)
sewardjde4a1d02002-03-22 01:27:54 +0000200 for (sc = vg_malloclist[ml_no]; sc != NULL; sc = sc->next)
njn25e49d8e72002-09-23 09:36:25 +0000201 if (p(sc))
202 return sc;
sewardjde4a1d02002-03-22 01:27:54 +0000203
njn25e49d8e72002-09-23 09:36:25 +0000204 return NULL;
sewardjde4a1d02002-03-22 01:27:54 +0000205}
206
207
njn25e49d8e72002-09-23 09:36:25 +0000208/*------------------------------------------------------------*/
209/*--- client_malloc(), etc ---*/
210/*------------------------------------------------------------*/
sewardjde4a1d02002-03-22 01:27:54 +0000211
212/* Allocate memory, noticing whether or not we are doing the full
213 instrumentation thing. */
njn25e49d8e72002-09-23 09:36:25 +0000214static __inline__
215void* alloc_and_new_mem ( ThreadState* tst, UInt size, UInt alignment,
216 Bool is_zeroed, VgAllocKind kind )
sewardjde4a1d02002-03-22 01:27:54 +0000217{
njn25e49d8e72002-09-23 09:36:25 +0000218 Addr p;
sewardjde4a1d02002-03-22 01:27:54 +0000219
220 VGP_PUSHCC(VgpCliMalloc);
sewardj2e93c502002-04-12 11:12:52 +0000221
222 vg_cmalloc_n_mallocs ++;
223 vg_cmalloc_bs_mallocd += size;
224
njn25e49d8e72002-09-23 09:36:25 +0000225 vg_assert(alignment >= 4);
226 if (alignment == 4)
227 p = (Addr)VG_(arena_malloc)(VG_AR_CLIENT, size);
228 else
229 p = (Addr)VG_(arena_malloc_aligned)(VG_AR_CLIENT, alignment, size);
sewardj2e93c502002-04-12 11:12:52 +0000230
njn25e49d8e72002-09-23 09:36:25 +0000231 if (needs_shadow_chunks())
232 addShadowChunk ( tst, p, size, kind );
233
234 VG_TRACK( ban_mem_heap, p-VG_AR_CLIENT_REDZONE_SZB,
235 VG_AR_CLIENT_REDZONE_SZB );
236 VG_TRACK( new_mem_heap, p, size, is_zeroed );
237 VG_TRACK( ban_mem_heap, p+size, VG_AR_CLIENT_REDZONE_SZB );
238
239 VGP_POPCC(VgpCliMalloc);
240 return (void*)p;
241}
242
243void* VG_(client_malloc) ( ThreadState* tst, UInt size, VgAllocKind kind )
244{
245 void* p = alloc_and_new_mem ( tst, size, VG_(clo_alignment),
246 /*is_zeroed*/False, kind );
247# ifdef DEBUG_CLIENTMALLOC
248 VG_(printf)("[m %d, f %d (%d)] client_malloc ( %d, %x ) = %p\n",
249 count_malloclists(),
250 0/*count_freelist()*/, 0/*vg_freed_list_volume*/,
251 size, kind, p );
252# endif
253 return p;
sewardjde4a1d02002-03-22 01:27:54 +0000254}
255
256
sewardj8c824512002-04-14 04:16:48 +0000257void* VG_(client_memalign) ( ThreadState* tst, UInt align, UInt size )
sewardjde4a1d02002-03-22 01:27:54 +0000258{
njn25e49d8e72002-09-23 09:36:25 +0000259 void* p = alloc_and_new_mem ( tst, size, align,
260 /*is_zeroed*/False, Vg_AllocMalloc );
sewardjde4a1d02002-03-22 01:27:54 +0000261# ifdef DEBUG_CLIENTMALLOC
njn25e49d8e72002-09-23 09:36:25 +0000262 VG_(printf)("[m %d, f %d (%d)] client_memalign ( al %d, sz %d ) = %p\n",
sewardjde4a1d02002-03-22 01:27:54 +0000263 count_malloclists(),
njn25e49d8e72002-09-23 09:36:25 +0000264 0/*count_freelist()*/, 0/*vg_freed_list_volume*/,
265 align, size, p );
sewardjde4a1d02002-03-22 01:27:54 +0000266# endif
njn25e49d8e72002-09-23 09:36:25 +0000267 return p;
sewardjde4a1d02002-03-22 01:27:54 +0000268}
269
270
sewardj8c824512002-04-14 04:16:48 +0000271void* VG_(client_calloc) ( ThreadState* tst, UInt nmemb, UInt size1 )
sewardjde4a1d02002-03-22 01:27:54 +0000272{
njn25e49d8e72002-09-23 09:36:25 +0000273 void* p;
274 UInt size, i;
sewardjde4a1d02002-03-22 01:27:54 +0000275
njn25e49d8e72002-09-23 09:36:25 +0000276 size = nmemb * size1;
sewardjde4a1d02002-03-22 01:27:54 +0000277
njn25e49d8e72002-09-23 09:36:25 +0000278 p = alloc_and_new_mem ( tst, size, VG_(clo_alignment),
279 /*is_zeroed*/True, Vg_AllocMalloc );
280 /* Must zero block for calloc! */
sewardjde4a1d02002-03-22 01:27:54 +0000281 for (i = 0; i < size; i++) ((UChar*)p)[i] = 0;
282
njn25e49d8e72002-09-23 09:36:25 +0000283# ifdef DEBUG_CLIENTMALLOC
284 VG_(printf)("[m %d, f %d (%d)] client_calloc ( %d, %d ) = %p\n",
285 count_malloclists(),
286 0/*count_freelist()*/, 0/*vg_freed_list_volume*/,
287 nmemb, size1, p );
288# endif
289
290 return p;
291}
292
/* Tear down a client block: fire the heap-death tracking events,
   unlink its shadow chunk from the malloc list (via the previously
   captured next-pointer slot), then free -- or hand to the skin's
   alternative free -- both the payload and the shadow. */
static
void die_and_free_mem ( ThreadState* tst, ShadowChunk* sc,
                        ShadowChunk** prev_chunks_next_ptr )
{
   /* Note: ban redzones again -- just in case user de-banned them
      with a client request... */
   VG_TRACK( ban_mem_heap, sc->data-VG_AR_CLIENT_REDZONE_SZB,
                           VG_AR_CLIENT_REDZONE_SZB );
   VG_TRACK( die_mem_heap, sc->data, sc->size );
   VG_TRACK( ban_mem_heap, sc->data+sc->size, VG_AR_CLIENT_REDZONE_SZB );

   /* Remove sc from the malloclist using prev_chunks_next_ptr to
      avoid repeating the hash table lookup.  Can't remove until at least
      after free and free_mismatch errors are done because they use
      describe_addr() which looks for it in malloclist. */
   *prev_chunks_next_ptr = sc->next;

   /* A skin with an alternative free takes ownership of sc here;
      otherwise the core frees both payload and shadow. */
   if (VG_(needs).alternative_free)
      SK_(alt_free) ( sc, tst );
   else
      VG_(free_ShadowChunk) ( sc );
}
315
316
njn25e49d8e72002-09-23 09:36:25 +0000317void VG_(client_free) ( ThreadState* tst, void* p, VgAllocKind kind )
sewardjde4a1d02002-03-22 01:27:54 +0000318{
njn25e49d8e72002-09-23 09:36:25 +0000319 ShadowChunk* sc;
320 ShadowChunk** prev_chunks_next_ptr;
sewardjde4a1d02002-03-22 01:27:54 +0000321
322 VGP_PUSHCC(VgpCliMalloc);
sewardjde4a1d02002-03-22 01:27:54 +0000323
324# ifdef DEBUG_CLIENTMALLOC
njn25e49d8e72002-09-23 09:36:25 +0000325 VG_(printf)("[m %d, f %d (%d)] client_free ( %p, %x )\n",
sewardjde4a1d02002-03-22 01:27:54 +0000326 count_malloclists(),
njn25e49d8e72002-09-23 09:36:25 +0000327 0/*count_freelist()*/, 0/*vg_freed_list_volume*/,
328 p, kind );
sewardjde4a1d02002-03-22 01:27:54 +0000329# endif
330
sewardj2e93c502002-04-12 11:12:52 +0000331 vg_cmalloc_n_frees ++;
sewardj2e93c502002-04-12 11:12:52 +0000332
njn25e49d8e72002-09-23 09:36:25 +0000333 if (! needs_shadow_chunks()) {
334 VG_(arena_free) ( VG_AR_CLIENT, p );
sewardjde4a1d02002-03-22 01:27:54 +0000335
sewardjde4a1d02002-03-22 01:27:54 +0000336 } else {
njn25e49d8e72002-09-23 09:36:25 +0000337 sc = getShadowChunk ( (Addr)p, &prev_chunks_next_ptr );
338
339 if (sc == NULL) {
340 VG_TRACK( bad_free, tst, (Addr)p );
341 VGP_POPCC(VgpCliMalloc);
342 return;
343 }
344
345 /* check if its a matching free() / delete / delete [] */
346 if (kind != sc->allockind)
347 VG_TRACK( mismatched_free, tst, (Addr)p );
348
349 die_and_free_mem ( tst, sc, prev_chunks_next_ptr );
350 }
351 VGP_POPCC(VgpCliMalloc);
sewardjde4a1d02002-03-22 01:27:54 +0000352}
353
354
/* Client realloc().  Three cases once the block is found: size
   unchanged (return p as-is), shrink (mark the tail dead, adjust the
   shadow), or grow (allocate a new block, copy, free the old one).
   When no shadow chunks are kept, defers directly to the arena's
   realloc. */
void* VG_(client_realloc) ( ThreadState* tst, void* p, UInt new_size )
{
   ShadowChunk  *sc;
   ShadowChunk **prev_chunks_next_ptr;
   UInt          i;

   VGP_PUSHCC(VgpCliMalloc);

   /* realloc counts as one free plus one malloc in the stats. */
   vg_cmalloc_n_frees ++;
   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += new_size;

   if (! needs_shadow_chunks()) {
      vg_assert(p != NULL && new_size != 0);
      p = VG_(arena_realloc) ( VG_AR_CLIENT, p, VG_(clo_alignment),
                               new_size );
      VGP_POPCC(VgpCliMalloc);
      return p;

   } else {
      /* First try and find the block. */
      sc = getShadowChunk ( (Addr)p, &prev_chunks_next_ptr );

      if (sc == NULL) {
         /* Not a block we handed out: report as a bad free. */
         VG_TRACK( bad_free, tst, (Addr)p );
         /* Perhaps we should return to the program regardless. */
         VGP_POPCC(VgpCliMalloc);
         return NULL;
      }

      /* check if its a matching free() / delete / delete [] */
      if (Vg_AllocMalloc != sc->allockind) {
         /* can not realloc a range that was allocated with new or new [] */
         VG_TRACK( mismatched_free, tst, (Addr)p );
         /* but keep going anyway */
      }

      if (sc->size == new_size) {
         /* size unchanged */
         VGP_POPCC(VgpCliMalloc);
         return p;

      } else if (sc->size > new_size) {
         /* new size is smaller: kill the tail, shrink the shadow in
            place; the block itself is not moved. */
         VG_TRACK( die_mem_heap, sc->data+new_size, sc->size-new_size );
         sc->size = new_size;
         VGP_POPCC(VgpCliMalloc);
#        ifdef DEBUG_CLIENTMALLOC
         VG_(printf)("[m %d, f %d (%d)] client_realloc_smaller ( %p, %d ) = %p\n",
                     count_malloclists(),
                     0/*count_freelist()*/, 0/*vg_freed_list_volume*/,
                     p, new_size, p );
#        endif
         return p;

      } else {
         /* new size is bigger */
         Addr p_new;

         /* Get new memory */
         vg_assert(VG_(clo_alignment) >= 4);
         if (VG_(clo_alignment) == 4)
            p_new = (Addr)VG_(arena_malloc)(VG_AR_CLIENT, new_size);
         else
            p_new = (Addr)VG_(arena_malloc_aligned)(VG_AR_CLIENT,
                                                    VG_(clo_alignment), new_size);

         /* First half kept and copied, second half new,
            red zones as normal */
         VG_TRACK( ban_mem_heap, p_new-VG_AR_CLIENT_REDZONE_SZB,
                                 VG_AR_CLIENT_REDZONE_SZB );
         VG_TRACK( copy_mem_heap, (Addr)p, p_new, sc->size );
         VG_TRACK( new_mem_heap, p_new+sc->size, new_size-sc->size,
                   /*inited=*/False );
         VG_TRACK( ban_mem_heap, p_new+new_size, VG_AR_CLIENT_REDZONE_SZB );

         /* Copy from old to new */
         for (i = 0; i < sc->size; i++)
            ((UChar*)p_new)[i] = ((UChar*)p)[i];

         /* Free old memory */
         die_and_free_mem ( tst, sc, prev_chunks_next_ptr );

         /* this has to be after die_and_free_mem, otherwise the
            former succeeds in shorting out the new block, not the
            old, in the case when both are on the same list. */
         addShadowChunk ( tst, p_new, new_size, Vg_AllocMalloc );

         VGP_POPCC(VgpCliMalloc);
#        ifdef DEBUG_CLIENTMALLOC
         VG_(printf)("[m %d, f %d (%d)] client_realloc_bigger ( %p, %d ) = %p\n",
                     count_malloclists(),
                     0/*count_freelist()*/, 0/*vg_freed_list_volume*/,
                     p, new_size, (void*)p_new );
#        endif
         return (void*)p_new;
      }
   }
}
454
455void VG_(print_malloc_stats) ( void )
sewardjde4a1d02002-03-22 01:27:54 +0000456{
457 UInt nblocks, nbytes, ml_no;
458 ShadowChunk* sc;
459
njn25e49d8e72002-09-23 09:36:25 +0000460 if (VG_(clo_verbosity) == 0)
461 return;
462
463 vg_assert(needs_shadow_chunks());
sewardjde4a1d02002-03-22 01:27:54 +0000464
465 nblocks = nbytes = 0;
466
467 for (ml_no = 0; ml_no < VG_N_MALLOCLISTS; ml_no++) {
468 for (sc = vg_malloclist[ml_no]; sc != NULL; sc = sc->next) {
469 nblocks ++;
470 nbytes += sc->size;
471 }
472 }
473
sewardjde4a1d02002-03-22 01:27:54 +0000474 VG_(message)(Vg_UserMsg,
475 "malloc/free: in use at exit: %d bytes in %d blocks.",
476 nbytes, nblocks);
477 VG_(message)(Vg_UserMsg,
sewardj488dc282002-12-22 19:24:22 +0000478 "malloc/free: %d allocs, %d frees, %u bytes allocated.",
sewardjde4a1d02002-03-22 01:27:54 +0000479 vg_cmalloc_n_mallocs,
480 vg_cmalloc_n_frees, vg_cmalloc_bs_mallocd);
sewardjde4a1d02002-03-22 01:27:54 +0000481 if (0)
482 VG_(message)(Vg_DebugMsg,
483 "free search: %d tries, %d frees",
484 vg_mlist_tries,
485 vg_mlist_frees );
486 if (VG_(clo_verbosity) > 1)
487 VG_(message)(Vg_UserMsg, "");
488}
489
sewardjde4a1d02002-03-22 01:27:54 +0000490/*--------------------------------------------------------------------*/
491/*--- end vg_clientmalloc.c ---*/
492/*--------------------------------------------------------------------*/