blob: 0215f7863c2a8493bc1760ff7c67f918269ff79d [file] [log] [blame]
sewardj99a2ceb2007-11-09 12:30:36 +00001/*--------------------------------------------------------------------*/
2/*--- The Omega tool: traces memory allocations and alerts when ---*/
3/*--- the final reference to an allocated block dies. ---*/
4/*--- o_main.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of Omega, a Valgrind tool for detecting memory
9 leaks as they occur.
10
11 Copyright (C) 2006-2007 Bryan "Brain Murders" Meredith
12 omega@brainmurders.eclipse.co.uk
13 (A note of personal thanks to my employers at Apertio (www.apertio.com)
14 for allowing the use of their time, equipment for 64bit testing and
15 providing moral support.)
16
17 Partly based upon other Valgrind tools
sewardj39f34232007-11-09 23:02:28 +000018 Copyright (C) 2000-2007 Julian Seward, Nicholas Nethercote et al.
sewardj99a2ceb2007-11-09 12:30:36 +000019 jseward@acm.org
20 njn@valgrind.org
21
22 This program is free software; you can redistribute it and/or
23 modify it under the terms of the GNU General Public License as
24 published by the Free Software Foundation; either version 2 of the
25 License, or (at your option) any later version.
26
27 This program is distributed in the hope that it will be useful, but
28 WITHOUT ANY WARRANTY; without even the implied warranty of
29 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
30 General Public License for more details.
31
32 You should have received a copy of the GNU General Public License
33 along with this program; if not, write to the Free Software
34 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
35 02111-1307, USA.
36
37 The GNU General Public License is contained in the file COPYING.
38*/
39
40/*
41** Read the tool documentation for an explaination of the ideas
42** behind this implementation.
43*/
44
45#include "pub_tool_basics.h"
46#include "pub_tool_libcassert.h"
47#include "pub_tool_tooliface.h"
48#include "pub_tool_hashtable.h"
49#include "pub_tool_libcbase.h"
50#include "pub_tool_libcprint.h"
51#include "pub_tool_libcassert.h"
52#include "pub_tool_mallocfree.h"
53#include "pub_tool_replacemalloc.h"
54#include "pub_tool_machine.h"
55#include "pub_tool_threadstate.h"
56#include "pub_tool_stacktrace.h"
57#include "pub_tool_options.h"
58#include "pub_tool_clreq.h"
59
60#include "coregrind/pub_core_options.h"
61#include "coregrind/pub_core_debugger.h"
62
63#include "libvex_guest_offsets.h"
64
65#include "omega.h"
66
67/*
68** A little sanity in a mad, mad world.
69*/
70#if !(VG_WORDSIZE == 4) && !(VG_WORDSIZE == 8)
71/*
72** We don't handle anything else yet.
73*/
74#error Unsupported VG_WORDSIZE
75#endif
76
77/*
78** 4 lots of debug - always, general, memory and pbit.
79** general, memory and pbit can also be turned off with a master switch.
80** You wont want any of this on unless you are hacking the source around.
81*/
82#define NO_DEBUG(fmt, args...)
83#define O_DEBUG(fmt, args...) VG_(message)(Vg_DebugMsg, fmt, ## args)
84
85// Set to 0 to remove almost all debug from compiled tool
86#if 0
87
88static Bool o_traceMem = True; //False;
89static Bool o_tracePBit = False;
90static Bool o_traceGeneral = True; //False;
91static Bool o_traceStop = True;
92
93#define O_GDEBUG(fmt, args...) \
94 if(o_traceGeneral && !o_traceStop) \
95 { \
96 VG_(message)(Vg_DebugMsg, fmt, ## args); \
97 }
98
99#define O_MDEBUG(fmt, args...) \
100 if(o_traceMem && !o_traceStop) \
101 { \
102 VG_(message)(Vg_DebugMsg, fmt, ## args); \
103 }
104
105#define O_PDEBUG(fmt, args...) \
106 if(o_tracePBit && !o_traceStop) \
107 { \
108 VG_(message)(Vg_DebugMsg, fmt, ## args); \
109 }
110
111#define O_TRACE_ON() {o_traceStop = False;}
112#define O_TRACE_OFF() {o_traceStop = True;}
113#define O_TRACE_MEM_ON() {o_traceMem = True;}
114#define O_TRACE_MEM_OFF() {o_traceMem = False;}
115#define O_TRACE_PBIT_ON() {o_tracePBit = True;}
116#define O_TRACE_PBIT_OFF() {o_tracePBit = False;}
117#define O_TRACE_GENERAL_ON() {o_traceGeneral = True;}
118#define O_TRACE_GENERAL_OFF() {o_traceGeneral = False;}
119#define O_MASTER_DEBUG 1
120
121/*
122** Should we instrument memory loads for debugging?
123** Comment out to stop register loads from showing.
124*/
125//#define O_TRACK_LOADS 1
126#else
127/*
128** No debug included at all.
129*/
130#define O_GDEBUG(fmt, args...)
131#define O_MDEBUG(fmt, args...)
132#define O_PDEBUG(fmt, args...)
133#define O_TRACE_ON()
134#define O_TRACE_OFF()
135#define O_TRACE_MEM_ON()
136#define O_TRACE_MEM_OFF()
137#define O_TRACE_PBIT_ON()
138#define O_TRACE_PBIT_OFF()
139#define O_TRACE_GENERAL_ON()
140#define O_TRACE_GENERAL_OFF()
141
142#endif
143
144/*
145** Need somewhere to give addresses to tracked pointers in registers.
146** We dont write to the locations, just use their addresses.
147** To make it easy to see, use the very top 64K of memory.
148** Note that we might have to map this somewhere else if this is in user space.
149*/
150#if (VG_WORDSIZE == 4)
151#define FAKE_REG_BASE 0xFFFF0000
152#else
153#define FAKE_REG_BASE 0xFFFFFFFFFFFF0000
154#endif
155#define MAP_TO_REG(tid, offset) \
156 (FAKE_REG_BASE + (0x0100 * ((tid) - 1)) + (offset))
157#define OFFSET_FROM_REG(regAddress) \
158 ((regAddress) & 0x00ff)
159#define IS_REG(addr) ((addr >= FAKE_REG_BASE) ? !0 : 0)
160
161static UInt o_isReturnIgnoreReg(Addr reg)
162{
163 /*
164 ** Indicate registers that are 'scratch' registers and should be ignored on
165 ** function return for tracked pointer purposes.
166 */
167 switch(OFFSET_FROM_REG(reg))
168 {
169#if defined(VGA_x86)
170 case OFFSET_x86_ECX:
171 case OFFSET_x86_EDX:
172#elif defined(VGA_amd64)
173 case OFFSET_amd64_RCX:
174 case OFFSET_amd64_RSI:
175 case OFFSET_amd64_RDI:
176 case OFFSET_amd64_R8:
177 case OFFSET_amd64_R9:
178 case OFFSET_amd64_R10:
179 case OFFSET_amd64_R11:
180#elif defined(VGA_ppc32)
181#error I know even less about PPC than x86 - please add appropriate registers
182#elif defined(VGA_ppc64)
183#error I know even less about PPC than x86 - please add appropriate registers
184#else
185# error Unknown arch
186#endif
187 return 1;
188 break;
189
190 default:
191 break;
192 }
193
194 return 0;
195}
196
197
198/*------------------------------------------------------------*/
199/*--- Command Line Option Flags and Values ---*/
200/*------------------------------------------------------------*/
201/*
202** Should we track all memory block allocations or just blocks
203** indicated to us with the MALLOCLIKE_BLOCK user request?
204*/
205static Bool o_onlyMallocLike = False;
206/*
207** Should we show memory that leaks due to a block leaking?
208*/
209static Bool o_showIndirect = False;
210/*
211** Should we show pointers to a block that is deallocated?
212*/
213static Bool o_showHanging = False;
214/*
215** Should we show blocks with only circular references?
216*/
217static Bool o_showCircular = False;
218/*
219** Show interal stats at the end of the run.
220*/
221static Bool o_showInternStats = False;
222/*
223** Should we only show the summary report.
224*/
225static Bool o_showSummaryOnly = True;
226
227/*
228** Should we clear leaked blocks to try and force an error.
229*/
230static Bool o_poison = False;
231
232/*
233** These figures are pure wet finger in the air guestimates.
234** If the user has _lots_ of memory blocks / tracked pointers, they can
235** increase the prime number on the command line.
236*/
237/*
238** Number of PBit Node entries in the hash table.
239*/
240static UInt o_pbitNodeHashSize = 1031;
241/*
242** Number of MemBlock entries in the hash table.
243*/
244static UInt o_memblockHashSize = 65537;
245/*
246** Number of Tracked Pointer entries in the hash table.
247*/
248static UInt o_trackedPointerHashSize = 65537;
249
250/*------------------------------------------------------------*/
251/*--- Statistics ---*/
252/*------------------------------------------------------------*/
253typedef struct
254{
255 unsigned long liveTrackedPointers;
256 unsigned long trackedPointersAllocated;
257 unsigned long liveMemoryBlocks;
258 unsigned long memoryBlocksAllocated;
259 unsigned long shadowMemoryBlocksAllocated;
260 unsigned long memoryBlocksLeaked;
261 unsigned long memoryBlocksLostAndFound;
262 unsigned long pbitNodes;
263} Stats;
264
265static Stats o_stats;
266
267/*------------------------------------------------------------*/
268/*--- PBit Tracking ---*/
269/*------------------------------------------------------------*/
270/*
271** Setup constants for PBit tracking.
272*/
273#if (VG_WORDSIZE == 4)
274#define PBIT_MAJOR_SHIFT 7
275#define PBIT_MINOR_SHIFT 2
276#define PBIT_MINOR_MASK 0x1F
277#elif (VG_WORDSIZE == 8)
278#define PBIT_MAJOR_SHIFT 8
279#define PBIT_MINOR_SHIFT 3
280#define PBIT_MINOR_MASK 0x1F
281#endif
282
283/*
284** Work out how many bytes a UInt of pbits covers
285*/
286#define PBIT_RANGE (sizeof(UInt) * 8 * VG_WORDSIZE)
287
288/*
289** Number of UInts to store in a node so that the node covers 64K
290*/
291#define PBIT_NODE_UINTS ((64 * 1024) / PBIT_RANGE)
292
293/*
294** Memory range covered by a pbit node
295*/
296#define PBIT_NODE_RANGE 0xFFFF
297#define PBIT_NODE_RANGE_MASK (~PBIT_NODE_RANGE)
298#define PBIT_NODE_SHIFT 16
299
300/* Define the pbit storage node. */
301typedef struct {
302 VgHashNode hdr; // Must be first item
303 UInt set_bits; // Count of set bits
304 UInt pbits[PBIT_NODE_UINTS]; // 64K of coverage
305} PBitNode;
306
307/*
308** We use a hash table to track the p-bits.
309** The node is defined just above. The key to a node is the memory
310** address right shifted PBIT_NODE_SHIFT bits.
311*/
312static VgHashTable o_PBits = NULL;
313
314/*
315** For speed, we keep a node to track register allocations and cache the last
316** node that was accessed.
317*/
318static PBitNode o_registerPBits;
319static PBitNode *o_lastPBitNode = NULL;
320static Addr o_lastPBitNodeKey = 0;
321
322/*
323** Convenience macros for working out which bit in which PBIT_NODE_UINT we
324** wish to address.
325*/
326#define PBIT_MAJOR_INDEX( addr ) \
327 (((addr) & PBIT_NODE_RANGE) >> PBIT_MAJOR_SHIFT)
328#define PBIT_MINOR_INDEX( addr ) \
329 (((addr) >> PBIT_MINOR_SHIFT) & PBIT_MINOR_MASK)
330#define PBIT_KEY( addr ) ((Addr)(addr) >> PBIT_NODE_SHIFT)
331
332typedef struct {
333 PBitNode *node;
334 Addr currentAddress;
335 Addr finalAddress;
336} PBitContext;
337
338/*
339** Helper functions for doing fast searches through an address range.
340*/
341static Addr o_firstPBit(PBitContext *context, Addr start, SizeT length);
342static Addr o_nextPBit(PBitContext *context);
343
344/*
345** Basic PBit manipulation.
346*/
347static PBitNode *o_getPBitNode(Addr address, Bool create)
348{
349 Addr key = PBIT_KEY(address);
350
351 O_PDEBUG("o_getPBitNode(%p%s)", address,
352 create ? ", create" : "");
353
354 O_PDEBUG("o_getPBitNode last node %p, last key %p",
355 o_lastPBitNode, o_lastPBitNodeKey);
356
357 if(IS_REG(address))
358 {
359 /*
360 ** This is a register - use the register PBit node.
361 */
362 O_PDEBUG("o_getPBitNode returning register PBit node");
363 return &o_registerPBits;
364 }
365 else if((key == o_lastPBitNodeKey) &&
366 (o_lastPBitNode || !create))
367 {
368 /*
369 ** This is in the same node as last time.
370 */
371 O_PDEBUG("o_getPBitNode returning last PBit node");
372 return o_lastPBitNode;
373 }
374 else
375 {
376 /*
377 ** It's a new node.
378 ** Look it up then cache both the node and the node key.
379 */
380 o_lastPBitNode = VG_(HT_lookup)(o_PBits, key);
381 o_lastPBitNodeKey = key;
382
383 if(!o_lastPBitNode & create)
384 {
385 /*
386 ** We don't have a node for this address. Create one now.
387 */
388 o_lastPBitNode = VG_(malloc)( sizeof(PBitNode) );
389 tl_assert(o_lastPBitNode);
390 VG_(memset)(o_lastPBitNode, 0, sizeof(PBitNode));
391 o_lastPBitNode->hdr.key = key;
392
393 /*
394 ** Add this node into the hash table.
395 */
396 VG_(HT_add_node)(o_PBits, o_lastPBitNode);
397
398 O_PDEBUG("Created PBit node beginning %p for address %p",
399 (key << PBIT_NODE_SHIFT),
400 address);
401
402 o_stats.pbitNodes++;
403
404 }
405 O_PDEBUG("o_getPBitNode returning lookup PBit node");
406
407 return o_lastPBitNode;
408 }
409}
410
411static void o_setPBit( Addr address )
412{
413 /*
414 ** Retrieve the node that contains this address then set the appropriate bit.
415 */
416 PBitNode *pbn = o_getPBitNode(address, True);
417
418 O_PDEBUG("o_setPBit(%p)", address);
419
420 O_PDEBUG("o_setPBit - node = %p, MAJOR = %d, MINOR = %d",
421 pbn,
422 PBIT_MAJOR_INDEX(address),
423 PBIT_MINOR_INDEX(address));
424 /*
425 ** The PBit might not be clear so only tweak things if it is.
426 */
427 if(!(pbn->pbits[PBIT_MAJOR_INDEX(address)] &
428 (1 << PBIT_MINOR_INDEX(address))))
429 {
430 /*
431 ** Set the pbit and increment the convenience count.
432 */
433 pbn->pbits[PBIT_MAJOR_INDEX(address)] |=
434 (1 << PBIT_MINOR_INDEX(address));
435 pbn->set_bits++;
436 }
437
438 O_PDEBUG("o_setPBit done");
439 return;
440}
441
442static void o_clearPBit( Addr address )
443{
444 /*
445 ** Retrieve the node that contains this address. If the node does not exist,
446 ** we assert as this really shouldnt happen.
447 */
448 PBitNode *pbn = o_getPBitNode(address, False);
449
450 O_PDEBUG("o_clearPBit(%p)", address);
451
452 tl_assert(pbn);
453
454 /*
455 ** The PBit might not be set so only tweak things if it is.
456 */
457 if(pbn->pbits[PBIT_MAJOR_INDEX(address)] &
458 (1 << PBIT_MINOR_INDEX(address)))
459 {
460 /*
461 ** Clear the pbit and decrement the convenience count.
462 */
463 pbn->pbits[PBIT_MAJOR_INDEX(address)] &=
464 ~(1 << PBIT_MINOR_INDEX(address));
465 pbn->set_bits--;
466 }
467
468 return;
469}
470
471static Bool o_isPBitSet( Addr address )
472{
473 /*
474 ** Retrieve the node that contains this address. If the node does not exist,
475 ** the Pbit isnt set ;-)
476 */
477 PBitNode *pbn = o_getPBitNode(address, False);
478
479 O_PDEBUG("o_isPBitSet(%p)", address);
480
481 if(!pbn)
482 return 0;
483
484 /*
485 ** Return the Pbit status.
486 */
487 return ((pbn->pbits[PBIT_MAJOR_INDEX(address)] &
488 (1 << PBIT_MINOR_INDEX(address))) != 0);
489}
490
491/*
492** For ease of range checking PBits, we provide the following two functions.
493** The idea is that you call the first one with your start address and range.
494** It returns the first address that is marked by a PBit or 0 if the range is
495** clear (we overlap the supplied range in order to check partial pointers at
496** each end). By calling the second one with the same context until it returns
497** zero, you get all of the PBits within the range. You supply the context so
498** we should be able to nest calls if need be.
499*/
500static Addr o_firstPBit(PBitContext *context, Addr start, SizeT length)
501{
502 const Addr MASK = ~(VG_WORDSIZE - 1);
503
504 tl_assert(context);
505 tl_assert(start > VG_WORDSIZE);
506
507 O_PDEBUG("o_firstPBit(%p, %p)", start, length);
508 /*
509 ** Optimisation for single pointer ranges and bizarre 0 length calls.
510 */
511 if(!length)
512 {
513 return 0;
514 }
515 else if(length <= VG_WORDSIZE)
516 {
517 /*
518 ** Set the current address to 0.
519 */
520 context->currentAddress = 0;
521 return (o_isPBitSet(start)) ? (start & MASK) : 0;
522 }
523
524 /*
525 ** Setup the current and final addresses. Note that we set the current
526 ** address to one aligned address below because of how nextPBit works.
527 */
528 context->currentAddress = ((start & MASK) - VG_WORDSIZE);
529 context->finalAddress = ((start + length - 1) & MASK);
530
531 context->node = o_getPBitNode(context->currentAddress, False);
532
533 O_PDEBUG("o_firstPBit current %p, final %p",
534 context->currentAddress, context->finalAddress);
535
536 return o_nextPBit(context);
537}
538
539static Addr o_nextPBit(PBitContext *context)
540{
541 /*
542 ** Current address is the last address we returned.
543 ** We keep going until we have checked final address.
544 */
545 UInt pbits;
546 Addr startAddr;
547 Addr foundAddr = 0;
548 UInt majorIndex;
549 UInt minorIndex;
550
551 tl_assert(context);
552
553 /*
554 ** When the current address is set to 0, we just exit.
555 */
556 if(context->currentAddress == 0)
557 {
558 return 0;
559 }
560
561 O_PDEBUG("o_nextPBit(%p,%p)",
562 context->currentAddress, context->finalAddress);
563
564 while(!foundAddr &&
565 (context->currentAddress <= context->finalAddress))
566 {
567 /*
568 ** Check if we need another node and get it if we do.
569 */
570 startAddr = context->currentAddress + VG_WORDSIZE;
571
572 O_PDEBUG("o_nextPBit c %p s %p", context->currentAddress, startAddr);
573
574 if(PBIT_KEY(context->currentAddress) !=
575 PBIT_KEY(startAddr))
576 {
577 O_PDEBUG("o_nextPBit getting next node %p",
578 startAddr & PBIT_NODE_RANGE_MASK);
579
580 context->node = o_getPBitNode(startAddr, False);
581 }
582 context->currentAddress = startAddr;
583
584 /*
585 ** Check if we have a node - skip to next node (final address
586 ** permitting) if we dont. This is the 64k of addresses at a time
587 ** comparison.
588 */
589 if(!context->node)
590 {
591 O_PDEBUG("o_nextPbit: no node.");
592
593 if(context->currentAddress > context->finalAddress)
594 {
595 /*
596 ** We have passed the final address - time to stop looking.
597 */
598 O_PDEBUG("o_nextPbit: current > final");
599 continue;
600 }
601 else if((context->currentAddress & PBIT_NODE_RANGE_MASK) !=
602 (context->finalAddress & PBIT_NODE_RANGE_MASK))
603 {
604 /*
605 ** Align to VG_WORDSIZE below the next node range then loop.
606 */
607 O_PDEBUG("o_nextPbit: aligning to next node. (%p, %p)",
608 context->currentAddress,
609 context->finalAddress);
610
611 context->currentAddress += (PBIT_NODE_RANGE + 1);
612 context->currentAddress &= PBIT_NODE_RANGE_MASK;
613 context->currentAddress -= VG_WORDSIZE;
614
615 O_PDEBUG("o_nextPbit: aligned to %p",
616 context->currentAddress);
617
618 continue;
619 }
620 else
621 {
622 /*
623 ** Node range is the same but no node == no pbits.
624 */
625 context->currentAddress = context->finalAddress + VG_WORDSIZE;
626 break;
627 }
628 }
629
630 /*
631 ** The index of the PBit array item we want to check then get the pbits.
632 */
633 majorIndex = PBIT_MAJOR_INDEX(context->currentAddress);
634 minorIndex = PBIT_MINOR_INDEX(context->currentAddress);
635 pbits = context->node->pbits[majorIndex];
636
637 /*
638 ** Mask off addresses below the current address then test.
639 */
640 pbits &= ~((1 << minorIndex) - 1);
641
642 O_PDEBUG("o_nextPbit: major %d, minor %d, bit %p",
643 majorIndex, minorIndex, pbits);
644 /*
645 ** This checks up to PBIT_RANGE at a time (256 addresses on a
646 ** 64bit machine).
647 */
648 if(!pbits)
649 {
650 /*
651 ** No pbits set in this UInt. Set the current address to VG_WORDSIZE
652 ** below the next UInt then loop around.
653 */
654 context->currentAddress += PBIT_RANGE;
655 context->currentAddress &= ~(PBIT_RANGE - 1);
656 context->currentAddress -= VG_WORDSIZE;
657
658 continue;
659 }
660
661 /*
662 ** Now we walk the UInt a bit at a time.
663 */
664 for(;
665 ((minorIndex <= PBIT_MINOR_MASK) &&
666 (context->currentAddress <= context->finalAddress))
667 ; minorIndex++)
668 {
669 if(pbits & (1 << minorIndex))
670 {
671 /*
672 ** We have a match.
673 */
674 foundAddr = context->currentAddress;
675 O_PDEBUG("o_nextPbit found %p", foundAddr);
676 break;
677 }
678 else
679 {
680 context->currentAddress += VG_WORDSIZE;
681 }
682 }
683 }
684
685 /*
686 ** Final range check.
687 */
688 if(foundAddr > context->finalAddress)
689 {
690 foundAddr = 0;
691 }
692
693 /*
694 ** Store the result so that we know where to start from next time.
695 */
696 context->currentAddress = foundAddr;
697
698 O_PDEBUG("o_nextPbit returning %p", foundAddr);
699
700 return foundAddr;
701}
702
703/*------------------------------------------------------------*/
704/*--- Error Report and Suppression Tracking ---*/
705/*------------------------------------------------------------*/
706/*
707** We hold a doubley linked list of Exe contexts for leaks and suppressions.
708** If a block is tagged as leaked then comes back to life, we move it
709** into the suppression list. We always check the suppression list first
710** before adding a record to the leaked list.
711** We keep a count of how may times a record matches as it saves space.
712*/
713struct _BlockRecord {
714 struct _BlockRecord *next;
715 struct _BlockRecord *prev;
716 ExeContext *allocated;
717 ExeContext *leaked;
718 UInt bytes;
719 SizeT count;
720};
721
722typedef struct _BlockRecord BlockRecord;
723
724typedef struct {
725 BlockRecord *start;
726 BlockRecord *end;
727} BlockRecordList;
728static BlockRecordList o_leakRecords = {NULL, NULL};
729static BlockRecordList o_suppressionRecords = {NULL, NULL};
730
731#define DUMP_BLOCK(block) \
732 O_DEBUG("n %p, p %p, a %p, l %p, c %d b %p", \
733 block->next, block->prev, \
734 block->allocated, block->leaked, block->count, \
735 block->bytes);
736
737/*
738** List handling - we need to be able to add and remove a single block
739** from anywhere in the list but the chances are, removals will come from
740** the end, hence using a doubly linked list. We also need to walk the list
741** to find a matching item. Again, we do this backwards as it tends to get
742** a match faster in the case of moving newly leaked block records into
743** the suppression list.
744*/
745static void o_addBlockRecord(BlockRecordList *list, BlockRecord *item)
746{
747 /*
748 ** Catch start case.
749 */
750 tl_assert(list && item);
751
752 NO_DEBUG("o_addBlockRecord pre()");
753 //DUMP_BLOCK(item);
754
755 if(!list->start)
756 {
757 list->start = list->end = item;
758 item->prev = item->next = NULL;
759 }
760 else
761 {
762 /*
763 ** OK, add it onto the end.
764 */
765 item->prev = list->end;
766 item->next = NULL;
767 list->end->next = item;
768 list->end = item;
769 }
770 NO_DEBUG("o_addBlockRecord post()");
771 //DUMP_BLOCK(item);
772 return;
773}
774
775static void o_removeBlockRecord(BlockRecordList *list, BlockRecord *item)
776{
777 /*
778 ** We don't check that the item is in the list.
779 ** Ensure you check with the findBlockRecord function.
780 */
781 tl_assert(list && item);
782
783 NO_DEBUG("o_removeBlockRecord pre()");
784 //DUMP_BLOCK(item);
785 if(item->prev)
786 {
787 /*
788 ** Not at the start.
789 */
790 item->prev->next = item->next;
791 }
792 else
793 {
794 /*
795 ** At the start.
796 */
797 list->start = item->next;
798 }
799
800 if(item->next)
801 {
802 /*
803 ** Not at the end.
804 */
805 item->next->prev = item->prev;
806 }
807 else
808 {
809 /*
810 ** At the end.
811 */
812 list->end = item->prev;
813 }
814
815 NO_DEBUG("o_removeBlockRecord post()");
816 //DUMP_BLOCK(item);
817
818 return;
819}
820
821static BlockRecord *o_findBlockRecord(BlockRecordList *list,
822 ExeContext *allocated,
823 ExeContext *leaked)
824
825{
826 /*
827 ** Search backwards for the block record that matches the contexts.
828 ** We allow leaked to be null so that we can handle the circular checking
829 ** blocks as well which only have an allocated context.
830 */
831 BlockRecord *item = NULL;
832
833 tl_assert(list && allocated);
834
835 item = list->end;
836
837 while(item)
838 {
839 if(VG_(eq_ExeContext)(Vg_HighRes, item->allocated, allocated) &&
840 ((!item->leaked && !leaked) ||
841 ((item->leaked && leaked) &&
842 VG_(eq_ExeContext)(Vg_HighRes, item->leaked, leaked))))
843 {
844 break;
845 }
846
847 item = item->prev;
848 }
849
850 return item;
851}
852
853static Bool o_addLeakedBlock(ExeContext *allocated,
854 ExeContext *leaked,
855 SizeT size)
856{
857 BlockRecord *item = NULL;
858
859 tl_assert(allocated && leaked);
860
861 /*
862 ** See if we already have this block.
863 ** Check the suppression record first.
864 */
865 item = o_findBlockRecord(&o_suppressionRecords, allocated, leaked);
866
867 if(!item)
868 {
869 /*
870 ** Not in the suppression record.
871 ** Try the leaked block list.
872 */
873 item = o_findBlockRecord(&o_leakRecords, allocated, leaked);
874 }
875
876 if(item)
877 {
878 /*
879 ** Just increment the count.
880 */
881 item->count++;
882 item->bytes += size;
883 //O_DEBUG("o_addLeakedBlock - block exists");
884 //DUMP_BLOCK(item);
885 return False;
886 }
887 else
888 {
889 /*
890 ** Create a new block and add it to the leaked list.
891 */
892 item = VG_(malloc)(sizeof(BlockRecord));
893 tl_assert(item);
894
895 item->count = 1;
896 item->bytes = size;
897 item->next = item->prev = NULL;
898 item->allocated = allocated;
899 item->leaked = leaked;
900
901 o_addBlockRecord(&o_leakRecords, item);
902
903 return True;
904 }
905
906}
907
908static Bool o_addSuppressionBlock(ExeContext *allocated,
909 ExeContext *leaked)
910{
911 BlockRecord *item = NULL;
912
913 tl_assert(allocated && leaked);
914
915 /*
916 ** See if we already have this block.
917 ** Check the suppression record first.
918 */
919 item = o_findBlockRecord(&o_suppressionRecords, allocated, leaked);
920
921 if(!item)
922 {
923 /*
924 ** Not in the suppression record.
925 ** Try the leaked block list.
926 */
927 item = o_findBlockRecord(&o_leakRecords, allocated, leaked);
928
929 if(!item)
930 {
931 VG_(tool_panic)("suppressing block that didnt leak :-(");
932 }
933 else
934 {
935 /*
936 ** Move the block to the suppression list.
937 */
938 o_removeBlockRecord(&o_leakRecords, item);
939 o_addBlockRecord(&o_suppressionRecords, item);
940 }
941 }
942 else
943 {
944 /*
945 ** The block is already suppressed - just increase the count.
946 */
947 item->count++;
948
949 //O_DEBUG("o_addSuppressionBlock - block exists");
950 //DUMP_BLOCK(item);
951 return False;
952 }
953
954 return True;
955}
956
957/*------------------------------------------------------------*/
958/*--- Allocated Block and Pointer Tracking ---*/
959/*------------------------------------------------------------*/
960/*
961** Where these structures have address references, they are the address
962** of the item in client memory NOT the address of either of these
963** internal tracking structures.
964*/
965struct _MemBlock;
966typedef struct {
967 VgHashNode hdr; // Must be first item
968 Addr block; // Address of the allocated block start
969 SizeT length; // Length of the allocated block
970 struct _MemBlock *memBlock; // Pointer to the memblock
971} TrackedPointer;
972
973typedef struct _MemBlock {
974 VgHashNode hdr; // Must be first item
975 SizeT length; // Length of the allocated block
976 ExeContext *where; // Where the block was allocated
977 UInt refNum; // Number of back references
978 TrackedPointer **pointers; // Back references to TrackedPointer info
979 struct _MemBlock *shadowing; // Set to memblock of block that we shadow
980 struct _MemBlock *shadowed; // Set to memblock of our shadow
981 ExeContext *leaked; // Where we think the block leaked
982 UInt nonRegCount; // Non register tracked pointers
983 Int external; // Used in circular dependency checking
984
985 TrackedPointer *maybeLast; // Last live tracked pointer on function return
986 ExeContext *funcEnd; // matching exe context for the end of the function
987 Bool doLeak; // Set if this block should leak on instruction
988 // end. We have to make instructions atomic or we
989 // go bang on things like xchng as there is no way
990 // of telling which value gets overwritten first.
991 struct _MemBlock *next; // Linked list of blocks that might be leaking at
992 // instruction end.
993 int depth; // Depth that the potential leak occurred at.
994 TrackedPointer *wasLast; // Pointer t
995
996 UInt nonScratch; // Number of non-scratch registers.
997} MemBlock;
998
999/*
1000** Shadows?
1001** This helps to solve the problem of where a program does its own memory
1002** management of the kind:
1003
10041 secret *foo = malloc(sizeof(bar) + sizeof(secret) + alignment_correction);
10052 foo->secret_stuff = magic_key;
10063 etc.
10074 foo++;
10085 return (bar*)foo;
1009
1010** If the pointer to foo is shadowed at some internal offset to the block
1011** start, we create a shadow record and link it to the main block so that
1012** we can track references to either. Without this we do a leak alert at
1013** line 4 instead which is undesireable.
1014**
1015** There can only be one shadow to a block unless we need more and someone
1016** wants to code it. A side effect of the current implementation allows a
1017** shadow of a shadow but it is explicitly blocked for now.
1018*/
1019
1020/*
1021** We use separate hash tables to track the pointers and allocated blocks.
1022** The key of each node is the address of the corresponding item in client
1023** memory, shifted right to remove the wasted bits caused by alignment of
1024** pointers in memory.
1025*/
1026#if (VG_WORDSIZE == 4)
1027#define TRACK_MINOR_SHIFT 2
1028#define TRACK_MINOR_MASK ~0x03
1029#elif (VG_WORDSIZE == 8)
1030#define TRACK_MINOR_SHIFT 3
1031#define TRACK_MINOR_MASK ~0x07
1032#endif
1033
1034#define TRACKED_KEY( a ) ((UWord)(a) >> TRACK_MINOR_SHIFT)
1035#define FROM_TRACKED_KEY( a ) ((UWord)(a) << TRACK_MINOR_SHIFT)
1036
1037/*
1038** Storage for the two hash tables we need.
1039*/
1040static VgHashTable o_MemBlocks = NULL;
1041static VgHashTable o_TrackedPointers = NULL;
1042
1043/*
1044** Start of a linked list of blocks that may be leaking during this original
1045** processor instruction. Instructions are broken down inside VEX so a single
1046** original instruction can become many VEX instructions. By not doing leak
1047** reports until the end of the original instruction, everything becomes
1048** atomic again - the stack moves and the popped value appears in the register
1049** in one movement rather than two which cause a leak if the stack is
1050** invalidated before the value appears in the register. xchng works both ways
1051** around and so on.
1052*/
1053static MemBlock *doLeakList = NULL;
1054static UInt doLeakListCount = 0;
1055static Bool doLeakNow = False;
1056
1057/*
1058** Set when we are removing pointers within a free()ed block.
1059*/
1060static Bool o_clearingBlock = False;
1061
1062/*
1063** Set when we are removing pointers within a free()ed block or a
1064** block that leaked. It shows the indirection level in cascades.
1065*/
1066static UInt o_indirectChecking = 0;
1067static ExeContext *o_indirectStack = NULL;
1068
1069/*
1070** Set when the stack is unwinding.
1071*/
1072static Bool o_stackUnwind = False;
1073
1074static void o_killRange(Addr start, SizeT length);
1075
1076/*
1077** This is set to stop us from tracking leaks once we exit main.
1078** (May well need a per thread flag to catch when threads exit as well.)
1079*/
1080static Bool o_inhibitLeakDetect = False;
1081
1082
1083static void o_cleanupTrackedPointers( MemBlock * mb )
1084{
1085 UInt pointerIndex;
1086
1087 for(pointerIndex = 0; pointerIndex < mb->refNum; pointerIndex++)
1088 {
1089 TrackedPointer *p =
1090 VG_(HT_remove)(o_TrackedPointers,
1091 mb->pointers[pointerIndex]->hdr.key);
1092
1093 tl_assert(p);
1094 O_GDEBUG("Removing tracked pointer at %p pointing to %p",
1095 FROM_TRACKED_KEY(p->hdr.key),
1096 mb->hdr.key);
1097
1098 /*
1099 ** Remove the PBit for this tracked pointer.
1100 */
1101 o_clearPBit(FROM_TRACKED_KEY(p->hdr.key));
1102
1103 /*
1104 ** Show any pointers to this block as we deallocate them.
1105 */
1106 if(o_showHanging)
1107 {
1108 if(IS_REG(FROM_TRACKED_KEY(p->hdr.key)))
1109 {
1110 /*
1111 ** Maybe decode registers to names later?
1112 */
1113 O_DEBUG("Removing hanging pointer in a register to block %p",
1114 p->block);
1115 }
1116 else
1117 {
1118 O_DEBUG("Removing hanging pointer at %p to block %p",
1119 FROM_TRACKED_KEY(p->hdr.key),
1120 p->block);
1121 }
1122 }
1123 VG_(free)(p);
1124 o_stats.liveTrackedPointers--;
1125 }
1126
1127 /*
1128 ** Free off the pointers back reference.
1129 */
1130 VG_(free)(mb->pointers);
1131 mb->pointers = NULL;
1132 mb->refNum = 0;
1133
1134 return;
1135}
1136
1137static void o_cleanupMemBlock( MemBlock **mbpp )
1138{
1139 MemBlock *mb;
1140
1141 O_GDEBUG("o_cleanupMemBlock(%p)", mbpp);
1142 /*
1143 ** Sanity check.
1144 */
1145 if(!mbpp || !*mbpp)
1146 {
1147 O_DEBUG("o_cleanupMemBlock passed null memory block pointer.");
1148 return;
1149 }
1150
1151 /*
1152 ** Take a local copy with less indirection.
1153 */
1154 mb = *mbpp;
1155
1156 O_GDEBUG("o_cleanupMemBlock mb=%p", mb->hdr.key);
1157
1158 /*
1159 ** If this is a shadowed block, complain then return.
1160 */
1161 if(mb->shadowing)
1162 {
1163 O_DEBUG("Trying to cleanup a shadow block at %p tracking %p",
1164 mb->hdr.key,
1165 mb->shadowing->hdr.key);
1166 return;
1167 }
1168
1169 /*
1170 ** If a shadow exists, clean it up.
1171 */
1172 if(mb->shadowed)
1173 {
1174 MemBlock *shadowed = mb->shadowed;
1175
1176 /*
1177 ** Cleanup its pointers, remove it from the hash table then
1178 ** free off the block.
1179 */
1180 O_GDEBUG("cleanup shadow pointers");
1181 o_cleanupTrackedPointers(shadowed);
1182 (void)VG_(HT_remove)(o_MemBlocks, shadowed->hdr.key);
1183 VG_(free)(shadowed);
1184
1185 o_stats.liveMemoryBlocks--;
1186 }
1187
1188 /*
1189 ** Free off the tracked pointers.
1190 */
1191 O_GDEBUG("cleanup tracked pointers");
1192 o_cleanupTrackedPointers(mb);
1193
1194 /*
1195 ** Check for tracked pointers inside the allocated block being lost.
1196 */
1197 o_indirectChecking++;
1198 o_clearingBlock = True;
1199 o_killRange(mb->hdr.key,
1200 mb->length);
1201 o_clearingBlock = False;
1202 o_indirectChecking--;
1203
1204 /*
1205 ** Now free off the memory block.
1206 */
1207 VG_(free)(mb);
1208 o_stats.liveMemoryBlocks--;
1209
1210 /*
1211 ** Clear the passed in pointer.
1212 */
1213 *mbpp = NULL;
1214
1215 return;
1216}
1217
1218static void o_addMemBlockReference( MemBlock *mb, TrackedPointer *tp )
1219{
1220 MemBlock *smb = mb;
1221
1222 O_GDEBUG("o_addMemBlockReference tp=%p, mb=%p",
1223 FROM_TRACKED_KEY(tp->hdr.key),
1224 mb->hdr.key);
1225
1226 /*
1227 ** Check if we are shadowing.
1228 */
1229 if(mb->shadowing)
1230 {
1231 /*
1232 ** Get the mem block for the true allocated block.
1233 ** Note that this leaves smb pointing to the shadow block which is
1234 ** what we want.
1235 */
1236 mb = mb->shadowing;
1237 }
1238
1239 /*
1240 ** Check if the block previously leaked.
1241 */
1242 if(!mb->shadowed && !mb->refNum && mb->leaked)
1243 {
1244 /*
1245 ** Seems that the block didnt leak after all.
1246 */
1247 if(o_addSuppressionBlock(mb->where, mb->leaked) && !o_showSummaryOnly)
1248 {
1249 O_DEBUG("Welcome back to the supposedly leaked block at %p. Illegal read?",
1250 mb->hdr.key);
1251
1252 VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(), VG_(clo_backtrace_size));
1253 O_DEBUG("");
1254 }
1255
1256 mb->leaked = NULL;
1257 o_stats.memoryBlocksLeaked--;
1258 o_stats.memoryBlocksLostAndFound++;
1259 }
1260
1261 /*
1262 ** Populate the tracked pointer then add it to the hash.
1263 ** We use the shadow block so that it points to the correct place.
1264 ** Add the back reference to the mem block.
1265 */
1266 tp->block = smb->hdr.key;
1267 tp->length = mb->length;
1268 tp->memBlock = smb;
1269 VG_(HT_add_node)(o_TrackedPointers, tp);
1270
1271 /*
1272 ** Do we need more memory for pointers?
1273 */
1274 if(!smb->pointers)
1275 {
1276 smb->pointers =
1277 VG_(malloc)((smb->refNum + 8) * sizeof(TrackedPointer *));
1278 tl_assert(smb->pointers);
1279 }
1280 else if(!((smb->refNum + 1) & 7))
1281 {
1282 /*
1283 ** Add space for another 8 back references.
1284 ** Note that this will also shrink us if needed.
1285 */
1286 smb->pointers =
1287 VG_(realloc)(smb->pointers, ((smb->refNum + 8) * sizeof(Addr)));
1288 tl_assert(smb->pointers);
1289 }
1290
1291 smb->pointers[smb->refNum] = tp;
1292
1293 /*
1294 ** Track register and memory pointers.
1295 */
1296 if(!IS_REG(FROM_TRACKED_KEY(smb->pointers[smb->refNum]->hdr.key)))
1297 {
1298 smb->nonRegCount++;
1299 }
1300 else if(!o_isReturnIgnoreReg(FROM_TRACKED_KEY(smb->pointers[smb->refNum]->hdr.key)))
1301 {
1302 smb->nonScratch++;
1303 }
1304
1305 /*
1306 ** Clear the maybeLast and funcEnd. Adding a reference means that
1307 ** the cached one wasnt the last.
1308 */
1309 smb->maybeLast = NULL;
1310 smb->funcEnd = NULL;
1311
1312 /*
1313 ** Clear the doLeak flag - we just added a reference so the block survived
1314 ** the instruction.
1315 */
1316 smb->doLeak = False;
1317
1318 smb->refNum++;
1319 O_MDEBUG("Added tracked pointer at %p pointing to %s%p",
1320 FROM_TRACKED_KEY(tp->hdr.key),
1321 smb->shadowing ? "(S)" : "",
1322 smb->hdr.key);
1323
1324 return;
1325}
1326
1327static void o_removePointerFromList(MemBlock *mb, TrackedPointer *tp)
1328{
1329 UInt pointerNum;
1330
1331 O_GDEBUG("removePointerFromList tp=%p mb=%p",
1332 FROM_TRACKED_KEY(tp->hdr.key),
1333 mb->hdr.key);
1334
1335 /*
1336 ** Check that this tracked pointer belongs to this block.
1337 */
1338 tl_assert(tp->memBlock == mb);
1339
1340 /*
1341 ** Find the tracked pointer in the memory blocks' list.
1342 */
1343 for(pointerNum = 0; pointerNum < mb->refNum; pointerNum++)
1344 {
1345 if(mb->pointers[pointerNum] == tp)
1346 {
1347 /*
1348 ** Found it.
1349 ** If this is not the last pointer in the list, copy the last
1350 ** one over it.
1351 */
1352 if((pointerNum + 1) != mb->refNum)
1353 {
1354 mb->pointers[pointerNum] = mb->pointers[(mb->refNum - 1)];
1355 }
1356
1357 break;
1358 }
1359 }
1360
1361 /*
1362 ** Track register and memory pointers.
1363 */
1364 if(!IS_REG(FROM_TRACKED_KEY(tp->hdr.key)))
1365 {
1366 mb->nonRegCount--;
1367 }
1368 else if(!o_isReturnIgnoreReg(FROM_TRACKED_KEY(tp->hdr.key)))
1369 {
1370 mb->nonScratch--;
1371 }
1372
1373 mb->refNum--;
1374
1375 return;
1376}
1377
1378static void o_doLeakReport(MemBlock *mb);
1379static void o_removeMemBlockReference( MemBlock *mb, TrackedPointer *tp )
1380{
1381 MemBlock *smb = NULL;
1382 SizeT refCount = 0;
1383 UInt nonRegCount = 0;
1384 Bool shadowed = False;
1385
1386 /*
1387 ** We need the tracked pointer object.
1388 */
1389 tl_assert(tp);
1390
1391 /*
1392 ** If we dont have the memory block, get it from the tracked pointer.
1393 */
1394 if(!mb)
1395 {
1396 mb = tp->memBlock;
1397 }
1398 tl_assert(mb);
1399
1400 O_GDEBUG("o_removeMemBlockReference tp=%p, mb=%p",
1401 FROM_TRACKED_KEY(tp->hdr.key),
1402 mb->hdr.key);
1403
1404 smb = mb;
1405 refCount = smb->refNum;
1406 nonRegCount = smb->nonRegCount;
1407
1408 O_GDEBUG("(A)refCount %d, o_stackUnwind %c, nonRegCount %d, isReg %c",
1409 refCount,
1410 (o_stackUnwind ? 'Y' : 'N'),
1411 nonRegCount,
1412 IS_REG(FROM_TRACKED_KEY(tp->hdr.key)) ? 'Y' : 'N');
1413
1414 /*
1415 ** Check if we are shadowing.
1416 */
1417 if(mb->shadowing)
1418 {
1419 /*
1420 ** Get the mem block for the true allocated block.
1421 ** Note that this leaves smb pointing to the shadow which is correct.
1422 */
1423 mb = mb->shadowing;
1424#if defined(O_MASTER_DEBUG)
1425 if(!o_traceStop)
1426 {
1427 int count;
1428 for(count = 0; count < mb->refNum && count < 6; count++)
1429 O_GDEBUG(" %p", FROM_TRACKED_KEY(mb->pointers[count]->hdr.key));
1430 }
1431#endif
1432 refCount += mb->refNum;
1433 shadowed = True;
1434 nonRegCount += mb->nonRegCount;
1435 }
1436 else if(mb->shadowed)
1437 {
1438 /*
1439 ** Get the mem block for the shadow as we need the refNum from it.
1440 */
1441 MemBlock *tmb = mb->shadowed;
1442#if defined(O_MASTER_DEBUG)
1443 if(!o_traceStop)
1444 {
1445 int count;
1446 for(count = 0; count < tmb->refNum && count < 6; count++)
1447 O_GDEBUG(" %p", FROM_TRACKED_KEY(tmb->pointers[count]->hdr.key));
1448 }
1449#endif
1450 refCount += tmb->refNum;
1451 shadowed = True;
1452 nonRegCount += tmb->nonRegCount;
1453 }
1454#if defined(O_MASTER_DEBUG)
1455 else if(!o_traceStop)
1456 {
1457 int count;
1458 for(count = 0; count < mb->refNum && count < 6; count++)
1459 O_GDEBUG(" %p", FROM_TRACKED_KEY(mb->pointers[count]->hdr.key));
1460
1461 }
1462#endif
1463
1464 O_GDEBUG("(B)rCnt %d, nRCnt %d, ns %d, shad %c, free %c",
1465 refCount,
1466 nonRegCount,
1467 mb->nonScratch,
1468 (shadowed ? 'Y' : 'N'),
1469 (o_clearingBlock ? 'Y' : 'N'));
1470 /*
1471 ** We really should have at least one tracked pointer.
1472 */
1473 tl_assert(refCount);
1474
1475#if defined(O_MASTER_DEBUG)
1476 if(!o_traceStop)
1477 {
1478 VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(), 8);O_DEBUG("");
1479 }
1480#endif
1481
1482 /*
1483 ** We remove the tracked pointer from the hash table but do not delete it.
1484 ** This allows a slight gain where a tracked pointer can immediately be
1485 ** reused rather than free()ed off and a new one malloc()ed.
1486 ** We then remove the back reference from the memory block and
1487 ** squeal if it is the last one. We don't clean the tracked pointer as this
1488 ** is a waste if it is going to be free()ed off.
1489 ** If warn indirect is set and this is an indirect check, do nothing.
1490 */
1491 (void)VG_(HT_remove)(o_TrackedPointers, tp->hdr.key);
1492
1493 O_GDEBUG("Removing tracked pointer at %p pointing to %p",
1494 FROM_TRACKED_KEY(tp->hdr.key),
1495 smb->hdr.key);
1496
1497 if((refCount <= 1) // Last pointer
1498
1499 /*
1500 ** Catch cascades of memory blocks when we call free().
1501 */
1502 || (o_clearingBlock && !shadowed && !mb->nonScratch &&
1503 (nonRegCount == 1) && !IS_REG(FROM_TRACKED_KEY(tp->hdr.key)))
1504
1505#if defined(VGA_x86)
1506 /*
1507 ** Losing all in memory pointers within a basic block is not a good sign.
1508 */
1509 || (!o_stackUnwind && (nonRegCount == 1) &&
1510 !IS_REG(FROM_TRACKED_KEY(tp->hdr.key)))
1511#endif
1512 )
1513 {
1514 if((!o_inhibitLeakDetect)
1515 /*
1516 ** Don't report when there are just register based pointers left and
1517 ** we have already reported the block as leaked.
1518 */
1519 && !(mb->leaked && IS_REG(FROM_TRACKED_KEY(tp->hdr.key)))
1520 )
1521 {
1522 /*
1523 ** Set the doLeak flag for the block and add it to the doLeakList.
1524 ** We also need to stash the indirect depth value for possibly reporting
1525 ** later. Finally, if maybeLast matches the pointer that is being removed
1526 ** and thus causing the leak, we leave maybeLast and funcEnd otherwise, we
1527 ** zero them.
1528 */
1529 mb->depth = o_indirectChecking;
1530 if(mb->maybeLast != tp)
1531 {
1532 mb->maybeLast = NULL;
1533 mb->funcEnd = NULL;
1534 }
1535
1536 /*
1537 ** Cascades triggered by a doLeak being actioned should report
1538 ** immediately, rather than being added to the doLeakList. Likewise
1539 ** cascades caused by freeing a block.
1540 */
1541 if(doLeakNow || o_clearingBlock)
1542 {
1543 o_doLeakReport(mb);
1544 }
1545 else
1546 {
1547 mb->doLeak = True;
1548 mb->next = doLeakList;
1549 doLeakList = mb;
1550 doLeakListCount++;
1551 }
1552 }
1553 }
1554
1555 /*
1556 ** Finally, remove the pointer from the blocks' list.
1557 */
1558 o_removePointerFromList(smb, tp);
1559
1560 return;
1561}
1562
1563static void o_doLeakReport(MemBlock *mb)
1564{
1565 Bool doReport = True;
1566
1567 if(mb->maybeLast)
1568 {
1569 // This is the suspected last pointer - use the cached stacktrace
1570 O_MDEBUG("maybe last was the last");
1571 tl_assert(mb->funcEnd);
1572 mb->leaked = mb->funcEnd;
1573 o_indirectStack = mb->funcEnd;
1574 }
1575 else if(mb->depth && o_indirectStack)
1576 {
1577 O_MDEBUG("indirect with indirect stack set");
1578 // We are cascading - use the cached stacktrace, if there is one
1579 mb->leaked = o_indirectStack;
1580 }
1581 else
1582 {
1583 O_MDEBUG("creating new context maybeLast=0");
1584 // Get the current stacktrace
sewardj39f34232007-11-09 23:02:28 +00001585 mb->leaked = VG_(record_ExeContext)(VG_(get_running_tid)(),
1586 0/*first_ip_delta*/);
sewardj99a2ceb2007-11-09 12:30:36 +00001587 }
1588
1589 doReport = o_addLeakedBlock(mb->where, mb->leaked, mb->length);
1590 /*
1591 ** Report the probable leak.
1592 */
1593 o_stats.memoryBlocksLeaked++;
1594
1595 if(doReport && !o_showSummaryOnly)
1596 {
1597 if(mb->depth)
1598 {
1599 if(o_showIndirect)
1600 {
1601 VG_(message)(Vg_UserMsg,
1602 "Probably indirectly (level %d) leaking block of %d(%p) bytes",
1603 mb->depth,
1604 mb->length,
1605 mb->length);
1606 }
1607 }
1608 else
1609 {
1610 VG_(message)(Vg_UserMsg,
1611 "Probably leaking block of %d(%p) bytes",
1612 mb->length,
1613 mb->length);
1614 }
1615
1616 if(!mb->depth || o_showIndirect)
1617 {
1618 VG_(pp_ExeContext)(mb->leaked);
1619
1620 VG_(message)(Vg_UserMsg,
1621 " Block at %p allocated", mb->hdr.key);
1622 VG_(pp_ExeContext)(mb->where);
1623 VG_(message)(Vg_UserMsg,"");
1624 }
1625
1626 /*
1627 ** Only attach the debugger for the first leaking block in the chain
1628 ** and only when show summary is disabled (--instant-reports).
1629 */
1630 if(!mb->depth && VG_(clo_db_attach))
1631 {
1632 VG_(start_debugger)(VG_(get_running_tid)());
1633 }
1634 }
1635
1636 /*
1637 ** Check for tracked pointers inside the allocated block being lost.
1638 */
1639 o_indirectChecking++;
1640 o_killRange(mb->hdr.key, mb->length);
1641 o_indirectChecking--;
1642
1643 /*
1644 ** Poison the block if requested.
1645 */
1646 if(o_poison)
1647 VG_(memset)((Addr *)mb->hdr.key, 0, mb->length);
1648
1649 return;
1650}
1651
1652static Bool o_setupShadow(TrackedPointer *tp, Addr address)
1653{
1654 Bool doneShadow = False;
1655 MemBlock *mb = NULL;
1656 MemBlock *smb = NULL;
1657
1658 O_MDEBUG("setup shadow tp %p block %p address %p",
1659 FROM_TRACKED_KEY(tp->hdr.key), tp->block, address);
1660 /*
1661 ** Get the memory block for the tracked pointer.
1662 ** It should exist.
1663 */
1664 mb = tp->memBlock;
1665 tl_assert(mb);
1666
1667 /*
1668 ** If this is a shadow block, get the main block as well.
1669 ** It should exist.
1670 */
1671 smb = mb;
1672 if(mb->shadowing)
1673 {
1674 mb = mb->shadowing;
1675 tl_assert(mb);
1676 }
1677
1678 /*
1679 ** If the block is already shadowed at address, bail out and let the
1680 ** normal code handle it.
1681 */
1682 if(mb->shadowed)
1683 {
1684 if(mb->shadowed->hdr.key == address)
1685 {
1686 O_MDEBUG("already shadowed %p", address);
1687 return False;
1688 }
1689 /*
1690 ** Get the shadow block.
1691 */
1692 smb = mb->shadowed;
1693 tl_assert(smb);
1694 }
1695
1696 /*
1697 ** Check if address is within the block that we are tracking.
1698 ** If it is then we need to work out whether to create a
1699 ** new shadow or move an eixsting one.
1700 */
1701 if((address > mb->hdr.key) &&
1702 (address < (mb->hdr.key + mb->length)))
1703 {
1704 doneShadow = True;
1705
1706 O_MDEBUG("About to shadow internal address %p to block %p in %p",
1707 address,
1708 mb->hdr.key,
1709 FROM_TRACKED_KEY(tp->hdr.key));
1710
1711 if(smb == mb)
1712 {
1713 O_MDEBUG("creating new shadow");
1714 /*
1715 ** Create a new shadow for the block.
1716 */
1717 smb = VG_(malloc)( sizeof(MemBlock) );
1718 tl_assert(smb);
1719
1720 o_stats.shadowMemoryBlocksAllocated++;
1721 o_stats.liveMemoryBlocks++;
1722
1723 VG_(memset)(smb, 0, sizeof(MemBlock));
1724 smb->hdr.key = address;
1725 smb->length = 0;
1726 smb->where = 0; // Dont need this in the shadow.
1727 smb->shadowing = mb;
1728 mb->shadowed = smb;
1729 VG_(HT_add_node(o_MemBlocks, smb));
1730
1731 /*
1732 ** Move the tracked pointer from the main block to the shadow.
1733 */
1734 (void)VG_(HT_remove)(o_TrackedPointers, tp->hdr.key);
1735 o_removePointerFromList(mb, tp);
1736 o_addMemBlockReference(smb, tp);
1737 }
1738 else if((smb->refNum == 1) &&
1739 (smb == tp->memBlock))
1740 {
1741 O_MDEBUG("moving existing shadow at %p", smb->hdr.key);
1742 /*
1743 ** Move the existing shadow.
1744 */
1745 (void)VG_(HT_remove)(o_MemBlocks, smb->hdr.key);
1746 smb->hdr.key = address;
1747 smb->where = 0; // Dont need this in the shadow.
1748 VG_(HT_add_node(o_MemBlocks, smb));
1749
1750 /*
1751 ** Tweak the existing tracked pointer, leaving the PBit alone.
1752 */
1753 tp->block = address;
1754 }
1755 else
1756 {
1757 /*
1758 ** A shadow exists and has pointers assigned to it.
1759 ** We do not allow more than one shadow so deregister and
1760 ** free this tracked pointer and clear its PBit.
1761 */
1762 O_MDEBUG("Prevented second shadow %p (first %p) for %p",
1763 address,
1764 mb->shadowed,
1765 mb->hdr.key);
1766
1767 o_clearPBit(FROM_TRACKED_KEY(tp->hdr.key));
1768 o_removeMemBlockReference(NULL, tp);
1769 VG_(free)(tp);
1770
1771 o_stats.liveTrackedPointers--;
1772 }
1773
1774 O_MDEBUG("shadow creation / reallocation done");
1775 }
1776 else if((smb != mb) &&
1777 (address == mb->hdr.key))
1778 {
1779 /*
1780 ** Hmmm.
1781 ** Looks like we are setting the tracked pointer to the block start.
1782 ** If it was previously pointing at the shadow block, we need to move it
1783 ** manually.
1784 */
1785 if(tp->block == smb->hdr.key)
1786 {
1787 O_MDEBUG("moving pointer from shadow to main");
1788
1789 if(smb->refNum == 1)
1790 {
1791 doneShadow = True;
1792
1793 O_MDEBUG("destroying shadow of %p at %p",
1794 mb->hdr.key,
1795 smb->hdr.key);
1796 /*
1797 ** Remove the shadow block and move the pointer.
1798 */
1799 (void)VG_(HT_remove)(o_MemBlocks, smb->hdr.key);
1800 mb->shadowed = 0;
1801 VG_(free)(smb->pointers);
1802 VG_(free)(smb);
1803 o_stats.liveMemoryBlocks--;
1804
1805 (void)VG_(HT_remove)(o_TrackedPointers, tp->hdr.key);
1806 o_addMemBlockReference(mb, tp);
1807 }
1808 else
1809 {
1810 /*
1811 ** Let the normal code move the pointer.
1812 */
1813 }
1814 }
1815 }
1816 else
1817 {
1818 O_MDEBUG("tracked pointer out of range");
1819 }
1820
1821 return doneShadow;
1822}
1823
1824static void o_killTrackedPointer(Addr addr)
1825{
1826 TrackedPointer *tp = VG_(HT_lookup)(o_TrackedPointers, TRACKED_KEY(addr));
1827
1828 /*
1829 ** We really should have the tracked pointer.
1830 */
1831 tl_assert(tp);
1832
1833 /*
1834 ** Remove the tracked pointer from its memory block, causing
1835 ** a leak report as required then free it.
1836 */
1837 o_clearPBit(addr);
1838
1839 O_MDEBUG("Removing tracked pointer to %p at %p",
1840 tp->block, FROM_TRACKED_KEY(tp->hdr.key));
1841
1842 o_removeMemBlockReference(NULL, tp);
1843
1844 VG_(free)(tp);
1845
1846 o_stats.liveTrackedPointers--;
1847 return;
1848}
1849
1850static void o_killRange(Addr start, SizeT length)
1851{
1852 /*
1853 ** We need to check the PBits for the addresses starting at start.
1854 ** We use the firstPBit / nextPBit functions to get us a list of set
1855 ** pbits in the specified range.
1856 */
1857 PBitContext pb;
1858 Addr a;
1859
1860 O_MDEBUG("killing range %p bytes from %p", length, start);
1861
1862
1863 a = o_firstPBit(&pb, start, length);
1864 while(a)
1865 {
1866 o_killTrackedPointer(a);
1867 a = o_nextPBit(&pb);
1868 }
1869 O_MDEBUG("killing range %p bytes from %p done.", length, start);
1870}
1871
1872static void o_duplicateTrackedPointers(Addr dst, Addr src, SizeT length)
1873{
1874 /*
1875 ** For each set PBit in the src block, create a new tracked pointer
1876 ** in the destination block, pointing to the same memory block.
1877 */
1878 PBitContext pb;
1879 Addr address;
1880
1881 O_MDEBUG("o_duplicateTrackedPointers(%p, %p %d(%p))",
1882 dst, src, length, length);
1883
1884 address = o_firstPBit(&pb, src, length);
1885
1886 while(address)
1887 {
1888 /*
1889 ** Create a tracked pointer at the appropriate place within the new
1890 ** block of memory.
1891 */
1892 TrackedPointer *tp = VG_(HT_lookup)(o_TrackedPointers, TRACKED_KEY(address));
1893 Int diff = dst - src;
1894 TrackedPointer *ntp = VG_(malloc)((sizeof(TrackedPointer)));
1895 MemBlock *mb = NULL;
1896
1897 tl_assert(tp);
1898
1899 o_stats.liveTrackedPointers++;
1900 o_stats.trackedPointersAllocated++;
1901
1902 /*
1903 ** Get the memory block from the tracked pointer at this address.
1904 */
1905 mb = tp->memBlock;
1906
1907 if(!mb)
1908 {
1909 O_DEBUG("Oops! Copying pointer at %p to block that leaked(%p)",
1910 address, tp->block);
1911 VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(), VG_(clo_backtrace_size));
1912 O_DEBUG("");
1913
1914 VG_(tool_panic)("we lost track of a pointer :-(");
1915 }
1916
1917 tl_assert(ntp);
1918
1919 VG_(memset)(ntp, 0, sizeof(TrackedPointer));
1920 ntp->hdr.key = TRACKED_KEY(address + diff);
1921 o_addMemBlockReference(mb, ntp);
1922
1923 /*
1924 ** Set the PBit for this tracked pointer.
1925 */
1926 o_setPBit(address + diff);
1927
1928 address = o_nextPBit(&pb);
1929 }
1930
1931}
1932
1933static void o_createMemBlock(ThreadId tid, Addr start, SizeT size)
1934{
1935 MemBlock *mb = VG_(malloc)(sizeof(MemBlock));
1936 tl_assert(mb);
1937
1938 o_stats.memoryBlocksAllocated++;
1939 o_stats.liveMemoryBlocks++;
1940
1941 VG_(memset)(mb, 0, sizeof(MemBlock));
1942
1943 /*
1944 ** Populate the block. Note that we have no pointers until one is written
1945 ** into memory.
1946 */
1947 mb->hdr.key = start;
1948 mb->length = size;
sewardj39f34232007-11-09 23:02:28 +00001949 mb->where = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
sewardj99a2ceb2007-11-09 12:30:36 +00001950
1951 /*
1952 O_DEBUG("Creating new MemBlock (%p) key = %p, length %d",
1953 mb, (void *)start, size);
1954 VG_(pp_ExeContext)(mb->where);
1955 */
1956
1957 /*
1958 ** Add this node into the hash table.
1959 */
1960 VG_(HT_add_node)(o_MemBlocks, mb);
1961}
1962
1963static void o_destroyMemBlock(ThreadId tid, Addr start)
1964{
1965 /*
1966 ** Destroy our memory block.
1967 */
1968 MemBlock *mb = VG_(HT_remove)(o_MemBlocks, start);
1969
1970 /*
1971 ** The block really should exist, unless this is a double free attempt...
1972 */
1973 if(!mb)
1974 {
1975 O_DEBUG("Double/Invalid call to free(%p)", start);
1976 VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(), VG_(clo_backtrace_size));
1977 O_DEBUG("");
1978 }
1979 else
1980 {
1981 if(mb->leaked)
1982 {
1983 /*
1984 ** Seems that the block didnt leak after all.
1985 ** *sigh*
1986 ** Why do so many libs access memory in blocks they free()ed?
1987 */
1988 if(o_addSuppressionBlock(mb->where, mb->leaked) && !o_showSummaryOnly)
1989 {
1990 O_DEBUG("Welcome back (and goodbye) to the supposedly leaked block at %p",
1991 start);
1992 }
1993 o_stats.memoryBlocksLeaked--;
1994 o_stats.memoryBlocksLostAndFound++;
1995 }
1996 /*
1997 ** Clean up the block - we pass a pointer pointer so that we can
1998 ** set it to NULL during the cleanup process.
1999 */
2000 o_cleanupMemBlock(&mb);
2001 }
2002
2003 return;
2004}
2005
2006
2007static void o_setupMaybeLast(Addr a)
2008{
2009 int refCount = 0;
2010 /*
2011 ** Maybe returning a value - set the maybeLast and funcEnd members
2012 ** in the memory block this register points to if it is the last
2013 ** item.
2014 */
2015 TrackedPointer *tp = VG_(HT_lookup)(o_TrackedPointers, TRACKED_KEY(a));
2016 /*
2017 ** We really should have the tracked pointer.
2018 */
2019 tl_assert(tp);
2020
2021 refCount = tp->memBlock->refNum;
2022 if(tp->memBlock->shadowing)
2023 {
2024 refCount += tp->memBlock->shadowing->refNum;
2025 }
2026 else if(tp->memBlock->shadowed)
2027 {
2028 refCount += tp->memBlock->shadowed->refNum;
2029 }
2030
2031 if(refCount == 1)
2032 {
2033 // Hmmm, last reference. If we haven't already done so,
2034 // save the context, just in case
2035 tl_assert(!tp->memBlock->maybeLast ||
2036 (tp->memBlock->maybeLast == tp));
2037 if(!tp->memBlock->maybeLast)
2038 {
2039 tp->memBlock->maybeLast = tp;
sewardj39f34232007-11-09 23:02:28 +00002040 tp->memBlock->funcEnd = VG_(record_ExeContext)(VG_(get_running_tid)(),
2041 0/*first_ip_delta*/);
sewardj99a2ceb2007-11-09 12:30:36 +00002042 O_MDEBUG("setting maybeLast to %p in block at %p",
2043 FROM_TRACKED_KEY(tp->hdr.key), tp->block);
2044 }
2045#if defined(O_MASTER_DEBUG)
2046 else
2047 {
2048 O_MDEBUG("leaving maybeLast at %p in block at %p",
2049 FROM_TRACKED_KEY(tp->hdr.key), tp->block);
2050 }
2051#endif
2052 }
2053 O_MDEBUG("leaving register %p", OFFSET_FROM_REG(a));
2054}
2055
2056/*------------------------------------------------------------*/
2057/*--- Helper functions called by instrumentation ---*/
2058/*------------------------------------------------------------*/
2059#if defined(O_TRACK_LOADS)
2060static VG_REGPARM(1)
2061void o_omegaLoadTracker( Addr address )
2062{
2063 O_MDEBUG("o_omegaLoadTracker(%p, %p)", address, *((Addr *)address));
2064
2065 return;
2066}
2067#endif
2068
2069static VG_REGPARM(2)
2070void o_omegaScratchRemover( Addr start, Addr length )
2071{
2072 O_MDEBUG("o_omegaScratchRemover(%p, %p)", start, length);
2073 o_killRange(start, length);
2074
2075 return;
2076}
2077
2078static VG_REGPARM(1)
2079void o_endOfInstruction( Addr address )
2080{
2081 /*
2082 ** Any generated leaks should report immediately.
2083 */
2084 doLeakNow = True;
2085
2086 O_MDEBUG("o_endOfInstruction %p doLeakListCount = %d",
2087 address, doLeakListCount);
2088
2089 if(doLeakListCount)
2090 {
2091 if(doLeakListCount > 1)
2092 {
2093 /*
2094 ** Reverse the list so the reports come out in the correct order.
2095 */
2096 MemBlock *front = NULL;
2097 MemBlock *temp = NULL;
2098
2099 do
2100 {
2101 temp = doLeakList->next;
2102
2103 if(front)
2104 {
2105 doLeakList->next = front;
2106 }
2107 else
2108 {
2109 doLeakList->next = NULL;
2110 }
2111 front = doLeakList;
2112
2113 doLeakList = temp;
2114 }
2115 while(doLeakList);
2116
2117 /*
2118 ** Now do the leak reports.
2119 */
2120 while(front)
2121 {
2122 temp = front;
2123 front = front->next;
2124
2125 if(temp->doLeak)
2126 {
2127 temp->doLeak = False;
2128 o_doLeakReport(temp);
2129 }
2130 else
2131 {
2132 O_MDEBUG("block at %p survived!", temp->hdr.key);
2133 }
2134 }
2135 }
2136 else
2137 {
2138 if(doLeakList->doLeak)
2139 {
2140 /*
2141 ** The block has leaked. Report it.
2142 */
2143 o_doLeakReport(doLeakList);
2144 }
2145 else
2146 {
2147 O_MDEBUG("block at %p survived!", doLeakList->hdr.key);
2148 }
2149
2150 doLeakList->doLeak = False;
2151 doLeakList = NULL;
2152 }
2153 }
2154
2155 O_MDEBUG("o_endOfInstruction done");
2156
2157 o_indirectStack = NULL;
2158 doLeakListCount = 0;
2159 doLeakNow = False;
2160}
2161
2162static
2163void o_omegaFunctionReturn( void )
2164{
2165 PBitContext pb;
2166 Addr a = 0;
2167
2168 /*
2169 ** Zap scratch registers.
2170 */
2171
2172#if defined(VGA_x86)
2173 a = o_firstPBit(&pb,
2174 MAP_TO_REG(VG_(get_running_tid)(), OFFSET_x86_ECX),
2175 OFFSET_x86_EDI + 4);
2176#elif defined(VGA_amd64)
2177 a = o_firstPBit(&pb,
2178 MAP_TO_REG(VG_(get_running_tid)(), OFFSET_amd64_RCX),
2179 OFFSET_amd64_R15 + 8);
2180#endif
2181 doLeakNow = True;
2182 while(a)
2183 {
2184 if(o_isReturnIgnoreReg(OFFSET_FROM_REG(a)))
2185 {
2186 O_MDEBUG("killing register %p", OFFSET_FROM_REG(a));
2187 o_killTrackedPointer(a);
2188 }
2189 a = o_nextPBit(&pb);
2190 }
2191 doLeakNow = False;
2192
2193 /*
2194 ** Now work out if we might be returning a value in the accumulator.
2195 */
2196#if defined(VGA_x86)
2197 a = MAP_TO_REG(VG_(get_running_tid)(), OFFSET_x86_EAX);
2198#elif defined(VGA_amd64)
2199 a = MAP_TO_REG(VG_(get_running_tid)(), OFFSET_amd64_RAX);
2200#endif
2201 if(o_isPBitSet(a))
2202 o_setupMaybeLast(a);
2203
2204#if defined(VGA_amd64)
2205 // Also need to check for the RDX register as it is a second return reg
2206 a = MAP_TO_REG(VG_(get_running_tid)(), OFFSET_amd64_RDX);
2207 if(o_isPBitSet(a))
2208 o_setupMaybeLast(a);
2209#endif
2210 return;
2211}
2212
2213static VG_REGPARM(2)
2214void o_omegaDetector( Addr address, Addr value)
2215{
2216 TrackedPointer *tp = NULL;
2217 MemBlock *mb = NULL;
2218
2219 /*
2220 ** We need to track the registers.
2221 ** To do this, if the address < 256, change it to our local shadow.
2222 **
2223 ** We really want to be able to track the proper shadow but I have no
2224 ** idea yet how to get the address for it. Once I do, use that in
2225 ** preference. Note that all we need is a unique memory location for
2226 ** the register in order to generate a tracked pointer.
2227 */
2228 if(address < 0x100)
2229 {
2230 O_MDEBUG("o_omegaDetector(%p, %p)", address, value);
2231 address = MAP_TO_REG(VG_(get_running_tid)(), address);
2232 }
2233 else
2234 {
2235 /*
2236 ** Check aligned - if not, align it and retrive the stored value.
2237 */
2238 if(address & ~TRACK_MINOR_MASK)
2239 {
2240 address &= TRACK_MINOR_MASK;
2241 value = *((Addr *)address);
2242 }
2243 O_MDEBUG("o_omegaDetector(%p, %p)", address, value);
2244 }
2245
2246 /*
2247 ** Done the alignment tweaks so do the more expensive lookups.
2248 */
2249 if(o_isPBitSet(address))
2250 {
2251 tp = VG_(HT_lookup)(o_TrackedPointers, TRACKED_KEY(address));
2252
2253 if(tp && (tp->block == value))
2254 {
2255 /*
2256 ** Unlikely but it seems that we are writing the same value back into
2257 ** the tracked pointer - don't process further for a small gain.
2258 */
2259 //O_DEBUG("writing duplicate into tracked pointer.");
2260 return;
2261 }
2262
2263 /*
2264 ** We always auto shadow.
2265 ** Note that auto shadowing only works if you overwrite a tracked pointer.
2266 ** Checking for the creation of a new tracked pointer at some internal
2267 ** address is too much overhead as we would have to scan backwards to find
2268 ** a memory block then check if the value is within it. For those cases,
2269 ** we need to get something going with the client request system.
2270 */
2271 if(tp && value)
2272 {
2273 if(o_setupShadow(tp, value))
2274 {
2275 return;
2276 }
2277 }
2278
2279 /*
2280 ** Remove the tracked pointer and clear the PBit,
2281 ** if we have one.
2282 */
2283 if(tp)
2284 {
2285 tl_assert(tp->hdr.key == TRACKED_KEY(address));
2286 O_MDEBUG("Removing tracked pointer to %p at %p",
2287 tp->block, FROM_TRACKED_KEY(tp->hdr.key));
2288 o_clearPBit(address);
2289 o_removeMemBlockReference(NULL, tp);
2290 }
2291 }
2292
2293 /*
2294 ** Get the mem block now - it might not exist if tp was the last
2295 ** reference to it. It might not exist anyway.
2296 */
2297 if(value)
2298 {
2299 mb = VG_(HT_lookup)(o_MemBlocks, value);
2300 }
2301
2302 /*
2303 ** If we have a memblock, clean the tracked pointer then add it.
2304 ** If not, free the tracked pointer.
2305 */
2306 if(mb)
2307 {
2308 if(!tp)
2309 {
2310 /*
2311 ** No tracked pointer - create one now.
2312 */
2313 tp = VG_(malloc)(sizeof(TrackedPointer));
2314 tl_assert(tp);
2315 o_stats.trackedPointersAllocated++;
2316 o_stats.liveTrackedPointers++;
2317 }
2318 VG_(memset)(tp, 0, sizeof(TrackedPointer));
2319 tp->hdr.key = TRACKED_KEY(address);
2320 o_addMemBlockReference(mb, tp);
2321 /*
2322 ** Set the PBit for this tracked pointer.
2323 */
2324 o_setPBit(address);
2325
2326 O_MDEBUG("Added tracked pointer to %p at %p",
2327 tp->block, FROM_TRACKED_KEY(tp->hdr.key));
2328
2329 }
2330 else if(tp)
2331 {
2332 VG_(free)(tp);
2333 o_stats.liveTrackedPointers--;
2334 }
2335
2336 return;
2337}
2338
2339/*------------------------------------------------------------*/
2340/*--- malloc() et al replacement wrappers ---*/
2341/*------------------------------------------------------------*/
2342
2343static
2344void* o_newBlock ( ThreadId tid, SizeT size, SizeT align, Bool is_zeroed )
2345{
2346 void* p = NULL;
2347
2348O_TRACE_ON();
2349#if defined(O_MASTER_DEBUG)
2350 if(!o_traceStop)
2351 {
2352 VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(), 8);O_DEBUG("");
2353 }
2354#endif
2355
2356 O_MDEBUG("newBlock(%d, %d, %d, %d)",
2357 tid,
2358 size,
2359 align,
2360 (int)is_zeroed);
2361
2362 /*
2363 ** Allocate and zero if necessary.
2364 */
2365 p = VG_(cli_malloc)( align, size );
2366 if(!p)
2367 {
2368 O_DEBUG("Out of memory!");
2369 VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(), VG_(clo_backtrace_size));
2370 O_DEBUG("");
2371
2372 return NULL;
2373 }
2374
2375 if(is_zeroed)
2376 {
2377 VG_(memset)(p, 0, size);
2378 }
2379
2380 if(!o_onlyMallocLike)
2381 {
2382 /*
2383 ** Create a new MemBlock.
2384 */
2385 o_createMemBlock(tid, (Addr)p, size);
2386 }
2387
2388 O_MDEBUG("o_newBlock returning %p", p);
2389
2390 return p;
2391}
2392
2393static
2394void o_dieBlock ( ThreadId tid, void* p )
2395{
2396 /*
2397 ** Free off the allocated memory block.
2398 */
2399 O_MDEBUG("o_dieBlock(%d, %p)", tid, p);
2400
2401 /*
2402 ** Check if we have a potentially valid pointer
2403 */
2404 if(!p)
2405 {
2406 return;
2407 }
2408
2409 /*
2410 ** If we are doing malloc like block handling, only free off the memory.
2411 */
2412 if(!o_onlyMallocLike)
2413 {
2414 o_destroyMemBlock(tid, (Addr)p);
2415 }
2416
2417 /*
2418 ** Actually free the heap block.
2419 */
2420 VG_(cli_free)( p );
2421
2422 return;
2423}
2424
2425static void* o_malloc ( ThreadId tid, SizeT n )
2426{
2427 return o_newBlock( tid, n, VG_(clo_alignment), /*is_zeroed*/False );
2428}
2429
2430static void* o__builtin_new ( ThreadId tid, SizeT n )
2431{
2432 return o_newBlock( tid, n, VG_(clo_alignment), /*is_zeroed*/False );
2433}
2434
2435static void* o__builtin_vec_new ( ThreadId tid, SizeT n )
2436{
2437 return o_newBlock( tid, n, VG_(clo_alignment), /*is_zeroed*/False );
2438}
2439
2440static void* o_calloc ( ThreadId tid, SizeT m, SizeT size )
2441{
2442 return o_newBlock( tid, m*size, VG_(clo_alignment), /*is_zeroed*/True );
2443}
2444
2445static void *o_memalign ( ThreadId tid, SizeT align, SizeT n )
2446{
2447 return o_newBlock( tid, n, align, False );
2448}
2449
2450static void o_free ( ThreadId tid, void* p )
2451{
2452 o_dieBlock( tid, p );
2453}
2454
2455static void o__builtin_delete ( ThreadId tid, void* p )
2456{
2457 o_dieBlock( tid, p );
2458}
2459
2460static void o__builtin_vec_delete ( ThreadId tid, void* p )
2461{
2462 o_dieBlock( tid, p );
2463}
2464
2465static void* o_realloc ( ThreadId tid, void* p_old, SizeT new_size )
2466{
2467 MemBlock *mb = NULL;
2468 void *p_new = NULL;
2469
2470 O_MDEBUG("o_realloc p_old %p, new_size %d",
2471 p_old, new_size);
2472
2473 if(!p_old)
2474 {
2475 /*
2476 ** Pointer == NULL so let new block do the work.
2477 */
2478 return o_newBlock(tid, new_size, VG_(clo_alignment), /*is_zeroed*/False);
2479 }
2480
2481 mb = VG_(HT_lookup)(o_MemBlocks, (Addr)p_old);
2482
2483 /*
2484 ** Check that we have this memory block.
2485 */
2486 if(!mb)
2487 {
2488 /*
2489 ** Log the bad call but return p_old so the program can continue.
2490 ** This might not be a good thing but some of the libraries are a
2491 ** little weird and returning NULL as per the spec blows them up...
2492 */
2493 O_DEBUG("Invalid call to realloc(%p)", p_old);
2494 VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(), VG_(clo_backtrace_size));
2495 O_DEBUG("");
2496
2497 return p_old;
2498 }
2499
2500 if(mb->leaked)
2501 {
2502 /*
2503 ** Seems that the block didnt leak after all.
2504 */
2505 if(o_addSuppressionBlock(mb->where, mb->leaked) && !o_showSummaryOnly)
2506 {
2507 O_DEBUG("Welcome back to the supposedly leaked block at %p",
2508 p_old);
2509 }
2510 mb->leaked = NULL;
2511 o_stats.memoryBlocksLeaked--;
2512 o_stats.memoryBlocksLostAndFound++;
2513 }
2514
2515 if(new_size)
2516 {
2517 if(new_size > mb->length)
2518 {
2519 /*
2520 ** Make a new block, copy the data into it then free the old block.
2521 ** We lose all tracked pointers but that is to be expected as this is
2522 ** a new block at a new address. However, any tracked pointers within
2523 ** must be preserved.
2524 */
2525
2526 p_new = o_newBlock(tid, new_size, VG_(clo_alignment), False);
2527 tl_assert(p_new);
2528
2529 VG_(memcpy)(p_new, p_old, mb->length);
2530
2531 o_duplicateTrackedPointers((Addr)p_new, (Addr)p_old, mb->length);
2532 }
2533 else
2534 {
2535 /*
2536 ** Return the existing block.
2537 */
2538 return p_old;
2539 }
2540 }
2541
2542 /*
2543 ** This will remove all of the old tracked pointers within.
2544 */
2545 o_dieBlock(tid, p_old);
2546
2547 return p_new;
2548}
2549
2550static void o_dieMemStack(Addr start, SizeT length)
2551{
2552 /*
2553 ** Flag that this is a stack unwind.
2554 */
2555 o_stackUnwind = True;
2556 o_killRange(start, length);
2557 o_stackUnwind = False;
2558}
2559
2560static void o_post_clo_init(void)
2561{
2562 /*
2563 ** Allocate the hash tables.
2564 ** Note that we can improve performance at the cost of memory by initialising
2565 ** with a larger prime number so more of the key part of the address is
2566 ** unique. The defaults are probably OK for many programs but we expose them
2567 ** on the command line to make it easier for users to change them.
2568 */
2569 o_PBits = VG_(HT_construct)( "omega pbits" );
2570 tl_assert(o_PBits);
2571
2572 o_MemBlocks = VG_(HT_construct)( "omega memblocks" );
2573 tl_assert(o_MemBlocks);
2574
2575 o_TrackedPointers = VG_(HT_construct)( "omega tracked ptrs" );
2576 tl_assert(o_TrackedPointers);
2577
2578 /*
2579 ** We need precise instructions so that we can work out the range of the
2580 ** original machine instruction in terms of grouping together lumps of IR.
2581 ** We lose out big time on optimisation but we have to take the hit in order
2582 ** to deal with instructions like pop and xchg.
2583 */
2584 VG_(clo_vex_control).iropt_precise_memory_exns = True;
2585
2586}
2587
2588static IRSB *
2589o_instrument(VgCallbackClosure* closure,
2590 IRSB* bb_in,
2591 VexGuestLayout* layout,
2592 VexGuestExtents* vge,
2593 IRType gWordTy, IRType hWordTy)
2594{
2595 IRDirty* di;
2596 Int i;
2597 IRSB* bb;
2598 IRType type;
2599 Addr mask;
2600 IRStmt* stackReg = NULL;
2601
2602#if 0 //defined(O_MASTER_DEBUG)
2603
2604 static int thisBlock = 0;
2605 thisBlock++;
2606 if(thisBlock == 11377)
2607 {
2608 O_TRACE_ON();
2609 }
2610 else if(thisBlock == 11390)
2611 {
2612 VG_(tool_panic)("hit stop block");
2613 }
2614#endif
2615
2616 if (gWordTy != hWordTy)
2617 {
2618 /* We don't currently support this case. */
2619 VG_(tool_panic)("host/guest word size mismatch");
2620 }
2621
2622 /*
2623 ** Set up BB
2624 */
2625 bb = emptyIRSB();
2626 bb->tyenv = deepCopyIRTypeEnv(bb_in->tyenv);
2627 bb->next = deepCopyIRExpr(bb_in->next);
2628 bb->jumpkind = bb_in->jumpkind;
2629
2630#if (VG_WORDSIZE == 4)
2631 type = Ity_I32;
2632 mask = ~0x03;
2633#elif (VG_WORDSIZE == 8)
2634 type = Ity_I64;
2635 mask = ~0x07;
2636#endif
2637
2638 for (i = 0; i < bb_in->stmts_used; i++)
2639 {
2640 IRStmt* st = bb_in->stmts[i];
2641 if (!st || st->tag == Ist_NoOp)
2642 {
2643 continue;
2644 }
2645
2646 di = NULL;
2647
2648 switch (st->tag)
2649 {
2650 case Ist_AbiHint:
2651 /*
2652 ** An area just went undefined. There may be pointers in this
2653 ** scratch area that we should now ignore.
2654 ** Make sure that we do so.
2655 */
2656 if(stackReg)
2657 {
2658 addStmtToIRSB( bb, stackReg );
2659 stackReg = NULL;
2660 }
2661 di = unsafeIRDirty_0_N( 2, "o_omegaScratchRemover",
2662 &o_omegaScratchRemover,
2663 mkIRExprVec_2(st->Ist.AbiHint.base,
2664 mkIRExpr_HWord(st->Ist.AbiHint.len)));
2665 /*
2666 ** Add in the original instruction second.
2667 */
2668 addStmtToIRSB( bb, IRStmt_Dirty(di) );
2669 break;
2670
2671 case Ist_Store:
2672 if(stackReg)
2673 {
2674 addStmtToIRSB( bb, stackReg );
2675 stackReg = NULL;
2676 }
2677 if(typeOfIRExpr(bb->tyenv, st->Ist.Store.addr) == type)
2678 {
2679 /*
2680 ** We have an address of native size.
2681 */
2682 if(typeOfIRExpr(bb->tyenv, st->Ist.Store.data) == type)
2683 {
2684 /*
2685 ** We have data of native size - check if this is a pointer being
2686 ** written.
2687 */
2688 di = unsafeIRDirty_0_N( 2, "o_omegaDetector", &o_omegaDetector,
2689 mkIRExprVec_2(st->Ist.Store.addr,
2690 st->Ist.Store.data));
2691 /*
2692 ** Add in the original instruction second.
2693 */
2694 addStmtToIRSB( bb, IRStmt_Dirty(di) );
2695 addStmtToIRSB( bb, st );
2696 st = NULL;
2697 }
2698 else
2699 {
2700 /*
2701 ** There is no way that the data is a pointer but we still have to
2702 ** check if a pointer will be overwritten.
2703 */
2704 di = unsafeIRDirty_0_N( 2, "o_omegaDetector", &o_omegaDetector,
2705 mkIRExprVec_2(st->Ist.Store.addr,
2706 mkIRExpr_HWord(0)));
2707 /*
2708 ** Add in the original instruction first.
2709 */
2710 addStmtToIRSB( bb, st );
2711 addStmtToIRSB( bb, IRStmt_Dirty(di) );
2712 st = NULL;
2713 }
2714 }
2715 else
2716 {
2717 O_GDEBUG("o_instrument address type(%p) not a pointer",
2718 typeOfIRExpr(bb->tyenv, st->Ist.Store.addr));
2719 }
2720
2721 break;
2722
2723 case Ist_IMark:
2724 /*
2725 ** Call the end of instruction callback. This is to check what actually
2726 ** leaked as opposed to what appeared to leak in a transient fashion
2727 ** due to instructions getting broken up into more simple IR
2728 ** instructions. Note that stack register updates are moved to
2729 ** the end of the orginal instruction so that things like 'pop' get
2730 ** the values into registers BEFORE the stack is invalidated.
2731 */
2732 if(stackReg)
2733 {
2734 addStmtToIRSB( bb, stackReg );
2735 stackReg = NULL;
2736 }
2737 di = unsafeIRDirty_0_N( 1, "o_endOfInstruction", &o_endOfInstruction,
2738 mkIRExprVec_1(mkIRExpr_HWord(st->Ist.IMark.addr)));
2739 addStmtToIRSB( bb, IRStmt_Dirty(di) );
2740 addStmtToIRSB( bb, st );
2741#if defined(VGA_x86)
2742 /*
2743 ** Make sure the EIP sim cpu register gets updated or our stack
2744 ** traces go a little Pete Tong...
2745 ** If this duplicates, the ir optimisation will knock one of them out.
2746 */
2747 addStmtToIRSB( bb, IRStmt_Put(OFFSET_x86_EIP,
2748 mkIRExpr_HWord(st->Ist.IMark.addr)));
2749#endif
2750 st = NULL;
2751 break;
2752
2753 case Ist_Put:
2754 /*
2755 ** Track the general purpose registers.
2756 */
2757 switch(st->Ist.Put.offset & mask)
2758 {
2759#if defined(VGA_x86)
2760 case OFFSET_x86_ESP:
2761#elif defined(VGA_amd64)
2762 case OFFSET_amd64_RSP:
2763#endif
2764 /*
2765 ** Save the stack register update - we will add it at the end of
2766 ** the instruction.
2767 */
2768 stackReg = st;
2769 st = NULL;
2770 break;
2771
2772#if defined(VGA_x86)
2773
2774 case OFFSET_x86_EAX:
2775 case OFFSET_x86_EBX:
2776 case OFFSET_x86_ECX:
2777 case OFFSET_x86_EDX:
2778 case OFFSET_x86_ESI:
2779 case OFFSET_x86_EDI:
2780 case OFFSET_x86_EBP:
2781
2782#if 0 //defined(O_MASTER_DEBUG)
2783 case OFFSET_x86_EIP:
2784#endif
2785
2786#elif defined(VGA_amd64)
2787
2788 case OFFSET_amd64_RAX:
2789 case OFFSET_amd64_RBX:
2790 case OFFSET_amd64_RCX:
2791 case OFFSET_amd64_RDX:
2792 case OFFSET_amd64_RSI:
2793 case OFFSET_amd64_RDI:
2794 case OFFSET_amd64_RBP:
2795 case OFFSET_amd64_R8:
2796 case OFFSET_amd64_R9:
2797 case OFFSET_amd64_R10:
2798 case OFFSET_amd64_R11:
2799 case OFFSET_amd64_R12:
2800 case OFFSET_amd64_R13:
2801 case OFFSET_amd64_R14:
2802 case OFFSET_amd64_R15:
2803
2804#if 0 //defined(O_MASTER_DEBUG)
2805 case OFFSET_amd64_RIP:
2806#endif
2807
2808#elif defined(VGA_ppc32)
2809
2810#error I know even less about PPC than x86 - please add appropriate registers
2811
2812#elif defined(VGA_ppc64)
2813
2814#error I know even less about PPC than x86 - please add appropriate registers
2815
2816#else
2817
2818#error Unknown arch
2819
2820#endif
2821 {
2822 if(typeOfIRExpr(bb->tyenv, st->Ist.Put.data) == type)
2823 {
2824 /*
2825 ** This is a put to a register in the simulated processor of data
2826 ** that could be a pointer.
2827 */
2828 di = unsafeIRDirty_0_N( 2, "o_omegaDetector", &o_omegaDetector,
2829 mkIRExprVec_2(mkIRExpr_HWord(st->Ist.Put.offset),
2830 st->Ist.Put.data));
2831 }
2832 else
2833 {
2834 /*
2835 ** There is no way that the data is a pointer but we still have
2836 ** to check if a pointer in a register will be overwritten.
2837 */
2838 di = unsafeIRDirty_0_N( 2, "o_omegaDetector", &o_omegaDetector,
2839 mkIRExprVec_2(mkIRExpr_HWord(st->Ist.Put.offset),
2840 mkIRExpr_HWord(0)));
2841 }
2842 /*
2843 ** Add in the original instruction first.
2844 */
2845 addStmtToIRSB( bb, st );
2846 addStmtToIRSB( bb, IRStmt_Dirty(di) );
2847 st = NULL;
2848 }
2849 break; // Register Cases
2850 }
2851 break; // Ist_Put
2852
2853#if defined(O_TRACK_LOADS)
2854 case Ist_Tmp:
2855 /*
2856 ** Debug to see how 'leaked' references survive.
2857 ** (From experience, mostly through illegal reads from
2858 ** free()ed blocks.)
2859 */
2860 if(st->Ist.Tmp.data->tag == Iex_Load)
2861 {
2862 if(typeOfIRExpr(bb->tyenv, st->Ist.Tmp.data->Iex.Load.addr) == type)
2863 {
2864 di = unsafeIRDirty_0_N( 1, "o_omegaLoadTracker", &o_omegaLoadTracker,
2865 mkIRExprVec_1(st->Ist.Tmp.data->Iex.Load.addr));
2866 /*
2867 ** Add in the original instruction first.
2868 */
2869 addStmtToIRSB( bb, st );
2870 addStmtToIRSB( bb, IRStmt_Dirty(di) );
2871 st = NULL;
2872 }
2873 }
2874 break;
2875#endif
2876
2877 default:
2878 break;
2879 }
2880
2881 /*
2882 ** Add in the original instruction if we havent already done so.
2883 */
2884 if(st)
2885 {
2886 addStmtToIRSB( bb, st );
2887 }
2888 }
2889
2890 if(stackReg)
2891 {
2892 addStmtToIRSB( bb, stackReg );
2893 stackReg = NULL;
2894 }
2895
2896 if(bb->jumpkind == Ijk_Ret)
2897 {
2898 /*
2899 ** The client is doing a return. This is the point to invalidate
2900 ** registers that belong to the callee, possibly generating a
2901 ** leak report. This is to catch things like foo(malloc(128)).
2902 */
2903
2904 di = unsafeIRDirty_0_N( 0, "o_omegaFunctionReturn",
2905 &o_omegaFunctionReturn,
2906 mkIRExprVec_0());
2907 /*
2908 ** Add in the new instruction.
2909 */
2910 addStmtToIRSB( bb, IRStmt_Dirty(di) );
2911 }
2912
2913 return bb;
2914}
2915
2916/*------------------------------------------------------------*/
2917/*--- Client Request Handling ---*/
2918/*------------------------------------------------------------*/
2919static Bool o_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
2920{
2921 if (!VG_IS_TOOL_USERREQ('O','M',arg[0]) &&
2922 VG_USERREQ__MALLOCLIKE_BLOCK != arg[0] &&
2923 VG_USERREQ__FREELIKE_BLOCK != arg[0])
2924 return False;
2925
2926 switch (arg[0])
2927 {
2928 case VG_USERREQ__ENTERING_MAIN:
2929 {
2930 /*
2931 ** Allow leak reports whilst inside main().
2932 */
2933 o_inhibitLeakDetect = False;
2934 }
2935 break;
2936
2937 case VG_USERREQ__LEAVING_MAIN:
2938 {
2939 /*
2940 ** Stop any more leak reports - they won't be helpfull.
2941 */
2942 o_inhibitLeakDetect = True;
2943
2944O_TRACE_OFF();
2945
2946 }
2947 break;
2948
2949 case VG_USERREQ__MALLOCLIKE_BLOCK:
2950 {
2951 if(o_onlyMallocLike)
2952 {
2953 /*
2954 ** Either we use malloc like block or we don't.
2955 ** Trying to auto track and do malloc like block handling together
2956 ** is asking for trouble.
2957 */
2958 Addr p = (Addr)arg[1];
2959 SizeT size = arg[2];
2960
2961 o_createMemBlock(tid, p, size);
2962 }
2963 }
2964 break;
2965
2966 case VG_USERREQ__FREELIKE_BLOCK:
2967 {
2968 if(o_onlyMallocLike)
2969 {
2970 /*
2971 ** Either we use malloc like block or we don't.
2972 ** Trying to auto track and do malloc like block handling together
2973 ** is asking for trouble.
2974 */
2975 Addr p = (Addr)arg[1];
2976
2977 o_destroyMemBlock(tid, p);
2978 }
2979 }
2980 break;
2981 }
2982
2983 return True;
2984}
2985
2986/*------------------------------------------------------------*/
2987/*--- Circular Reference Detection ---*/
2988/*------------------------------------------------------------*/
2989/*
2990** Check for circular references. This is where a memory block holds a
2991** reference to another memory block and vice versa but there are no
2992** references that are external. Like this:
2993
2994 typedef struct
2995 {
2996 void *linkedBlock;
2997 char padding[120];
2998 } block;
2999
3000 block *p1 = NULL;
3001 block *p2 = NULL;
3002
3003 p1 = (block *)malloc(sizeof(block));
3004 p2 = (block *)malloc(sizeof(block));
3005
3006 p1->linkedBlock = p2;
3007 p2->linkedBlock = p1;
3008
3009** As you can see, the blocks wont be seen to leak because they have a live
3010** reference but the reality is that without an external reference, these
3011** blocks are lost to the system.
3012**
3013** To perform this test, we go through the following stages:
3014**
3015** 1) Generate a binary tree of the memory covered by the allocated blocks
3016** 2) Check every tracked pointer of every allocated block and mark the
3017** block if any of them fall outside of an allocated block.
3018** 3) For each block with an external pointer, recursivly walk through the
3019** internal pointers to other blocks, marking the blocks as also having
3020** an external pointer.
3021** 4) Report any blocks without external references.
3022**
3023*/
3024
3025typedef struct _TreeNode{
3026 Addr start;
3027 Addr end;
3028 MemBlock *block;
3029 struct _TreeNode *left;
3030 struct _TreeNode *right;
3031} TreeNode;
3032
3033static TreeNode *o_treeRoot = NULL;
3034static MemBlock **o_memblockList = NULL;
3035static UInt o_memblockListCount = 0;
3036static BlockRecordList o_circularRecords = {NULL, NULL};
3037
3038static
3039TreeNode *o_findTreeNode(Addr addr, TreeNode *start, TreeNode ***parent)
3040{
3041 /*
3042 ** Find the treenode that this address falls within and return it.
3043 ** Return NULL if no matching node is found and return the parent if it is
3044 ** requested.
3045 */
3046
3047 /*
3048 ** If the treeRoot is NULL, we won't be finding anything.
3049 */
3050 if(!o_treeRoot)
3051 {
3052 if(parent)
3053 {
3054 *parent = &o_treeRoot;
3055 }
3056
3057 return NULL;
3058 }
3059
3060 /*
3061 ** The start should be a valid node.
3062 */
3063 tl_assert(start);
3064
3065 if((addr >= start->start) &&
3066 (addr <= start->end))
3067 {
3068 /*
3069 ** Found it
3070 */
3071 return start;
3072 }
3073
3074 if(addr < start->start)
3075 {
3076 /*
3077 ** Less than - go left if we can, return NULL if we can't.
3078 */
3079 if(start->left)
3080 {
3081 return o_findTreeNode(addr, start->left, parent);
3082 }
3083 else
3084 {
3085 if(parent)
3086 {
3087 *parent = &start->left;
3088 }
3089
3090 return NULL;
3091 }
3092 }
3093 else
3094 {
3095 /*
3096 ** Greater than - go right if we can, return NULL if we can't.
3097 */
3098 if(start->right)
3099 {
3100 return o_findTreeNode(addr, start->right, parent);
3101 }
3102 else
3103 {
3104 if(parent)
3105 {
3106 *parent = &start->right;
3107 }
3108
3109 return NULL;
3110 }
3111 }
3112
3113 VG_(tool_panic)("fell out of the binary tree");
3114}
3115
3116static UInt o_buildMemblockTree(void)
3117{
3118 /*
3119 ** Build a binary tree of the addresses covered by the memory blocks.
3120 ** We dont do anything to balance things so this could decompose to a
3121 ** linear structure. Thankfully, we are not in a time critical section.
3122 */
3123 UInt index;
3124
3125 o_memblockList = (MemBlock **)VG_(HT_to_array)(o_MemBlocks,
3126 &o_memblockListCount);
3127
3128 for(index = 0; index < o_memblockListCount; index++)
3129 {
3130 TreeNode **parent = NULL;
3131 TreeNode *tn = NULL;
3132 MemBlock *mb = o_memblockList[index];
3133
3134 /*
3135 ** Only process main blocks that havent leaked.
3136 */
3137 if(!mb->shadowing && !mb->leaked)
3138 {
3139 if(o_findTreeNode(mb->hdr.key, o_treeRoot, &parent))
3140 {
3141 VG_(tool_panic)("Failed to grow the binary tree.");
3142 }
3143
3144 /*
3145 ** We should have a pointer to the parent
3146 */
3147 tl_assert(parent);
3148
3149 /*
3150 ** Create and populate the new node
3151 */
3152 tn = VG_(malloc)(sizeof(TreeNode));
3153 VG_(memset)(tn, 0, sizeof(TreeNode));
3154
3155 tn->start = mb->hdr.key;
3156 tn->end = tn->start + mb->length;
3157 tn->block = mb;
3158
3159 /*
3160 ** Add this node into the parent node
3161 */
3162 *parent = tn;
3163 }
3164 }
3165
3166 return o_memblockListCount;
3167}
3168
3169static void o_checkExternalPointers(void)
3170{
3171 UInt index;
3172
3173 for(index = 0; index < o_memblockListCount; index++)
3174 {
3175 MemBlock *mb = o_memblockList[index];
3176
3177 /*
3178 ** Only check blocks that haven't leaked.
3179 ** We process through shadow blocks because we want the back references
3180 ** as they still point within the shadowed block.
3181 */
3182 if(!mb->leaked)
3183 {
3184 UInt pointerIndex;
3185
3186 for(pointerIndex = 0; pointerIndex < mb->refNum; pointerIndex++)
3187 {
3188 if(!o_findTreeNode(FROM_TRACKED_KEY(mb->pointers[pointerIndex]->hdr.key),
3189 o_treeRoot, NULL))
3190 {
3191 /*
3192 ** External reference. Mark the block and stop checking.
3193 */
3194 mb->external = 1;
3195 break;
3196 }
3197 }
3198 }
3199 }
3200}
3201
3202static void o_rippleExternelPointers(MemBlock *mb)
3203{
3204 UInt index;
3205
3206 if(!mb)
3207 {
3208 /*
3209 ** Iterate through the memory block list marking external blocks
3210 ** so that we dont process the same blocks twice.
3211 */
3212 for(index = 0; index < o_memblockListCount; index++)
3213 {
3214 if(o_memblockList[index]->external > 0)
3215 {
3216 o_memblockList[index]->external = -1;
3217 o_rippleExternelPointers(o_memblockList[index]);
3218 }
3219 }
3220 }
3221 else
3222 {
3223 /*
3224 ** We are recursing.
3225 ** Follow any tracked pointers within our block, marking the target
3226 ** blocks as external and recursing on those blocks.
3227 */
3228 PBitContext pb;
3229 Addr a;
3230 TreeNode *tn = NULL;
3231
3232 a = o_firstPBit(&pb, mb->hdr.key, mb->length);
3233 while(a)
3234 {
3235 tn = o_findTreeNode(a, o_treeRoot, NULL);
3236
3237 /*
3238 ** We really should have a node
3239 */
3240 tl_assert(tn);
3241
3242 /*
3243 ** If we havent already done so, mark the block as external and
3244 ** processed then recurse on it.
3245 */
3246 if(tn->block->external >= 0)
3247 {
3248 tn->block->external = -1;
3249 o_rippleExternelPointers(tn->block);
3250 }
3251
3252 /*
3253 ** Get the next tracked pointer within this block.
3254 */
3255 a = o_nextPBit(&pb);
3256 }
3257 }
3258}
3259
3260static int o_reportCircularBlocks(void)
3261{
3262 int count = 0;
3263 BlockRecord *block = NULL;
3264 int index;
3265
3266 /*
3267 ** Iterate through the memory block list reporting any blocks not marked
3268 ** as external.
3269 ** We aggregate the list of blocks as many could come from the same context.
3270 */
3271 for(index = 0; index < o_memblockListCount; index++)
3272 {
3273 MemBlock * mb = o_memblockList[index];
3274 if(!mb->shadowing && !mb->leaked && mb->external == 0)
3275 {
3276 block = o_findBlockRecord(&o_circularRecords, mb->where, NULL);
3277
3278 if(block)
3279 {
3280 /*
3281 ** Just increment the counts.
3282 */
3283 block->bytes += mb->length;
3284 block->count++;
3285 }
3286 else
3287 {
3288 /*
3289 ** Create a new block and add it to the circular records list.
3290 */
3291 BlockRecord *item = VG_(malloc)(sizeof(BlockRecord));
3292 tl_assert(item);
3293
3294 item->count = 1;
3295 item->bytes = mb->length;
3296 item->next = item->prev = NULL;
3297 item->allocated = mb->where;
3298 item->leaked = NULL;
3299
3300 o_addBlockRecord(&o_circularRecords, item);
3301 }
3302 }
3303 }
3304
3305 /*
3306 ** Now report the blocks.
3307 */
3308 block = o_circularRecords.start;
3309 while(block)
3310 {
3311 if(!count)
3312 {
3313 VG_(message)(Vg_UserMsg, "The following blocks only have circular references from other blocks");
3314 }
3315 count++;
3316
3317 VG_(message)(Vg_UserMsg, " Circular loss record %d", count);
3318 VG_(message)(Vg_UserMsg, " Leaked %d (%p) bytes in %d block%sallocated",
3319 block->bytes,
3320 block->bytes,
3321 block->count,
3322 (block->count == 1) ? " " : "s ");
3323 VG_(pp_ExeContext)(block->allocated);
3324 VG_(message)(Vg_UserMsg,"");
3325
3326 /*
3327 ** Get the next block, if any.
3328 */
3329 block = block->next;
3330 }
3331
3332 return count;
3333}
3334
3335static int o_checkCircular(void)
3336{
3337 int count = 0;
3338
3339 /*
3340 ** If there is nothing in the tree, there is nothing to check.
3341 */
3342 if(o_buildMemblockTree())
3343 {
3344 o_checkExternalPointers();
3345 o_rippleExternelPointers(NULL);
3346 count = o_reportCircularBlocks();
3347 }
3348
3349 return count;
3350}
3351
3352static void o_fini(Int exitcode)
3353{
3354 /*
3355 ** Iterate through the leaked block record list,
3356 ** printing out the stats as we go.
3357 */
3358 UInt count = 1;
3359 BlockRecord *record = o_leakRecords.start;
3360
3361 VG_(message)(Vg_UserMsg,"");
3362 VG_(message)(Vg_UserMsg,"");
3363 VG_(message)(Vg_UserMsg,"Omega Leak Summary");
3364 VG_(message)(Vg_UserMsg,"==================");
3365
3366 while(record)
3367 {
3368 VG_(message)(Vg_UserMsg,
3369 "Loss Record %d: Leaked %d (%p) bytes in %d block%s",
3370 count, record->bytes, record->bytes, record->count,
3371 (record->count > 1) ? "s" : "");
3372 VG_(pp_ExeContext)(record->leaked);
3373 VG_(message)(Vg_UserMsg, " Block%s allocated",
3374 (record->count > 1) ? "s" : "");
3375 VG_(pp_ExeContext)(record->allocated);
3376 VG_(message)(Vg_UserMsg,"");
3377
3378 count++;
3379 record = record->next;
3380 }
3381
3382 if(o_showCircular)
3383 {
3384 /*
3385 ** Now check for circular references.
3386 */
3387 count += o_checkCircular();
3388 }
3389
3390 if(count == 1)
3391 {
3392 /*
3393 ** Nothing leaked - assure the user.
3394 */
3395 VG_(message)(Vg_UserMsg,"No leaks to report.");
3396 VG_(message)(Vg_UserMsg,"");
3397 }
3398
3399 /*
3400 ** Remove the leaked blocks from the live blocks count - they wont be
3401 ** coming back now...
3402 */
3403 o_stats.liveMemoryBlocks -= o_stats.memoryBlocksLeaked;
3404
3405 if(o_showInternStats)
3406 {
3407 VG_(printf)("\n\n\n"
3408 "Omega internal statistics summary:\n"
3409 " Tracked Pointers still live: %ld\n"
3410 " Tracked Pointers Allocated: %ld\n"
3411 " Memory Blocks still live: %ld\n"
3412 " Memory Blocks Allocated: %ld\n"
3413 " Shadow Memory Blocks Allocated: %ld\n"
3414 " Memory Blocks Leaked: %ld\n"
3415 " Memory Blocks Lost and Found: %ld\n"
3416 " pbitNodes: %ld\n\n",
3417 o_stats.liveTrackedPointers,
3418 o_stats.trackedPointersAllocated,
3419 o_stats.liveMemoryBlocks,
3420 o_stats.memoryBlocksAllocated,
3421 o_stats.shadowMemoryBlocksAllocated,
3422 o_stats.memoryBlocksLeaked,
3423 o_stats.memoryBlocksLostAndFound,
3424 o_stats.pbitNodes);
3425 }
3426}
3427
3428static Bool o_process_cmd_line_option(Char *arg)
3429{
3430 /*
3431 ** Setup our processing state based upon what the user would like us to do.
3432 */
3433 Int pbithash = 0;
3434 Int mbhash = 0;
3435 Int tphash = 0;
3436
3437 /*
3438 ** Expose the hash sizes for simple performance tweaking.
3439 */
3440 VG_NUM_CLO(arg, "--pbithashsize", pbithash);
3441 VG_NUM_CLO(arg, "--mbhashsize", mbhash);
3442 VG_NUM_CLO(arg, "--tphashsize", tphash);
3443
3444 /*
3445 ** Only tweak upwards for now.
3446 */
3447 if(pbithash > o_pbitNodeHashSize)
3448 o_pbitNodeHashSize = pbithash;
3449
3450 if(mbhash > o_memblockHashSize)
3451 o_memblockHashSize = mbhash;
3452
3453 if(tphash > o_trackedPointerHashSize)
3454 o_trackedPointerHashSize = tphash;
3455
3456 /*
3457 ** Check the flags.
3458 */
3459 if(VG_CLO_STREQ(arg, "--only-malloclike"))
3460 o_onlyMallocLike = True;
3461 else if(VG_CLO_STREQ(arg, "--show-indirect"))
3462 o_showIndirect = True;
3463 else if(VG_CLO_STREQ(arg, "--show-circular"))
3464 o_showCircular = True;
3465 else if(VG_CLO_STREQ(arg, "--show-hanging"))
3466 o_showHanging = True;
3467 else if(VG_CLO_STREQ(arg, "--show-intern-stats"))
3468 o_showInternStats = True;
3469 else if(VG_CLO_STREQ(arg, "--instant-reports"))
3470 o_showSummaryOnly = False;
3471 else if(VG_CLO_STREQ(arg, "--poison"))
3472 o_poison = True;
3473 else
3474 return VG_(replacement_malloc_process_cmd_line_option)(arg);
3475
3476 return True;
3477}
3478
3479static void o_print_usage(void)
3480{
3481 /*
3482 ** Tell the average user what we support.
3483 */
3484 VG_(printf)("");
3485 VG_(printf)(
3486" --only-malloclike only track blocks passed through the\n"
3487" MALLOCLIKE_BLOCK user request.\n"
3488" --show-indirect show indirect leaks from leaked blocks.\n"
3489" --show-circular show blocks that just have circular references.\n"
3490" --instant-reports show leaks as they happen, not just a summary.\n"
3491" --show-hanging show hanging pointers to the block being\n"
3492" deallocated.\n"
3493 );
3494
3495}
3496
3497static void o_print_debug_usage(void)
3498{
3499 /*
3500 ** Tell the inquisitive user what else we support.
3501 */
3502 VG_(printf)("");
3503 VG_(printf)(
3504" --show-intern-stats show some internal statistics from the run.\n"
3505"\n"
3506" IMPORTANT! These next settings must be PRIME NUMBERS\n"
3507"\n"
3508" --pbithashsize=<number> number of pbit nodes to allocate [%d]\n"
3509" --mbhashsize=<number> number of mem block nodes to allocate [%d]\n"
3510" --tphashsize=<number> number of tracked pointer nodes to allocate [%d]\n",
3511 o_pbitNodeHashSize,
3512 o_memblockHashSize,
3513 o_trackedPointerHashSize
3514 );
3515}
3516
3517static void o_memRemapSupport(Addr src, Addr dst, SizeT length)
3518{
3519 /*
3520 ** The track_copy_mem_remap callback has the src and dst the opposite
3521 ** way around to our duplicate tracked pointers function so this tiny
3522 ** wrapper twizzles them around.
3523 */
3524 o_duplicateTrackedPointers(dst, src, length);
3525}
3526
3527static void o_pre_clo_init(void)
3528{
3529 // Details
3530 VG_(details_name) ("exp-omega");
3531 VG_(details_version) ("RC1");
3532 VG_(details_description) ("an instant memory leak detector");
3533 VG_(details_copyright_author)("Copyright (C) 2006-2007, and GNU GPL'd, "
3534 "by Bryan Meredith.");
3535 VG_(details_bug_reports_to) ("omega at brainmurders d eclipse d co d uk");
3536
3537 // Basic functions
3538 VG_(basic_tool_funcs) (o_post_clo_init,
3539 o_instrument,
3540 o_fini);
3541 // Needs
3542 VG_(needs_malloc_replacement) (o_malloc,
3543 o__builtin_new,
3544 o__builtin_vec_new,
3545 o_memalign,
3546 o_calloc,
3547 o_free,
3548 o__builtin_delete,
3549 o__builtin_vec_delete,
3550 o_realloc,
3551 0 );
3552 // Want stack unwinds
3553 VG_(track_die_mem_stack) (o_dieMemStack);
3554 // Need command line input
3555 VG_(needs_command_line_options) (o_process_cmd_line_option,
3556 o_print_usage,
3557 o_print_debug_usage);
3558 // Support MALLOCLIKE and FREELIKE
3559 VG_(needs_client_requests) (o_handle_client_request);
3560
3561 // Wholesale destruction of memory ranges
3562 VG_(track_copy_mem_remap) (o_memRemapSupport );
3563 VG_(track_die_mem_stack_signal)(o_killRange);
3564 VG_(track_die_mem_brk) (o_killRange);
3565 VG_(track_die_mem_munmap) (o_killRange);
3566
3567}
3568
3569VG_DETERMINE_INTERFACE_VERSION(o_pre_clo_init);
3570
3571/*--------------------------------------------------------------------*/
3572/*--- end ---*/
3573/*--------------------------------------------------------------------*/