blob: 605e9754dcb29b18cb083f48e514b0db8dd915c3 [file] [log] [blame]
sewardj99a2ceb2007-11-09 12:30:36 +00001/*--------------------------------------------------------------------*/
2/*--- The Omega tool: traces memory allocations and alerts when ---*/
3/*--- the final reference to an allocated block dies. ---*/
4/*--- o_main.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of Omega, a Valgrind tool for detecting memory
9 leaks as they occur.
10
11 Copyright (C) 2006-2007 Bryan "Brain Murders" Meredith
sewardj99a2ceb2007-11-09 12:30:36 +000012 (A note of personal thanks to my employers at Apertio (www.apertio.com)
13 for allowing the use of their time, equipment for 64bit testing and
14 providing moral support.)
15
16 Partly based upon other Valgrind tools
sewardj39f34232007-11-09 23:02:28 +000017 Copyright (C) 2000-2007 Julian Seward, Nicholas Nethercote et al.
sewardj99a2ceb2007-11-09 12:30:36 +000018 jseward@acm.org
19 njn@valgrind.org
20
21 This program is free software; you can redistribute it and/or
22 modify it under the terms of the GNU General Public License as
23 published by the Free Software Foundation; either version 2 of the
24 License, or (at your option) any later version.
25
26 This program is distributed in the hope that it will be useful, but
27 WITHOUT ANY WARRANTY; without even the implied warranty of
28 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
29 General Public License for more details.
30
31 You should have received a copy of the GNU General Public License
32 along with this program; if not, write to the Free Software
33 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
34 02111-1307, USA.
35
36 The GNU General Public License is contained in the file COPYING.
sewardj5b8ab5b2007-11-30 21:52:27 +000037
38 The current maintainer is Rich Coe <richard.coe@med.ge.com>.
sewardj99a2ceb2007-11-09 12:30:36 +000039*/
40
41/*
42** Read the tool documentation for an explaination of the ideas
43** behind this implementation.
44*/
45
46#include "pub_tool_basics.h"
47#include "pub_tool_libcassert.h"
48#include "pub_tool_tooliface.h"
49#include "pub_tool_hashtable.h"
50#include "pub_tool_libcbase.h"
51#include "pub_tool_libcprint.h"
52#include "pub_tool_libcassert.h"
53#include "pub_tool_mallocfree.h"
54#include "pub_tool_replacemalloc.h"
55#include "pub_tool_machine.h"
56#include "pub_tool_threadstate.h"
57#include "pub_tool_stacktrace.h"
58#include "pub_tool_options.h"
59#include "pub_tool_clreq.h"
60
61#include "coregrind/pub_core_options.h"
62#include "coregrind/pub_core_debugger.h"
63
64#include "libvex_guest_offsets.h"
65
66#include "omega.h"
67
68/*
69** A little sanity in a mad, mad world.
70*/
71#if !(VG_WORDSIZE == 4) && !(VG_WORDSIZE == 8)
72/*
73** We don't handle anything else yet.
74*/
75#error Unsupported VG_WORDSIZE
76#endif
77
78/*
79** 4 lots of debug - always, general, memory and pbit.
80** general, memory and pbit can also be turned off with a master switch.
81** You wont want any of this on unless you are hacking the source around.
82*/
83#define NO_DEBUG(fmt, args...)
84#define O_DEBUG(fmt, args...) VG_(message)(Vg_DebugMsg, fmt, ## args)
85
86// Set to 0 to remove almost all debug from compiled tool
87#if 0
88
89static Bool o_traceMem = True; //False;
90static Bool o_tracePBit = False;
91static Bool o_traceGeneral = True; //False;
92static Bool o_traceStop = True;
93
94#define O_GDEBUG(fmt, args...) \
95 if(o_traceGeneral && !o_traceStop) \
96 { \
97 VG_(message)(Vg_DebugMsg, fmt, ## args); \
98 }
99
100#define O_MDEBUG(fmt, args...) \
101 if(o_traceMem && !o_traceStop) \
102 { \
103 VG_(message)(Vg_DebugMsg, fmt, ## args); \
104 }
105
106#define O_PDEBUG(fmt, args...) \
107 if(o_tracePBit && !o_traceStop) \
108 { \
109 VG_(message)(Vg_DebugMsg, fmt, ## args); \
110 }
111
112#define O_TRACE_ON() {o_traceStop = False;}
113#define O_TRACE_OFF() {o_traceStop = True;}
114#define O_TRACE_MEM_ON() {o_traceMem = True;}
115#define O_TRACE_MEM_OFF() {o_traceMem = False;}
116#define O_TRACE_PBIT_ON() {o_tracePBit = True;}
117#define O_TRACE_PBIT_OFF() {o_tracePBit = False;}
118#define O_TRACE_GENERAL_ON() {o_traceGeneral = True;}
119#define O_TRACE_GENERAL_OFF() {o_traceGeneral = False;}
120#define O_MASTER_DEBUG 1
121
122/*
123** Should we instrument memory loads for debugging?
124** Comment out to stop register loads from showing.
125*/
126//#define O_TRACK_LOADS 1
127#else
128/*
129** No debug included at all.
130*/
131#define O_GDEBUG(fmt, args...)
132#define O_MDEBUG(fmt, args...)
133#define O_PDEBUG(fmt, args...)
134#define O_TRACE_ON()
135#define O_TRACE_OFF()
136#define O_TRACE_MEM_ON()
137#define O_TRACE_MEM_OFF()
138#define O_TRACE_PBIT_ON()
139#define O_TRACE_PBIT_OFF()
140#define O_TRACE_GENERAL_ON()
141#define O_TRACE_GENERAL_OFF()
142
143#endif
144
145/*
146** Need somewhere to give addresses to tracked pointers in registers.
147** We dont write to the locations, just use their addresses.
148** To make it easy to see, use the very top 64K of memory.
149** Note that we might have to map this somewhere else if this is in user space.
150*/
151#if (VG_WORDSIZE == 4)
152#define FAKE_REG_BASE 0xFFFF0000
153#else
154#define FAKE_REG_BASE 0xFFFFFFFFFFFF0000
155#endif
156#define MAP_TO_REG(tid, offset) \
157 (FAKE_REG_BASE + (0x0100 * ((tid) - 1)) + (offset))
158#define OFFSET_FROM_REG(regAddress) \
159 ((regAddress) & 0x00ff)
160#define IS_REG(addr) ((addr >= FAKE_REG_BASE) ? !0 : 0)
161
162static UInt o_isReturnIgnoreReg(Addr reg)
163{
164 /*
165 ** Indicate registers that are 'scratch' registers and should be ignored on
166 ** function return for tracked pointer purposes.
sewardj4cff56d2007-11-10 13:33:56 +0000167
168 JRS 10 Nov 2007: Seems to me this should be somehow related to
169 caller- vs callee-saved classification of registers, but not sure.
170 See docs/internal/register-uses.txt for a summary.
171
172 This fn really ought to be partitioned along VGP_arch_os lines
173 rather than VGA_arch lines, since register conventions are OS
174 dependant as well as CPU dependant.
sewardj99a2ceb2007-11-09 12:30:36 +0000175 */
sewardj4cff56d2007-11-10 13:33:56 +0000176#if defined(VGA_x86)
sewardj99a2ceb2007-11-09 12:30:36 +0000177 switch(OFFSET_FROM_REG(reg))
178 {
sewardj99a2ceb2007-11-09 12:30:36 +0000179 case OFFSET_x86_ECX:
180 case OFFSET_x86_EDX:
sewardj4cff56d2007-11-10 13:33:56 +0000181 return 1;
182 default:
183 return 0;
184 }
sewardj99a2ceb2007-11-09 12:30:36 +0000185#elif defined(VGA_amd64)
sewardj4cff56d2007-11-10 13:33:56 +0000186 switch(OFFSET_FROM_REG(reg))
187 {
sewardj99a2ceb2007-11-09 12:30:36 +0000188 case OFFSET_amd64_RCX:
189 case OFFSET_amd64_RSI:
190 case OFFSET_amd64_RDI:
191 case OFFSET_amd64_R8:
192 case OFFSET_amd64_R9:
193 case OFFSET_amd64_R10:
194 case OFFSET_amd64_R11:
sewardj99a2ceb2007-11-09 12:30:36 +0000195 return 1;
sewardj99a2ceb2007-11-09 12:30:36 +0000196 default:
sewardj4cff56d2007-11-10 13:33:56 +0000197 return 0;
sewardj99a2ceb2007-11-09 12:30:36 +0000198 }
sewardj4cff56d2007-11-10 13:33:56 +0000199#elif defined(VGA_ppc32) || defined(VGA_ppc64)
sewardjdcbb8d32007-11-26 21:34:30 +0000200 VG_(printf)("\nOmega does not currently work on PowerPC/POWER platforms."
sewardj4cff56d2007-11-10 13:33:56 +0000201 " Sorry.\n\n");
202 VG_(exit)(0);
203#else
204# error "Unknown arch"
205#endif
206
207 /*NOTREACHED*/
208 tl_assert(0);
sewardj99a2ceb2007-11-09 12:30:36 +0000209}
210
211
212/*------------------------------------------------------------*/
213/*--- Command Line Option Flags and Values ---*/
214/*------------------------------------------------------------*/
215/*
216** Should we track all memory block allocations or just blocks
217** indicated to us with the MALLOCLIKE_BLOCK user request?
218*/
219static Bool o_onlyMallocLike = False;
220/*
221** Should we show memory that leaks due to a block leaking?
222*/
223static Bool o_showIndirect = False;
224/*
225** Should we show pointers to a block that is deallocated?
226*/
227static Bool o_showHanging = False;
228/*
229** Should we show blocks with only circular references?
230*/
231static Bool o_showCircular = False;
232/*
233** Show interal stats at the end of the run.
234*/
235static Bool o_showInternStats = False;
236/*
237** Should we only show the summary report.
238*/
239static Bool o_showSummaryOnly = True;
240
241/*
242** Should we clear leaked blocks to try and force an error.
243*/
244static Bool o_poison = False;
245
246/*
247** These figures are pure wet finger in the air guestimates.
248** If the user has _lots_ of memory blocks / tracked pointers, they can
249** increase the prime number on the command line.
250*/
251/*
252** Number of PBit Node entries in the hash table.
253*/
254static UInt o_pbitNodeHashSize = 1031;
255/*
256** Number of MemBlock entries in the hash table.
257*/
258static UInt o_memblockHashSize = 65537;
259/*
260** Number of Tracked Pointer entries in the hash table.
261*/
262static UInt o_trackedPointerHashSize = 65537;
263
264/*------------------------------------------------------------*/
265/*--- Statistics ---*/
266/*------------------------------------------------------------*/
267typedef struct
268{
269 unsigned long liveTrackedPointers;
270 unsigned long trackedPointersAllocated;
271 unsigned long liveMemoryBlocks;
272 unsigned long memoryBlocksAllocated;
273 unsigned long shadowMemoryBlocksAllocated;
274 unsigned long memoryBlocksLeaked;
275 unsigned long memoryBlocksLostAndFound;
276 unsigned long pbitNodes;
277} Stats;
278
279static Stats o_stats;
280
281/*------------------------------------------------------------*/
282/*--- PBit Tracking ---*/
283/*------------------------------------------------------------*/
284/*
285** Setup constants for PBit tracking.
286*/
287#if (VG_WORDSIZE == 4)
288#define PBIT_MAJOR_SHIFT 7
289#define PBIT_MINOR_SHIFT 2
290#define PBIT_MINOR_MASK 0x1F
291#elif (VG_WORDSIZE == 8)
292#define PBIT_MAJOR_SHIFT 8
293#define PBIT_MINOR_SHIFT 3
294#define PBIT_MINOR_MASK 0x1F
295#endif
296
297/*
298** Work out how many bytes a UInt of pbits covers
299*/
300#define PBIT_RANGE (sizeof(UInt) * 8 * VG_WORDSIZE)
301
302/*
303** Number of UInts to store in a node so that the node covers 64K
304*/
305#define PBIT_NODE_UINTS ((64 * 1024) / PBIT_RANGE)
306
307/*
308** Memory range covered by a pbit node
309*/
310#define PBIT_NODE_RANGE 0xFFFF
311#define PBIT_NODE_RANGE_MASK (~PBIT_NODE_RANGE)
312#define PBIT_NODE_SHIFT 16
313
314/* Define the pbit storage node. */
315typedef struct {
316 VgHashNode hdr; // Must be first item
317 UInt set_bits; // Count of set bits
318 UInt pbits[PBIT_NODE_UINTS]; // 64K of coverage
319} PBitNode;
320
321/*
322** We use a hash table to track the p-bits.
323** The node is defined just above. The key to a node is the memory
324** address right shifted PBIT_NODE_SHIFT bits.
325*/
326static VgHashTable o_PBits = NULL;
327
328/*
329** For speed, we keep a node to track register allocations and cache the last
330** node that was accessed.
331*/
332static PBitNode o_registerPBits;
333static PBitNode *o_lastPBitNode = NULL;
334static Addr o_lastPBitNodeKey = 0;
335
336/*
337** Convenience macros for working out which bit in which PBIT_NODE_UINT we
338** wish to address.
339*/
340#define PBIT_MAJOR_INDEX( addr ) \
341 (((addr) & PBIT_NODE_RANGE) >> PBIT_MAJOR_SHIFT)
342#define PBIT_MINOR_INDEX( addr ) \
343 (((addr) >> PBIT_MINOR_SHIFT) & PBIT_MINOR_MASK)
344#define PBIT_KEY( addr ) ((Addr)(addr) >> PBIT_NODE_SHIFT)
345
346typedef struct {
347 PBitNode *node;
348 Addr currentAddress;
349 Addr finalAddress;
350} PBitContext;
351
352/*
353** Helper functions for doing fast searches through an address range.
354*/
355static Addr o_firstPBit(PBitContext *context, Addr start, SizeT length);
356static Addr o_nextPBit(PBitContext *context);
357
358/*
359** Basic PBit manipulation.
360*/
361static PBitNode *o_getPBitNode(Addr address, Bool create)
362{
363 Addr key = PBIT_KEY(address);
364
365 O_PDEBUG("o_getPBitNode(%p%s)", address,
366 create ? ", create" : "");
367
368 O_PDEBUG("o_getPBitNode last node %p, last key %p",
369 o_lastPBitNode, o_lastPBitNodeKey);
370
371 if(IS_REG(address))
372 {
373 /*
374 ** This is a register - use the register PBit node.
375 */
376 O_PDEBUG("o_getPBitNode returning register PBit node");
377 return &o_registerPBits;
378 }
379 else if((key == o_lastPBitNodeKey) &&
380 (o_lastPBitNode || !create))
381 {
382 /*
383 ** This is in the same node as last time.
384 */
385 O_PDEBUG("o_getPBitNode returning last PBit node");
386 return o_lastPBitNode;
387 }
388 else
389 {
390 /*
391 ** It's a new node.
392 ** Look it up then cache both the node and the node key.
393 */
394 o_lastPBitNode = VG_(HT_lookup)(o_PBits, key);
395 o_lastPBitNodeKey = key;
396
397 if(!o_lastPBitNode & create)
398 {
399 /*
400 ** We don't have a node for this address. Create one now.
401 */
402 o_lastPBitNode = VG_(malloc)( sizeof(PBitNode) );
403 tl_assert(o_lastPBitNode);
404 VG_(memset)(o_lastPBitNode, 0, sizeof(PBitNode));
405 o_lastPBitNode->hdr.key = key;
406
407 /*
408 ** Add this node into the hash table.
409 */
410 VG_(HT_add_node)(o_PBits, o_lastPBitNode);
411
412 O_PDEBUG("Created PBit node beginning %p for address %p",
413 (key << PBIT_NODE_SHIFT),
414 address);
415
416 o_stats.pbitNodes++;
417
418 }
419 O_PDEBUG("o_getPBitNode returning lookup PBit node");
420
421 return o_lastPBitNode;
422 }
423}
424
425static void o_setPBit( Addr address )
426{
427 /*
428 ** Retrieve the node that contains this address then set the appropriate bit.
429 */
430 PBitNode *pbn = o_getPBitNode(address, True);
431
432 O_PDEBUG("o_setPBit(%p)", address);
433
434 O_PDEBUG("o_setPBit - node = %p, MAJOR = %d, MINOR = %d",
435 pbn,
436 PBIT_MAJOR_INDEX(address),
437 PBIT_MINOR_INDEX(address));
438 /*
439 ** The PBit might not be clear so only tweak things if it is.
440 */
441 if(!(pbn->pbits[PBIT_MAJOR_INDEX(address)] &
442 (1 << PBIT_MINOR_INDEX(address))))
443 {
444 /*
445 ** Set the pbit and increment the convenience count.
446 */
447 pbn->pbits[PBIT_MAJOR_INDEX(address)] |=
448 (1 << PBIT_MINOR_INDEX(address));
449 pbn->set_bits++;
450 }
451
452 O_PDEBUG("o_setPBit done");
453 return;
454}
455
456static void o_clearPBit( Addr address )
457{
458 /*
459 ** Retrieve the node that contains this address. If the node does not exist,
460 ** we assert as this really shouldnt happen.
461 */
462 PBitNode *pbn = o_getPBitNode(address, False);
463
464 O_PDEBUG("o_clearPBit(%p)", address);
465
466 tl_assert(pbn);
467
468 /*
469 ** The PBit might not be set so only tweak things if it is.
470 */
471 if(pbn->pbits[PBIT_MAJOR_INDEX(address)] &
472 (1 << PBIT_MINOR_INDEX(address)))
473 {
474 /*
475 ** Clear the pbit and decrement the convenience count.
476 */
477 pbn->pbits[PBIT_MAJOR_INDEX(address)] &=
478 ~(1 << PBIT_MINOR_INDEX(address));
479 pbn->set_bits--;
480 }
481
482 return;
483}
484
485static Bool o_isPBitSet( Addr address )
486{
487 /*
488 ** Retrieve the node that contains this address. If the node does not exist,
489 ** the Pbit isnt set ;-)
490 */
491 PBitNode *pbn = o_getPBitNode(address, False);
492
493 O_PDEBUG("o_isPBitSet(%p)", address);
494
495 if(!pbn)
496 return 0;
497
498 /*
499 ** Return the Pbit status.
500 */
501 return ((pbn->pbits[PBIT_MAJOR_INDEX(address)] &
502 (1 << PBIT_MINOR_INDEX(address))) != 0);
503}
504
505/*
506** For ease of range checking PBits, we provide the following two functions.
507** The idea is that you call the first one with your start address and range.
508** It returns the first address that is marked by a PBit or 0 if the range is
509** clear (we overlap the supplied range in order to check partial pointers at
510** each end). By calling the second one with the same context until it returns
511** zero, you get all of the PBits within the range. You supply the context so
512** we should be able to nest calls if need be.
513*/
514static Addr o_firstPBit(PBitContext *context, Addr start, SizeT length)
515{
516 const Addr MASK = ~(VG_WORDSIZE - 1);
517
518 tl_assert(context);
519 tl_assert(start > VG_WORDSIZE);
520
521 O_PDEBUG("o_firstPBit(%p, %p)", start, length);
522 /*
523 ** Optimisation for single pointer ranges and bizarre 0 length calls.
524 */
525 if(!length)
526 {
527 return 0;
528 }
529 else if(length <= VG_WORDSIZE)
530 {
531 /*
532 ** Set the current address to 0.
533 */
534 context->currentAddress = 0;
535 return (o_isPBitSet(start)) ? (start & MASK) : 0;
536 }
537
538 /*
539 ** Setup the current and final addresses. Note that we set the current
540 ** address to one aligned address below because of how nextPBit works.
541 */
542 context->currentAddress = ((start & MASK) - VG_WORDSIZE);
543 context->finalAddress = ((start + length - 1) & MASK);
544
545 context->node = o_getPBitNode(context->currentAddress, False);
546
547 O_PDEBUG("o_firstPBit current %p, final %p",
548 context->currentAddress, context->finalAddress);
549
550 return o_nextPBit(context);
551}
552
553static Addr o_nextPBit(PBitContext *context)
554{
555 /*
556 ** Current address is the last address we returned.
557 ** We keep going until we have checked final address.
558 */
559 UInt pbits;
560 Addr startAddr;
561 Addr foundAddr = 0;
562 UInt majorIndex;
563 UInt minorIndex;
564
565 tl_assert(context);
566
567 /*
568 ** When the current address is set to 0, we just exit.
569 */
570 if(context->currentAddress == 0)
571 {
572 return 0;
573 }
574
575 O_PDEBUG("o_nextPBit(%p,%p)",
576 context->currentAddress, context->finalAddress);
577
578 while(!foundAddr &&
579 (context->currentAddress <= context->finalAddress))
580 {
581 /*
582 ** Check if we need another node and get it if we do.
583 */
584 startAddr = context->currentAddress + VG_WORDSIZE;
585
586 O_PDEBUG("o_nextPBit c %p s %p", context->currentAddress, startAddr);
587
588 if(PBIT_KEY(context->currentAddress) !=
589 PBIT_KEY(startAddr))
590 {
591 O_PDEBUG("o_nextPBit getting next node %p",
592 startAddr & PBIT_NODE_RANGE_MASK);
593
594 context->node = o_getPBitNode(startAddr, False);
595 }
596 context->currentAddress = startAddr;
597
598 /*
599 ** Check if we have a node - skip to next node (final address
600 ** permitting) if we dont. This is the 64k of addresses at a time
601 ** comparison.
602 */
603 if(!context->node)
604 {
605 O_PDEBUG("o_nextPbit: no node.");
606
607 if(context->currentAddress > context->finalAddress)
608 {
609 /*
610 ** We have passed the final address - time to stop looking.
611 */
612 O_PDEBUG("o_nextPbit: current > final");
613 continue;
614 }
615 else if((context->currentAddress & PBIT_NODE_RANGE_MASK) !=
616 (context->finalAddress & PBIT_NODE_RANGE_MASK))
617 {
618 /*
619 ** Align to VG_WORDSIZE below the next node range then loop.
620 */
621 O_PDEBUG("o_nextPbit: aligning to next node. (%p, %p)",
622 context->currentAddress,
623 context->finalAddress);
624
625 context->currentAddress += (PBIT_NODE_RANGE + 1);
626 context->currentAddress &= PBIT_NODE_RANGE_MASK;
627 context->currentAddress -= VG_WORDSIZE;
628
629 O_PDEBUG("o_nextPbit: aligned to %p",
630 context->currentAddress);
631
632 continue;
633 }
634 else
635 {
636 /*
637 ** Node range is the same but no node == no pbits.
638 */
639 context->currentAddress = context->finalAddress + VG_WORDSIZE;
640 break;
641 }
642 }
643
644 /*
645 ** The index of the PBit array item we want to check then get the pbits.
646 */
647 majorIndex = PBIT_MAJOR_INDEX(context->currentAddress);
648 minorIndex = PBIT_MINOR_INDEX(context->currentAddress);
649 pbits = context->node->pbits[majorIndex];
650
651 /*
652 ** Mask off addresses below the current address then test.
653 */
654 pbits &= ~((1 << minorIndex) - 1);
655
656 O_PDEBUG("o_nextPbit: major %d, minor %d, bit %p",
657 majorIndex, minorIndex, pbits);
658 /*
659 ** This checks up to PBIT_RANGE at a time (256 addresses on a
660 ** 64bit machine).
661 */
662 if(!pbits)
663 {
664 /*
665 ** No pbits set in this UInt. Set the current address to VG_WORDSIZE
666 ** below the next UInt then loop around.
667 */
668 context->currentAddress += PBIT_RANGE;
669 context->currentAddress &= ~(PBIT_RANGE - 1);
670 context->currentAddress -= VG_WORDSIZE;
671
672 continue;
673 }
674
675 /*
676 ** Now we walk the UInt a bit at a time.
677 */
678 for(;
679 ((minorIndex <= PBIT_MINOR_MASK) &&
680 (context->currentAddress <= context->finalAddress))
681 ; minorIndex++)
682 {
683 if(pbits & (1 << minorIndex))
684 {
685 /*
686 ** We have a match.
687 */
688 foundAddr = context->currentAddress;
689 O_PDEBUG("o_nextPbit found %p", foundAddr);
690 break;
691 }
692 else
693 {
694 context->currentAddress += VG_WORDSIZE;
695 }
696 }
697 }
698
699 /*
700 ** Final range check.
701 */
702 if(foundAddr > context->finalAddress)
703 {
704 foundAddr = 0;
705 }
706
707 /*
708 ** Store the result so that we know where to start from next time.
709 */
710 context->currentAddress = foundAddr;
711
712 O_PDEBUG("o_nextPbit returning %p", foundAddr);
713
714 return foundAddr;
715}
716
717/*------------------------------------------------------------*/
718/*--- Error Report and Suppression Tracking ---*/
719/*------------------------------------------------------------*/
720/*
721** We hold a doubley linked list of Exe contexts for leaks and suppressions.
722** If a block is tagged as leaked then comes back to life, we move it
723** into the suppression list. We always check the suppression list first
724** before adding a record to the leaked list.
725** We keep a count of how may times a record matches as it saves space.
726*/
727struct _BlockRecord {
728 struct _BlockRecord *next;
729 struct _BlockRecord *prev;
730 ExeContext *allocated;
731 ExeContext *leaked;
732 UInt bytes;
733 SizeT count;
734};
735
736typedef struct _BlockRecord BlockRecord;
737
738typedef struct {
739 BlockRecord *start;
740 BlockRecord *end;
741} BlockRecordList;
742static BlockRecordList o_leakRecords = {NULL, NULL};
743static BlockRecordList o_suppressionRecords = {NULL, NULL};
744
745#define DUMP_BLOCK(block) \
746 O_DEBUG("n %p, p %p, a %p, l %p, c %d b %p", \
747 block->next, block->prev, \
748 block->allocated, block->leaked, block->count, \
749 block->bytes);
750
751/*
752** List handling - we need to be able to add and remove a single block
753** from anywhere in the list but the chances are, removals will come from
754** the end, hence using a doubly linked list. We also need to walk the list
755** to find a matching item. Again, we do this backwards as it tends to get
756** a match faster in the case of moving newly leaked block records into
757** the suppression list.
758*/
759static void o_addBlockRecord(BlockRecordList *list, BlockRecord *item)
760{
761 /*
762 ** Catch start case.
763 */
764 tl_assert(list && item);
765
766 NO_DEBUG("o_addBlockRecord pre()");
767 //DUMP_BLOCK(item);
768
769 if(!list->start)
770 {
771 list->start = list->end = item;
772 item->prev = item->next = NULL;
773 }
774 else
775 {
776 /*
777 ** OK, add it onto the end.
778 */
779 item->prev = list->end;
780 item->next = NULL;
781 list->end->next = item;
782 list->end = item;
783 }
784 NO_DEBUG("o_addBlockRecord post()");
785 //DUMP_BLOCK(item);
786 return;
787}
788
789static void o_removeBlockRecord(BlockRecordList *list, BlockRecord *item)
790{
791 /*
792 ** We don't check that the item is in the list.
793 ** Ensure you check with the findBlockRecord function.
794 */
795 tl_assert(list && item);
796
797 NO_DEBUG("o_removeBlockRecord pre()");
798 //DUMP_BLOCK(item);
799 if(item->prev)
800 {
801 /*
802 ** Not at the start.
803 */
804 item->prev->next = item->next;
805 }
806 else
807 {
808 /*
809 ** At the start.
810 */
811 list->start = item->next;
812 }
813
814 if(item->next)
815 {
816 /*
817 ** Not at the end.
818 */
819 item->next->prev = item->prev;
820 }
821 else
822 {
823 /*
824 ** At the end.
825 */
826 list->end = item->prev;
827 }
828
829 NO_DEBUG("o_removeBlockRecord post()");
830 //DUMP_BLOCK(item);
831
832 return;
833}
834
835static BlockRecord *o_findBlockRecord(BlockRecordList *list,
836 ExeContext *allocated,
837 ExeContext *leaked)
838
839{
840 /*
841 ** Search backwards for the block record that matches the contexts.
842 ** We allow leaked to be null so that we can handle the circular checking
843 ** blocks as well which only have an allocated context.
844 */
845 BlockRecord *item = NULL;
846
847 tl_assert(list && allocated);
848
849 item = list->end;
850
851 while(item)
852 {
853 if(VG_(eq_ExeContext)(Vg_HighRes, item->allocated, allocated) &&
854 ((!item->leaked && !leaked) ||
855 ((item->leaked && leaked) &&
856 VG_(eq_ExeContext)(Vg_HighRes, item->leaked, leaked))))
857 {
858 break;
859 }
860
861 item = item->prev;
862 }
863
864 return item;
865}
866
867static Bool o_addLeakedBlock(ExeContext *allocated,
868 ExeContext *leaked,
869 SizeT size)
870{
871 BlockRecord *item = NULL;
872
873 tl_assert(allocated && leaked);
874
875 /*
876 ** See if we already have this block.
877 ** Check the suppression record first.
878 */
879 item = o_findBlockRecord(&o_suppressionRecords, allocated, leaked);
880
881 if(!item)
882 {
883 /*
884 ** Not in the suppression record.
885 ** Try the leaked block list.
886 */
887 item = o_findBlockRecord(&o_leakRecords, allocated, leaked);
888 }
889
890 if(item)
891 {
892 /*
893 ** Just increment the count.
894 */
895 item->count++;
896 item->bytes += size;
897 //O_DEBUG("o_addLeakedBlock - block exists");
898 //DUMP_BLOCK(item);
899 return False;
900 }
901 else
902 {
903 /*
904 ** Create a new block and add it to the leaked list.
905 */
906 item = VG_(malloc)(sizeof(BlockRecord));
907 tl_assert(item);
908
909 item->count = 1;
910 item->bytes = size;
911 item->next = item->prev = NULL;
912 item->allocated = allocated;
913 item->leaked = leaked;
914
915 o_addBlockRecord(&o_leakRecords, item);
916
917 return True;
918 }
919
920}
921
922static Bool o_addSuppressionBlock(ExeContext *allocated,
923 ExeContext *leaked)
924{
925 BlockRecord *item = NULL;
926
927 tl_assert(allocated && leaked);
928
929 /*
930 ** See if we already have this block.
931 ** Check the suppression record first.
932 */
933 item = o_findBlockRecord(&o_suppressionRecords, allocated, leaked);
934
935 if(!item)
936 {
937 /*
938 ** Not in the suppression record.
939 ** Try the leaked block list.
940 */
941 item = o_findBlockRecord(&o_leakRecords, allocated, leaked);
942
943 if(!item)
944 {
945 VG_(tool_panic)("suppressing block that didnt leak :-(");
946 }
947 else
948 {
949 /*
950 ** Move the block to the suppression list.
951 */
952 o_removeBlockRecord(&o_leakRecords, item);
953 o_addBlockRecord(&o_suppressionRecords, item);
954 }
955 }
956 else
957 {
958 /*
959 ** The block is already suppressed - just increase the count.
960 */
961 item->count++;
962
963 //O_DEBUG("o_addSuppressionBlock - block exists");
964 //DUMP_BLOCK(item);
965 return False;
966 }
967
968 return True;
969}
970
971/*------------------------------------------------------------*/
972/*--- Allocated Block and Pointer Tracking ---*/
973/*------------------------------------------------------------*/
974/*
975** Where these structures have address references, they are the address
976** of the item in client memory NOT the address of either of these
977** internal tracking structures.
978*/
979struct _MemBlock;
980typedef struct {
981 VgHashNode hdr; // Must be first item
982 Addr block; // Address of the allocated block start
983 SizeT length; // Length of the allocated block
984 struct _MemBlock *memBlock; // Pointer to the memblock
985} TrackedPointer;
986
987typedef struct _MemBlock {
988 VgHashNode hdr; // Must be first item
989 SizeT length; // Length of the allocated block
990 ExeContext *where; // Where the block was allocated
991 UInt refNum; // Number of back references
992 TrackedPointer **pointers; // Back references to TrackedPointer info
993 struct _MemBlock *shadowing; // Set to memblock of block that we shadow
994 struct _MemBlock *shadowed; // Set to memblock of our shadow
995 ExeContext *leaked; // Where we think the block leaked
996 UInt nonRegCount; // Non register tracked pointers
997 Int external; // Used in circular dependency checking
998
999 TrackedPointer *maybeLast; // Last live tracked pointer on function return
1000 ExeContext *funcEnd; // matching exe context for the end of the function
1001 Bool doLeak; // Set if this block should leak on instruction
1002 // end. We have to make instructions atomic or we
1003 // go bang on things like xchng as there is no way
1004 // of telling which value gets overwritten first.
1005 struct _MemBlock *next; // Linked list of blocks that might be leaking at
1006 // instruction end.
1007 int depth; // Depth that the potential leak occurred at.
1008 TrackedPointer *wasLast; // Pointer t
1009
1010 UInt nonScratch; // Number of non-scratch registers.
1011} MemBlock;
1012
1013/*
1014** Shadows?
1015** This helps to solve the problem of where a program does its own memory
1016** management of the kind:
1017
10181 secret *foo = malloc(sizeof(bar) + sizeof(secret) + alignment_correction);
10192 foo->secret_stuff = magic_key;
10203 etc.
10214 foo++;
10225 return (bar*)foo;
1023
1024** If the pointer to foo is shadowed at some internal offset to the block
1025** start, we create a shadow record and link it to the main block so that
1026** we can track references to either. Without this we do a leak alert at
1027** line 4 instead which is undesireable.
1028**
1029** There can only be one shadow to a block unless we need more and someone
1030** wants to code it. A side effect of the current implementation allows a
1031** shadow of a shadow but it is explicitly blocked for now.
1032*/
1033
1034/*
1035** We use separate hash tables to track the pointers and allocated blocks.
1036** The key of each node is the address of the corresponding item in client
1037** memory, shifted right to remove the wasted bits caused by alignment of
1038** pointers in memory.
1039*/
1040#if (VG_WORDSIZE == 4)
1041#define TRACK_MINOR_SHIFT 2
1042#define TRACK_MINOR_MASK ~0x03
1043#elif (VG_WORDSIZE == 8)
1044#define TRACK_MINOR_SHIFT 3
1045#define TRACK_MINOR_MASK ~0x07
1046#endif
1047
1048#define TRACKED_KEY( a ) ((UWord)(a) >> TRACK_MINOR_SHIFT)
1049#define FROM_TRACKED_KEY( a ) ((UWord)(a) << TRACK_MINOR_SHIFT)
1050
1051/*
1052** Storage for the two hash tables we need.
1053*/
1054static VgHashTable o_MemBlocks = NULL;
1055static VgHashTable o_TrackedPointers = NULL;
1056
1057/*
1058** Start of a linked list of blocks that may be leaking during this original
1059** processor instruction. Instructions are broken down inside VEX so a single
1060** original instruction can become many VEX instructions. By not doing leak
1061** reports until the end of the original instruction, everything becomes
1062** atomic again - the stack moves and the popped value appears in the register
1063** in one movement rather than two which cause a leak if the stack is
1064** invalidated before the value appears in the register. xchng works both ways
1065** around and so on.
1066*/
1067static MemBlock *doLeakList = NULL;
1068static UInt doLeakListCount = 0;
1069static Bool doLeakNow = False;
1070
1071/*
1072** Set when we are removing pointers within a free()ed block.
1073*/
1074static Bool o_clearingBlock = False;
1075
1076/*
1077** Set when we are removing pointers within a free()ed block or a
1078** block that leaked. It shows the indirection level in cascades.
1079*/
1080static UInt o_indirectChecking = 0;
1081static ExeContext *o_indirectStack = NULL;
1082
1083/*
1084** Set when the stack is unwinding.
1085*/
1086static Bool o_stackUnwind = False;
1087
1088static void o_killRange(Addr start, SizeT length);
1089
1090/*
1091** This is set to stop us from tracking leaks once we exit main.
1092** (May well need a per thread flag to catch when threads exit as well.)
1093*/
1094static Bool o_inhibitLeakDetect = False;
1095
1096
1097static void o_cleanupTrackedPointers( MemBlock * mb )
1098{
1099 UInt pointerIndex;
1100
1101 for(pointerIndex = 0; pointerIndex < mb->refNum; pointerIndex++)
1102 {
1103 TrackedPointer *p =
1104 VG_(HT_remove)(o_TrackedPointers,
1105 mb->pointers[pointerIndex]->hdr.key);
1106
1107 tl_assert(p);
1108 O_GDEBUG("Removing tracked pointer at %p pointing to %p",
1109 FROM_TRACKED_KEY(p->hdr.key),
1110 mb->hdr.key);
1111
1112 /*
1113 ** Remove the PBit for this tracked pointer.
1114 */
1115 o_clearPBit(FROM_TRACKED_KEY(p->hdr.key));
1116
1117 /*
1118 ** Show any pointers to this block as we deallocate them.
1119 */
1120 if(o_showHanging)
1121 {
1122 if(IS_REG(FROM_TRACKED_KEY(p->hdr.key)))
1123 {
1124 /*
1125 ** Maybe decode registers to names later?
1126 */
1127 O_DEBUG("Removing hanging pointer in a register to block %p",
1128 p->block);
1129 }
1130 else
1131 {
1132 O_DEBUG("Removing hanging pointer at %p to block %p",
1133 FROM_TRACKED_KEY(p->hdr.key),
1134 p->block);
1135 }
1136 }
1137 VG_(free)(p);
1138 o_stats.liveTrackedPointers--;
1139 }
1140
1141 /*
1142 ** Free off the pointers back reference.
1143 */
1144 VG_(free)(mb->pointers);
1145 mb->pointers = NULL;
1146 mb->refNum = 0;
1147
1148 return;
1149}
1150
1151static void o_cleanupMemBlock( MemBlock **mbpp )
1152{
1153 MemBlock *mb;
1154
1155 O_GDEBUG("o_cleanupMemBlock(%p)", mbpp);
1156 /*
1157 ** Sanity check.
1158 */
1159 if(!mbpp || !*mbpp)
1160 {
1161 O_DEBUG("o_cleanupMemBlock passed null memory block pointer.");
1162 return;
1163 }
1164
1165 /*
1166 ** Take a local copy with less indirection.
1167 */
1168 mb = *mbpp;
1169
1170 O_GDEBUG("o_cleanupMemBlock mb=%p", mb->hdr.key);
1171
1172 /*
1173 ** If this is a shadowed block, complain then return.
1174 */
1175 if(mb->shadowing)
1176 {
1177 O_DEBUG("Trying to cleanup a shadow block at %p tracking %p",
1178 mb->hdr.key,
1179 mb->shadowing->hdr.key);
1180 return;
1181 }
1182
1183 /*
1184 ** If a shadow exists, clean it up.
1185 */
1186 if(mb->shadowed)
1187 {
1188 MemBlock *shadowed = mb->shadowed;
1189
1190 /*
1191 ** Cleanup its pointers, remove it from the hash table then
1192 ** free off the block.
1193 */
1194 O_GDEBUG("cleanup shadow pointers");
1195 o_cleanupTrackedPointers(shadowed);
1196 (void)VG_(HT_remove)(o_MemBlocks, shadowed->hdr.key);
1197 VG_(free)(shadowed);
1198
1199 o_stats.liveMemoryBlocks--;
1200 }
1201
1202 /*
1203 ** Free off the tracked pointers.
1204 */
1205 O_GDEBUG("cleanup tracked pointers");
1206 o_cleanupTrackedPointers(mb);
1207
1208 /*
1209 ** Check for tracked pointers inside the allocated block being lost.
1210 */
1211 o_indirectChecking++;
1212 o_clearingBlock = True;
1213 o_killRange(mb->hdr.key,
1214 mb->length);
1215 o_clearingBlock = False;
1216 o_indirectChecking--;
1217
1218 /*
1219 ** Now free off the memory block.
1220 */
1221 VG_(free)(mb);
1222 o_stats.liveMemoryBlocks--;
1223
1224 /*
1225 ** Clear the passed in pointer.
1226 */
1227 *mbpp = NULL;
1228
1229 return;
1230}
1231
1232static void o_addMemBlockReference( MemBlock *mb, TrackedPointer *tp )
1233{
1234 MemBlock *smb = mb;
1235
1236 O_GDEBUG("o_addMemBlockReference tp=%p, mb=%p",
1237 FROM_TRACKED_KEY(tp->hdr.key),
1238 mb->hdr.key);
1239
1240 /*
1241 ** Check if we are shadowing.
1242 */
1243 if(mb->shadowing)
1244 {
1245 /*
1246 ** Get the mem block for the true allocated block.
1247 ** Note that this leaves smb pointing to the shadow block which is
1248 ** what we want.
1249 */
1250 mb = mb->shadowing;
1251 }
1252
1253 /*
1254 ** Check if the block previously leaked.
1255 */
1256 if(!mb->shadowed && !mb->refNum && mb->leaked)
1257 {
1258 /*
1259 ** Seems that the block didnt leak after all.
1260 */
1261 if(o_addSuppressionBlock(mb->where, mb->leaked) && !o_showSummaryOnly)
1262 {
1263 O_DEBUG("Welcome back to the supposedly leaked block at %p. Illegal read?",
1264 mb->hdr.key);
1265
1266 VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(), VG_(clo_backtrace_size));
1267 O_DEBUG("");
1268 }
1269
1270 mb->leaked = NULL;
1271 o_stats.memoryBlocksLeaked--;
1272 o_stats.memoryBlocksLostAndFound++;
1273 }
1274
1275 /*
1276 ** Populate the tracked pointer then add it to the hash.
1277 ** We use the shadow block so that it points to the correct place.
1278 ** Add the back reference to the mem block.
1279 */
1280 tp->block = smb->hdr.key;
1281 tp->length = mb->length;
1282 tp->memBlock = smb;
1283 VG_(HT_add_node)(o_TrackedPointers, tp);
1284
1285 /*
1286 ** Do we need more memory for pointers?
1287 */
1288 if(!smb->pointers)
1289 {
1290 smb->pointers =
1291 VG_(malloc)((smb->refNum + 8) * sizeof(TrackedPointer *));
1292 tl_assert(smb->pointers);
1293 }
1294 else if(!((smb->refNum + 1) & 7))
1295 {
1296 /*
1297 ** Add space for another 8 back references.
1298 ** Note that this will also shrink us if needed.
1299 */
1300 smb->pointers =
1301 VG_(realloc)(smb->pointers, ((smb->refNum + 8) * sizeof(Addr)));
1302 tl_assert(smb->pointers);
1303 }
1304
1305 smb->pointers[smb->refNum] = tp;
1306
1307 /*
1308 ** Track register and memory pointers.
1309 */
1310 if(!IS_REG(FROM_TRACKED_KEY(smb->pointers[smb->refNum]->hdr.key)))
1311 {
1312 smb->nonRegCount++;
1313 }
1314 else if(!o_isReturnIgnoreReg(FROM_TRACKED_KEY(smb->pointers[smb->refNum]->hdr.key)))
1315 {
1316 smb->nonScratch++;
1317 }
1318
1319 /*
1320 ** Clear the maybeLast and funcEnd. Adding a reference means that
1321 ** the cached one wasnt the last.
1322 */
1323 smb->maybeLast = NULL;
1324 smb->funcEnd = NULL;
1325
1326 /*
1327 ** Clear the doLeak flag - we just added a reference so the block survived
1328 ** the instruction.
1329 */
1330 smb->doLeak = False;
1331
1332 smb->refNum++;
1333 O_MDEBUG("Added tracked pointer at %p pointing to %s%p",
1334 FROM_TRACKED_KEY(tp->hdr.key),
1335 smb->shadowing ? "(S)" : "",
1336 smb->hdr.key);
1337
1338 return;
1339}
1340
1341static void o_removePointerFromList(MemBlock *mb, TrackedPointer *tp)
1342{
1343 UInt pointerNum;
1344
1345 O_GDEBUG("removePointerFromList tp=%p mb=%p",
1346 FROM_TRACKED_KEY(tp->hdr.key),
1347 mb->hdr.key);
1348
1349 /*
1350 ** Check that this tracked pointer belongs to this block.
1351 */
1352 tl_assert(tp->memBlock == mb);
1353
1354 /*
1355 ** Find the tracked pointer in the memory blocks' list.
1356 */
1357 for(pointerNum = 0; pointerNum < mb->refNum; pointerNum++)
1358 {
1359 if(mb->pointers[pointerNum] == tp)
1360 {
1361 /*
1362 ** Found it.
1363 ** If this is not the last pointer in the list, copy the last
1364 ** one over it.
1365 */
1366 if((pointerNum + 1) != mb->refNum)
1367 {
1368 mb->pointers[pointerNum] = mb->pointers[(mb->refNum - 1)];
1369 }
1370
1371 break;
1372 }
1373 }
1374
1375 /*
1376 ** Track register and memory pointers.
1377 */
1378 if(!IS_REG(FROM_TRACKED_KEY(tp->hdr.key)))
1379 {
1380 mb->nonRegCount--;
1381 }
1382 else if(!o_isReturnIgnoreReg(FROM_TRACKED_KEY(tp->hdr.key)))
1383 {
1384 mb->nonScratch--;
1385 }
1386
1387 mb->refNum--;
1388
1389 return;
1390}
1391
1392static void o_doLeakReport(MemBlock *mb);
1393static void o_removeMemBlockReference( MemBlock *mb, TrackedPointer *tp )
1394{
1395 MemBlock *smb = NULL;
1396 SizeT refCount = 0;
1397 UInt nonRegCount = 0;
1398 Bool shadowed = False;
1399
1400 /*
1401 ** We need the tracked pointer object.
1402 */
1403 tl_assert(tp);
1404
1405 /*
1406 ** If we dont have the memory block, get it from the tracked pointer.
1407 */
1408 if(!mb)
1409 {
1410 mb = tp->memBlock;
1411 }
1412 tl_assert(mb);
1413
1414 O_GDEBUG("o_removeMemBlockReference tp=%p, mb=%p",
1415 FROM_TRACKED_KEY(tp->hdr.key),
1416 mb->hdr.key);
1417
1418 smb = mb;
1419 refCount = smb->refNum;
1420 nonRegCount = smb->nonRegCount;
1421
1422 O_GDEBUG("(A)refCount %d, o_stackUnwind %c, nonRegCount %d, isReg %c",
1423 refCount,
1424 (o_stackUnwind ? 'Y' : 'N'),
1425 nonRegCount,
1426 IS_REG(FROM_TRACKED_KEY(tp->hdr.key)) ? 'Y' : 'N');
1427
1428 /*
1429 ** Check if we are shadowing.
1430 */
1431 if(mb->shadowing)
1432 {
1433 /*
1434 ** Get the mem block for the true allocated block.
1435 ** Note that this leaves smb pointing to the shadow which is correct.
1436 */
1437 mb = mb->shadowing;
1438#if defined(O_MASTER_DEBUG)
1439 if(!o_traceStop)
1440 {
1441 int count;
1442 for(count = 0; count < mb->refNum && count < 6; count++)
1443 O_GDEBUG(" %p", FROM_TRACKED_KEY(mb->pointers[count]->hdr.key));
1444 }
1445#endif
1446 refCount += mb->refNum;
1447 shadowed = True;
1448 nonRegCount += mb->nonRegCount;
1449 }
1450 else if(mb->shadowed)
1451 {
1452 /*
1453 ** Get the mem block for the shadow as we need the refNum from it.
1454 */
1455 MemBlock *tmb = mb->shadowed;
1456#if defined(O_MASTER_DEBUG)
1457 if(!o_traceStop)
1458 {
1459 int count;
1460 for(count = 0; count < tmb->refNum && count < 6; count++)
1461 O_GDEBUG(" %p", FROM_TRACKED_KEY(tmb->pointers[count]->hdr.key));
1462 }
1463#endif
1464 refCount += tmb->refNum;
1465 shadowed = True;
1466 nonRegCount += tmb->nonRegCount;
1467 }
1468#if defined(O_MASTER_DEBUG)
1469 else if(!o_traceStop)
1470 {
1471 int count;
1472 for(count = 0; count < mb->refNum && count < 6; count++)
1473 O_GDEBUG(" %p", FROM_TRACKED_KEY(mb->pointers[count]->hdr.key));
1474
1475 }
1476#endif
1477
1478 O_GDEBUG("(B)rCnt %d, nRCnt %d, ns %d, shad %c, free %c",
1479 refCount,
1480 nonRegCount,
1481 mb->nonScratch,
1482 (shadowed ? 'Y' : 'N'),
1483 (o_clearingBlock ? 'Y' : 'N'));
1484 /*
1485 ** We really should have at least one tracked pointer.
1486 */
1487 tl_assert(refCount);
1488
1489#if defined(O_MASTER_DEBUG)
1490 if(!o_traceStop)
1491 {
1492 VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(), 8);O_DEBUG("");
1493 }
1494#endif
1495
1496 /*
1497 ** We remove the tracked pointer from the hash table but do not delete it.
1498 ** This allows a slight gain where a tracked pointer can immediately be
1499 ** reused rather than free()ed off and a new one malloc()ed.
1500 ** We then remove the back reference from the memory block and
1501 ** squeal if it is the last one. We don't clean the tracked pointer as this
1502 ** is a waste if it is going to be free()ed off.
1503 ** If warn indirect is set and this is an indirect check, do nothing.
1504 */
1505 (void)VG_(HT_remove)(o_TrackedPointers, tp->hdr.key);
1506
1507 O_GDEBUG("Removing tracked pointer at %p pointing to %p",
1508 FROM_TRACKED_KEY(tp->hdr.key),
1509 smb->hdr.key);
1510
1511 if((refCount <= 1) // Last pointer
1512
1513 /*
1514 ** Catch cascades of memory blocks when we call free().
1515 */
1516 || (o_clearingBlock && !shadowed && !mb->nonScratch &&
1517 (nonRegCount == 1) && !IS_REG(FROM_TRACKED_KEY(tp->hdr.key)))
1518
1519#if defined(VGA_x86)
1520 /*
1521 ** Losing all in memory pointers within a basic block is not a good sign.
1522 */
1523 || (!o_stackUnwind && (nonRegCount == 1) &&
1524 !IS_REG(FROM_TRACKED_KEY(tp->hdr.key)))
1525#endif
1526 )
1527 {
1528 if((!o_inhibitLeakDetect)
1529 /*
1530 ** Don't report when there are just register based pointers left and
1531 ** we have already reported the block as leaked.
1532 */
1533 && !(mb->leaked && IS_REG(FROM_TRACKED_KEY(tp->hdr.key)))
1534 )
1535 {
1536 /*
1537 ** Set the doLeak flag for the block and add it to the doLeakList.
1538 ** We also need to stash the indirect depth value for possibly reporting
1539 ** later. Finally, if maybeLast matches the pointer that is being removed
1540 ** and thus causing the leak, we leave maybeLast and funcEnd otherwise, we
1541 ** zero them.
1542 */
1543 mb->depth = o_indirectChecking;
1544 if(mb->maybeLast != tp)
1545 {
1546 mb->maybeLast = NULL;
1547 mb->funcEnd = NULL;
1548 }
1549
1550 /*
1551 ** Cascades triggered by a doLeak being actioned should report
1552 ** immediately, rather than being added to the doLeakList. Likewise
1553 ** cascades caused by freeing a block.
1554 */
1555 if(doLeakNow || o_clearingBlock)
1556 {
1557 o_doLeakReport(mb);
1558 }
1559 else
1560 {
1561 mb->doLeak = True;
1562 mb->next = doLeakList;
1563 doLeakList = mb;
1564 doLeakListCount++;
1565 }
1566 }
1567 }
1568
1569 /*
1570 ** Finally, remove the pointer from the blocks' list.
1571 */
1572 o_removePointerFromList(smb, tp);
1573
1574 return;
1575}
1576
1577static void o_doLeakReport(MemBlock *mb)
1578{
1579 Bool doReport = True;
1580
1581 if(mb->maybeLast)
1582 {
1583 // This is the suspected last pointer - use the cached stacktrace
1584 O_MDEBUG("maybe last was the last");
1585 tl_assert(mb->funcEnd);
1586 mb->leaked = mb->funcEnd;
1587 o_indirectStack = mb->funcEnd;
1588 }
1589 else if(mb->depth && o_indirectStack)
1590 {
1591 O_MDEBUG("indirect with indirect stack set");
1592 // We are cascading - use the cached stacktrace, if there is one
1593 mb->leaked = o_indirectStack;
1594 }
1595 else
1596 {
1597 O_MDEBUG("creating new context maybeLast=0");
1598 // Get the current stacktrace
sewardj39f34232007-11-09 23:02:28 +00001599 mb->leaked = VG_(record_ExeContext)(VG_(get_running_tid)(),
1600 0/*first_ip_delta*/);
sewardj99a2ceb2007-11-09 12:30:36 +00001601 }
1602
1603 doReport = o_addLeakedBlock(mb->where, mb->leaked, mb->length);
1604 /*
1605 ** Report the probable leak.
1606 */
1607 o_stats.memoryBlocksLeaked++;
1608
1609 if(doReport && !o_showSummaryOnly)
1610 {
1611 if(mb->depth)
1612 {
1613 if(o_showIndirect)
1614 {
1615 VG_(message)(Vg_UserMsg,
1616 "Probably indirectly (level %d) leaking block of %d(%p) bytes",
1617 mb->depth,
1618 mb->length,
1619 mb->length);
1620 }
1621 }
1622 else
1623 {
1624 VG_(message)(Vg_UserMsg,
1625 "Probably leaking block of %d(%p) bytes",
1626 mb->length,
1627 mb->length);
1628 }
1629
1630 if(!mb->depth || o_showIndirect)
1631 {
1632 VG_(pp_ExeContext)(mb->leaked);
1633
1634 VG_(message)(Vg_UserMsg,
1635 " Block at %p allocated", mb->hdr.key);
1636 VG_(pp_ExeContext)(mb->where);
1637 VG_(message)(Vg_UserMsg,"");
1638 }
1639
1640 /*
1641 ** Only attach the debugger for the first leaking block in the chain
1642 ** and only when show summary is disabled (--instant-reports).
1643 */
1644 if(!mb->depth && VG_(clo_db_attach))
1645 {
1646 VG_(start_debugger)(VG_(get_running_tid)());
1647 }
1648 }
1649
1650 /*
1651 ** Check for tracked pointers inside the allocated block being lost.
1652 */
1653 o_indirectChecking++;
1654 o_killRange(mb->hdr.key, mb->length);
1655 o_indirectChecking--;
1656
1657 /*
1658 ** Poison the block if requested.
1659 */
1660 if(o_poison)
1661 VG_(memset)((Addr *)mb->hdr.key, 0, mb->length);
1662
1663 return;
1664}
1665
1666static Bool o_setupShadow(TrackedPointer *tp, Addr address)
1667{
1668 Bool doneShadow = False;
1669 MemBlock *mb = NULL;
1670 MemBlock *smb = NULL;
1671
1672 O_MDEBUG("setup shadow tp %p block %p address %p",
1673 FROM_TRACKED_KEY(tp->hdr.key), tp->block, address);
1674 /*
1675 ** Get the memory block for the tracked pointer.
1676 ** It should exist.
1677 */
1678 mb = tp->memBlock;
1679 tl_assert(mb);
1680
1681 /*
1682 ** If this is a shadow block, get the main block as well.
1683 ** It should exist.
1684 */
1685 smb = mb;
1686 if(mb->shadowing)
1687 {
1688 mb = mb->shadowing;
1689 tl_assert(mb);
1690 }
1691
1692 /*
1693 ** If the block is already shadowed at address, bail out and let the
1694 ** normal code handle it.
1695 */
1696 if(mb->shadowed)
1697 {
1698 if(mb->shadowed->hdr.key == address)
1699 {
1700 O_MDEBUG("already shadowed %p", address);
1701 return False;
1702 }
1703 /*
1704 ** Get the shadow block.
1705 */
1706 smb = mb->shadowed;
1707 tl_assert(smb);
1708 }
1709
1710 /*
1711 ** Check if address is within the block that we are tracking.
1712 ** If it is then we need to work out whether to create a
1713 ** new shadow or move an eixsting one.
1714 */
1715 if((address > mb->hdr.key) &&
1716 (address < (mb->hdr.key + mb->length)))
1717 {
1718 doneShadow = True;
1719
1720 O_MDEBUG("About to shadow internal address %p to block %p in %p",
1721 address,
1722 mb->hdr.key,
1723 FROM_TRACKED_KEY(tp->hdr.key));
1724
1725 if(smb == mb)
1726 {
1727 O_MDEBUG("creating new shadow");
1728 /*
1729 ** Create a new shadow for the block.
1730 */
1731 smb = VG_(malloc)( sizeof(MemBlock) );
1732 tl_assert(smb);
1733
1734 o_stats.shadowMemoryBlocksAllocated++;
1735 o_stats.liveMemoryBlocks++;
1736
1737 VG_(memset)(smb, 0, sizeof(MemBlock));
1738 smb->hdr.key = address;
1739 smb->length = 0;
1740 smb->where = 0; // Dont need this in the shadow.
1741 smb->shadowing = mb;
1742 mb->shadowed = smb;
1743 VG_(HT_add_node(o_MemBlocks, smb));
1744
1745 /*
1746 ** Move the tracked pointer from the main block to the shadow.
1747 */
1748 (void)VG_(HT_remove)(o_TrackedPointers, tp->hdr.key);
1749 o_removePointerFromList(mb, tp);
1750 o_addMemBlockReference(smb, tp);
1751 }
1752 else if((smb->refNum == 1) &&
1753 (smb == tp->memBlock))
1754 {
1755 O_MDEBUG("moving existing shadow at %p", smb->hdr.key);
1756 /*
1757 ** Move the existing shadow.
1758 */
1759 (void)VG_(HT_remove)(o_MemBlocks, smb->hdr.key);
1760 smb->hdr.key = address;
1761 smb->where = 0; // Dont need this in the shadow.
1762 VG_(HT_add_node(o_MemBlocks, smb));
1763
1764 /*
1765 ** Tweak the existing tracked pointer, leaving the PBit alone.
1766 */
1767 tp->block = address;
1768 }
1769 else
1770 {
1771 /*
1772 ** A shadow exists and has pointers assigned to it.
1773 ** We do not allow more than one shadow so deregister and
1774 ** free this tracked pointer and clear its PBit.
1775 */
1776 O_MDEBUG("Prevented second shadow %p (first %p) for %p",
1777 address,
1778 mb->shadowed,
1779 mb->hdr.key);
1780
1781 o_clearPBit(FROM_TRACKED_KEY(tp->hdr.key));
1782 o_removeMemBlockReference(NULL, tp);
1783 VG_(free)(tp);
1784
1785 o_stats.liveTrackedPointers--;
1786 }
1787
1788 O_MDEBUG("shadow creation / reallocation done");
1789 }
1790 else if((smb != mb) &&
1791 (address == mb->hdr.key))
1792 {
1793 /*
1794 ** Hmmm.
1795 ** Looks like we are setting the tracked pointer to the block start.
1796 ** If it was previously pointing at the shadow block, we need to move it
1797 ** manually.
1798 */
1799 if(tp->block == smb->hdr.key)
1800 {
1801 O_MDEBUG("moving pointer from shadow to main");
1802
1803 if(smb->refNum == 1)
1804 {
1805 doneShadow = True;
1806
1807 O_MDEBUG("destroying shadow of %p at %p",
1808 mb->hdr.key,
1809 smb->hdr.key);
1810 /*
1811 ** Remove the shadow block and move the pointer.
1812 */
1813 (void)VG_(HT_remove)(o_MemBlocks, smb->hdr.key);
1814 mb->shadowed = 0;
1815 VG_(free)(smb->pointers);
1816 VG_(free)(smb);
1817 o_stats.liveMemoryBlocks--;
1818
1819 (void)VG_(HT_remove)(o_TrackedPointers, tp->hdr.key);
1820 o_addMemBlockReference(mb, tp);
1821 }
1822 else
1823 {
1824 /*
1825 ** Let the normal code move the pointer.
1826 */
1827 }
1828 }
1829 }
1830 else
1831 {
1832 O_MDEBUG("tracked pointer out of range");
1833 }
1834
1835 return doneShadow;
1836}
1837
1838static void o_killTrackedPointer(Addr addr)
1839{
1840 TrackedPointer *tp = VG_(HT_lookup)(o_TrackedPointers, TRACKED_KEY(addr));
1841
1842 /*
1843 ** We really should have the tracked pointer.
1844 */
1845 tl_assert(tp);
1846
1847 /*
1848 ** Remove the tracked pointer from its memory block, causing
1849 ** a leak report as required then free it.
1850 */
1851 o_clearPBit(addr);
1852
1853 O_MDEBUG("Removing tracked pointer to %p at %p",
1854 tp->block, FROM_TRACKED_KEY(tp->hdr.key));
1855
1856 o_removeMemBlockReference(NULL, tp);
1857
1858 VG_(free)(tp);
1859
1860 o_stats.liveTrackedPointers--;
1861 return;
1862}
1863
1864static void o_killRange(Addr start, SizeT length)
1865{
1866 /*
1867 ** We need to check the PBits for the addresses starting at start.
1868 ** We use the firstPBit / nextPBit functions to get us a list of set
1869 ** pbits in the specified range.
1870 */
1871 PBitContext pb;
1872 Addr a;
1873
1874 O_MDEBUG("killing range %p bytes from %p", length, start);
1875
1876
1877 a = o_firstPBit(&pb, start, length);
1878 while(a)
1879 {
1880 o_killTrackedPointer(a);
1881 a = o_nextPBit(&pb);
1882 }
1883 O_MDEBUG("killing range %p bytes from %p done.", length, start);
1884}
1885
1886static void o_duplicateTrackedPointers(Addr dst, Addr src, SizeT length)
1887{
1888 /*
1889 ** For each set PBit in the src block, create a new tracked pointer
1890 ** in the destination block, pointing to the same memory block.
1891 */
1892 PBitContext pb;
1893 Addr address;
1894
1895 O_MDEBUG("o_duplicateTrackedPointers(%p, %p %d(%p))",
1896 dst, src, length, length);
1897
1898 address = o_firstPBit(&pb, src, length);
1899
1900 while(address)
1901 {
1902 /*
1903 ** Create a tracked pointer at the appropriate place within the new
1904 ** block of memory.
1905 */
1906 TrackedPointer *tp = VG_(HT_lookup)(o_TrackedPointers, TRACKED_KEY(address));
1907 Int diff = dst - src;
1908 TrackedPointer *ntp = VG_(malloc)((sizeof(TrackedPointer)));
1909 MemBlock *mb = NULL;
1910
1911 tl_assert(tp);
1912
1913 o_stats.liveTrackedPointers++;
1914 o_stats.trackedPointersAllocated++;
1915
1916 /*
1917 ** Get the memory block from the tracked pointer at this address.
1918 */
1919 mb = tp->memBlock;
1920
1921 if(!mb)
1922 {
1923 O_DEBUG("Oops! Copying pointer at %p to block that leaked(%p)",
1924 address, tp->block);
1925 VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(), VG_(clo_backtrace_size));
1926 O_DEBUG("");
1927
1928 VG_(tool_panic)("we lost track of a pointer :-(");
1929 }
1930
1931 tl_assert(ntp);
1932
1933 VG_(memset)(ntp, 0, sizeof(TrackedPointer));
1934 ntp->hdr.key = TRACKED_KEY(address + diff);
1935 o_addMemBlockReference(mb, ntp);
1936
1937 /*
1938 ** Set the PBit for this tracked pointer.
1939 */
1940 o_setPBit(address + diff);
1941
1942 address = o_nextPBit(&pb);
1943 }
1944
1945}
1946
1947static void o_createMemBlock(ThreadId tid, Addr start, SizeT size)
1948{
1949 MemBlock *mb = VG_(malloc)(sizeof(MemBlock));
1950 tl_assert(mb);
1951
1952 o_stats.memoryBlocksAllocated++;
1953 o_stats.liveMemoryBlocks++;
1954
1955 VG_(memset)(mb, 0, sizeof(MemBlock));
1956
1957 /*
1958 ** Populate the block. Note that we have no pointers until one is written
1959 ** into memory.
1960 */
1961 mb->hdr.key = start;
1962 mb->length = size;
sewardj39f34232007-11-09 23:02:28 +00001963 mb->where = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
sewardj99a2ceb2007-11-09 12:30:36 +00001964
1965 /*
1966 O_DEBUG("Creating new MemBlock (%p) key = %p, length %d",
1967 mb, (void *)start, size);
1968 VG_(pp_ExeContext)(mb->where);
1969 */
1970
1971 /*
1972 ** Add this node into the hash table.
1973 */
1974 VG_(HT_add_node)(o_MemBlocks, mb);
1975}
1976
1977static void o_destroyMemBlock(ThreadId tid, Addr start)
1978{
1979 /*
1980 ** Destroy our memory block.
1981 */
1982 MemBlock *mb = VG_(HT_remove)(o_MemBlocks, start);
1983
1984 /*
1985 ** The block really should exist, unless this is a double free attempt...
1986 */
1987 if(!mb)
1988 {
1989 O_DEBUG("Double/Invalid call to free(%p)", start);
1990 VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(), VG_(clo_backtrace_size));
1991 O_DEBUG("");
1992 }
1993 else
1994 {
1995 if(mb->leaked)
1996 {
1997 /*
1998 ** Seems that the block didnt leak after all.
1999 ** *sigh*
2000 ** Why do so many libs access memory in blocks they free()ed?
2001 */
2002 if(o_addSuppressionBlock(mb->where, mb->leaked) && !o_showSummaryOnly)
2003 {
2004 O_DEBUG("Welcome back (and goodbye) to the supposedly leaked block at %p",
2005 start);
2006 }
2007 o_stats.memoryBlocksLeaked--;
2008 o_stats.memoryBlocksLostAndFound++;
2009 }
2010 /*
2011 ** Clean up the block - we pass a pointer pointer so that we can
2012 ** set it to NULL during the cleanup process.
2013 */
2014 o_cleanupMemBlock(&mb);
2015 }
2016
2017 return;
2018}
2019
2020
2021static void o_setupMaybeLast(Addr a)
2022{
2023 int refCount = 0;
2024 /*
2025 ** Maybe returning a value - set the maybeLast and funcEnd members
2026 ** in the memory block this register points to if it is the last
2027 ** item.
2028 */
2029 TrackedPointer *tp = VG_(HT_lookup)(o_TrackedPointers, TRACKED_KEY(a));
2030 /*
2031 ** We really should have the tracked pointer.
2032 */
2033 tl_assert(tp);
2034
2035 refCount = tp->memBlock->refNum;
2036 if(tp->memBlock->shadowing)
2037 {
2038 refCount += tp->memBlock->shadowing->refNum;
2039 }
2040 else if(tp->memBlock->shadowed)
2041 {
2042 refCount += tp->memBlock->shadowed->refNum;
2043 }
2044
2045 if(refCount == 1)
2046 {
2047 // Hmmm, last reference. If we haven't already done so,
2048 // save the context, just in case
2049 tl_assert(!tp->memBlock->maybeLast ||
2050 (tp->memBlock->maybeLast == tp));
2051 if(!tp->memBlock->maybeLast)
2052 {
2053 tp->memBlock->maybeLast = tp;
sewardj39f34232007-11-09 23:02:28 +00002054 tp->memBlock->funcEnd = VG_(record_ExeContext)(VG_(get_running_tid)(),
2055 0/*first_ip_delta*/);
sewardj99a2ceb2007-11-09 12:30:36 +00002056 O_MDEBUG("setting maybeLast to %p in block at %p",
2057 FROM_TRACKED_KEY(tp->hdr.key), tp->block);
2058 }
2059#if defined(O_MASTER_DEBUG)
2060 else
2061 {
2062 O_MDEBUG("leaving maybeLast at %p in block at %p",
2063 FROM_TRACKED_KEY(tp->hdr.key), tp->block);
2064 }
2065#endif
2066 }
2067 O_MDEBUG("leaving register %p", OFFSET_FROM_REG(a));
2068}
2069
2070/*------------------------------------------------------------*/
2071/*--- Helper functions called by instrumentation ---*/
2072/*------------------------------------------------------------*/
2073#if defined(O_TRACK_LOADS)
2074static VG_REGPARM(1)
2075void o_omegaLoadTracker( Addr address )
2076{
2077 O_MDEBUG("o_omegaLoadTracker(%p, %p)", address, *((Addr *)address));
2078
2079 return;
2080}
2081#endif
2082
2083static VG_REGPARM(2)
2084void o_omegaScratchRemover( Addr start, Addr length )
2085{
2086 O_MDEBUG("o_omegaScratchRemover(%p, %p)", start, length);
2087 o_killRange(start, length);
2088
2089 return;
2090}
2091
2092static VG_REGPARM(1)
2093void o_endOfInstruction( Addr address )
2094{
2095 /*
2096 ** Any generated leaks should report immediately.
2097 */
2098 doLeakNow = True;
2099
2100 O_MDEBUG("o_endOfInstruction %p doLeakListCount = %d",
2101 address, doLeakListCount);
2102
2103 if(doLeakListCount)
2104 {
2105 if(doLeakListCount > 1)
2106 {
2107 /*
2108 ** Reverse the list so the reports come out in the correct order.
2109 */
2110 MemBlock *front = NULL;
2111 MemBlock *temp = NULL;
2112
2113 do
2114 {
2115 temp = doLeakList->next;
2116
2117 if(front)
2118 {
2119 doLeakList->next = front;
2120 }
2121 else
2122 {
2123 doLeakList->next = NULL;
2124 }
2125 front = doLeakList;
2126
2127 doLeakList = temp;
2128 }
2129 while(doLeakList);
2130
2131 /*
2132 ** Now do the leak reports.
2133 */
2134 while(front)
2135 {
2136 temp = front;
2137 front = front->next;
2138
2139 if(temp->doLeak)
2140 {
2141 temp->doLeak = False;
2142 o_doLeakReport(temp);
2143 }
2144 else
2145 {
2146 O_MDEBUG("block at %p survived!", temp->hdr.key);
2147 }
2148 }
2149 }
2150 else
2151 {
2152 if(doLeakList->doLeak)
2153 {
2154 /*
2155 ** The block has leaked. Report it.
2156 */
2157 o_doLeakReport(doLeakList);
2158 }
2159 else
2160 {
2161 O_MDEBUG("block at %p survived!", doLeakList->hdr.key);
2162 }
2163
2164 doLeakList->doLeak = False;
2165 doLeakList = NULL;
2166 }
2167 }
2168
2169 O_MDEBUG("o_endOfInstruction done");
2170
2171 o_indirectStack = NULL;
2172 doLeakListCount = 0;
2173 doLeakNow = False;
2174}
2175
2176static
2177void o_omegaFunctionReturn( void )
2178{
2179 PBitContext pb;
2180 Addr a = 0;
2181
2182 /*
2183 ** Zap scratch registers.
2184 */
2185
2186#if defined(VGA_x86)
2187 a = o_firstPBit(&pb,
2188 MAP_TO_REG(VG_(get_running_tid)(), OFFSET_x86_ECX),
2189 OFFSET_x86_EDI + 4);
2190#elif defined(VGA_amd64)
2191 a = o_firstPBit(&pb,
2192 MAP_TO_REG(VG_(get_running_tid)(), OFFSET_amd64_RCX),
2193 OFFSET_amd64_R15 + 8);
2194#endif
2195 doLeakNow = True;
2196 while(a)
2197 {
2198 if(o_isReturnIgnoreReg(OFFSET_FROM_REG(a)))
2199 {
2200 O_MDEBUG("killing register %p", OFFSET_FROM_REG(a));
2201 o_killTrackedPointer(a);
2202 }
2203 a = o_nextPBit(&pb);
2204 }
2205 doLeakNow = False;
2206
2207 /*
2208 ** Now work out if we might be returning a value in the accumulator.
2209 */
2210#if defined(VGA_x86)
2211 a = MAP_TO_REG(VG_(get_running_tid)(), OFFSET_x86_EAX);
2212#elif defined(VGA_amd64)
2213 a = MAP_TO_REG(VG_(get_running_tid)(), OFFSET_amd64_RAX);
2214#endif
2215 if(o_isPBitSet(a))
2216 o_setupMaybeLast(a);
2217
2218#if defined(VGA_amd64)
2219 // Also need to check for the RDX register as it is a second return reg
2220 a = MAP_TO_REG(VG_(get_running_tid)(), OFFSET_amd64_RDX);
2221 if(o_isPBitSet(a))
2222 o_setupMaybeLast(a);
2223#endif
2224 return;
2225}
2226
2227static VG_REGPARM(2)
2228void o_omegaDetector( Addr address, Addr value)
2229{
2230 TrackedPointer *tp = NULL;
2231 MemBlock *mb = NULL;
2232
2233 /*
2234 ** We need to track the registers.
2235 ** To do this, if the address < 256, change it to our local shadow.
2236 **
2237 ** We really want to be able to track the proper shadow but I have no
2238 ** idea yet how to get the address for it. Once I do, use that in
2239 ** preference. Note that all we need is a unique memory location for
2240 ** the register in order to generate a tracked pointer.
2241 */
2242 if(address < 0x100)
2243 {
2244 O_MDEBUG("o_omegaDetector(%p, %p)", address, value);
2245 address = MAP_TO_REG(VG_(get_running_tid)(), address);
2246 }
2247 else
2248 {
2249 /*
2250 ** Check aligned - if not, align it and retrive the stored value.
2251 */
2252 if(address & ~TRACK_MINOR_MASK)
2253 {
2254 address &= TRACK_MINOR_MASK;
2255 value = *((Addr *)address);
2256 }
2257 O_MDEBUG("o_omegaDetector(%p, %p)", address, value);
2258 }
2259
2260 /*
2261 ** Done the alignment tweaks so do the more expensive lookups.
2262 */
2263 if(o_isPBitSet(address))
2264 {
2265 tp = VG_(HT_lookup)(o_TrackedPointers, TRACKED_KEY(address));
2266
2267 if(tp && (tp->block == value))
2268 {
2269 /*
2270 ** Unlikely but it seems that we are writing the same value back into
2271 ** the tracked pointer - don't process further for a small gain.
2272 */
2273 //O_DEBUG("writing duplicate into tracked pointer.");
2274 return;
2275 }
2276
2277 /*
2278 ** We always auto shadow.
2279 ** Note that auto shadowing only works if you overwrite a tracked pointer.
2280 ** Checking for the creation of a new tracked pointer at some internal
2281 ** address is too much overhead as we would have to scan backwards to find
2282 ** a memory block then check if the value is within it. For those cases,
2283 ** we need to get something going with the client request system.
2284 */
2285 if(tp && value)
2286 {
2287 if(o_setupShadow(tp, value))
2288 {
2289 return;
2290 }
2291 }
2292
2293 /*
2294 ** Remove the tracked pointer and clear the PBit,
2295 ** if we have one.
2296 */
2297 if(tp)
2298 {
2299 tl_assert(tp->hdr.key == TRACKED_KEY(address));
2300 O_MDEBUG("Removing tracked pointer to %p at %p",
2301 tp->block, FROM_TRACKED_KEY(tp->hdr.key));
2302 o_clearPBit(address);
2303 o_removeMemBlockReference(NULL, tp);
2304 }
2305 }
2306
2307 /*
2308 ** Get the mem block now - it might not exist if tp was the last
2309 ** reference to it. It might not exist anyway.
2310 */
2311 if(value)
2312 {
2313 mb = VG_(HT_lookup)(o_MemBlocks, value);
2314 }
2315
2316 /*
2317 ** If we have a memblock, clean the tracked pointer then add it.
2318 ** If not, free the tracked pointer.
2319 */
2320 if(mb)
2321 {
2322 if(!tp)
2323 {
2324 /*
2325 ** No tracked pointer - create one now.
2326 */
2327 tp = VG_(malloc)(sizeof(TrackedPointer));
2328 tl_assert(tp);
2329 o_stats.trackedPointersAllocated++;
2330 o_stats.liveTrackedPointers++;
2331 }
2332 VG_(memset)(tp, 0, sizeof(TrackedPointer));
2333 tp->hdr.key = TRACKED_KEY(address);
2334 o_addMemBlockReference(mb, tp);
2335 /*
2336 ** Set the PBit for this tracked pointer.
2337 */
2338 o_setPBit(address);
2339
2340 O_MDEBUG("Added tracked pointer to %p at %p",
2341 tp->block, FROM_TRACKED_KEY(tp->hdr.key));
2342
2343 }
2344 else if(tp)
2345 {
2346 VG_(free)(tp);
2347 o_stats.liveTrackedPointers--;
2348 }
2349
2350 return;
2351}
2352
2353/*------------------------------------------------------------*/
2354/*--- malloc() et al replacement wrappers ---*/
2355/*------------------------------------------------------------*/
2356
2357static
2358void* o_newBlock ( ThreadId tid, SizeT size, SizeT align, Bool is_zeroed )
2359{
2360 void* p = NULL;
2361
2362O_TRACE_ON();
2363#if defined(O_MASTER_DEBUG)
2364 if(!o_traceStop)
2365 {
2366 VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(), 8);O_DEBUG("");
2367 }
2368#endif
2369
2370 O_MDEBUG("newBlock(%d, %d, %d, %d)",
2371 tid,
2372 size,
2373 align,
2374 (int)is_zeroed);
2375
2376 /*
2377 ** Allocate and zero if necessary.
2378 */
2379 p = VG_(cli_malloc)( align, size );
2380 if(!p)
2381 {
2382 O_DEBUG("Out of memory!");
2383 VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(), VG_(clo_backtrace_size));
2384 O_DEBUG("");
2385
2386 return NULL;
2387 }
2388
2389 if(is_zeroed)
2390 {
2391 VG_(memset)(p, 0, size);
2392 }
2393
2394 if(!o_onlyMallocLike)
2395 {
2396 /*
2397 ** Create a new MemBlock.
2398 */
2399 o_createMemBlock(tid, (Addr)p, size);
2400 }
2401
2402 O_MDEBUG("o_newBlock returning %p", p);
2403
2404 return p;
2405}
2406
2407static
2408void o_dieBlock ( ThreadId tid, void* p )
2409{
2410 /*
2411 ** Free off the allocated memory block.
2412 */
2413 O_MDEBUG("o_dieBlock(%d, %p)", tid, p);
2414
2415 /*
2416 ** Check if we have a potentially valid pointer
2417 */
2418 if(!p)
2419 {
2420 return;
2421 }
2422
2423 /*
2424 ** If we are doing malloc like block handling, only free off the memory.
2425 */
2426 if(!o_onlyMallocLike)
2427 {
2428 o_destroyMemBlock(tid, (Addr)p);
2429 }
2430
2431 /*
2432 ** Actually free the heap block.
2433 */
2434 VG_(cli_free)( p );
2435
2436 return;
2437}
2438
2439static void* o_malloc ( ThreadId tid, SizeT n )
2440{
2441 return o_newBlock( tid, n, VG_(clo_alignment), /*is_zeroed*/False );
2442}
2443
2444static void* o__builtin_new ( ThreadId tid, SizeT n )
2445{
2446 return o_newBlock( tid, n, VG_(clo_alignment), /*is_zeroed*/False );
2447}
2448
2449static void* o__builtin_vec_new ( ThreadId tid, SizeT n )
2450{
2451 return o_newBlock( tid, n, VG_(clo_alignment), /*is_zeroed*/False );
2452}
2453
2454static void* o_calloc ( ThreadId tid, SizeT m, SizeT size )
2455{
2456 return o_newBlock( tid, m*size, VG_(clo_alignment), /*is_zeroed*/True );
2457}
2458
2459static void *o_memalign ( ThreadId tid, SizeT align, SizeT n )
2460{
2461 return o_newBlock( tid, n, align, False );
2462}
2463
2464static void o_free ( ThreadId tid, void* p )
2465{
2466 o_dieBlock( tid, p );
2467}
2468
2469static void o__builtin_delete ( ThreadId tid, void* p )
2470{
2471 o_dieBlock( tid, p );
2472}
2473
2474static void o__builtin_vec_delete ( ThreadId tid, void* p )
2475{
2476 o_dieBlock( tid, p );
2477}
2478
2479static void* o_realloc ( ThreadId tid, void* p_old, SizeT new_size )
2480{
2481 MemBlock *mb = NULL;
2482 void *p_new = NULL;
2483
2484 O_MDEBUG("o_realloc p_old %p, new_size %d",
2485 p_old, new_size);
2486
2487 if(!p_old)
2488 {
2489 /*
2490 ** Pointer == NULL so let new block do the work.
2491 */
2492 return o_newBlock(tid, new_size, VG_(clo_alignment), /*is_zeroed*/False);
2493 }
2494
2495 mb = VG_(HT_lookup)(o_MemBlocks, (Addr)p_old);
2496
2497 /*
2498 ** Check that we have this memory block.
2499 */
2500 if(!mb)
2501 {
2502 /*
2503 ** Log the bad call but return p_old so the program can continue.
2504 ** This might not be a good thing but some of the libraries are a
2505 ** little weird and returning NULL as per the spec blows them up...
2506 */
2507 O_DEBUG("Invalid call to realloc(%p)", p_old);
2508 VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(), VG_(clo_backtrace_size));
2509 O_DEBUG("");
2510
2511 return p_old;
2512 }
2513
2514 if(mb->leaked)
2515 {
2516 /*
2517 ** Seems that the block didnt leak after all.
2518 */
2519 if(o_addSuppressionBlock(mb->where, mb->leaked) && !o_showSummaryOnly)
2520 {
2521 O_DEBUG("Welcome back to the supposedly leaked block at %p",
2522 p_old);
2523 }
2524 mb->leaked = NULL;
2525 o_stats.memoryBlocksLeaked--;
2526 o_stats.memoryBlocksLostAndFound++;
2527 }
2528
2529 if(new_size)
2530 {
2531 if(new_size > mb->length)
2532 {
2533 /*
2534 ** Make a new block, copy the data into it then free the old block.
2535 ** We lose all tracked pointers but that is to be expected as this is
2536 ** a new block at a new address. However, any tracked pointers within
2537 ** must be preserved.
2538 */
2539
2540 p_new = o_newBlock(tid, new_size, VG_(clo_alignment), False);
2541 tl_assert(p_new);
2542
2543 VG_(memcpy)(p_new, p_old, mb->length);
2544
2545 o_duplicateTrackedPointers((Addr)p_new, (Addr)p_old, mb->length);
2546 }
2547 else
2548 {
2549 /*
2550 ** Return the existing block.
2551 */
2552 return p_old;
2553 }
2554 }
2555
2556 /*
2557 ** This will remove all of the old tracked pointers within.
2558 */
2559 o_dieBlock(tid, p_old);
2560
2561 return p_new;
2562}
2563
2564static void o_dieMemStack(Addr start, SizeT length)
2565{
2566 /*
2567 ** Flag that this is a stack unwind.
2568 */
2569 o_stackUnwind = True;
2570 o_killRange(start, length);
2571 o_stackUnwind = False;
2572}
2573
2574static void o_post_clo_init(void)
2575{
2576 /*
2577 ** Allocate the hash tables.
2578 ** Note that we can improve performance at the cost of memory by initialising
2579 ** with a larger prime number so more of the key part of the address is
2580 ** unique. The defaults are probably OK for many programs but we expose them
2581 ** on the command line to make it easier for users to change them.
2582 */
2583 o_PBits = VG_(HT_construct)( "omega pbits" );
2584 tl_assert(o_PBits);
2585
2586 o_MemBlocks = VG_(HT_construct)( "omega memblocks" );
2587 tl_assert(o_MemBlocks);
2588
2589 o_TrackedPointers = VG_(HT_construct)( "omega tracked ptrs" );
2590 tl_assert(o_TrackedPointers);
2591
2592 /*
2593 ** We need precise instructions so that we can work out the range of the
2594 ** original machine instruction in terms of grouping together lumps of IR.
2595 ** We lose out big time on optimisation but we have to take the hit in order
2596 ** to deal with instructions like pop and xchg.
2597 */
2598 VG_(clo_vex_control).iropt_precise_memory_exns = True;
2599
2600}
2601
2602static IRSB *
2603o_instrument(VgCallbackClosure* closure,
2604 IRSB* bb_in,
2605 VexGuestLayout* layout,
2606 VexGuestExtents* vge,
2607 IRType gWordTy, IRType hWordTy)
2608{
2609 IRDirty* di;
2610 Int i;
2611 IRSB* bb;
2612 IRType type;
2613 Addr mask;
2614 IRStmt* stackReg = NULL;
2615
2616#if 0 //defined(O_MASTER_DEBUG)
2617
2618 static int thisBlock = 0;
2619 thisBlock++;
2620 if(thisBlock == 11377)
2621 {
2622 O_TRACE_ON();
2623 }
2624 else if(thisBlock == 11390)
2625 {
2626 VG_(tool_panic)("hit stop block");
2627 }
2628#endif
2629
2630 if (gWordTy != hWordTy)
2631 {
2632 /* We don't currently support this case. */
2633 VG_(tool_panic)("host/guest word size mismatch");
2634 }
2635
2636 /*
2637 ** Set up BB
2638 */
2639 bb = emptyIRSB();
2640 bb->tyenv = deepCopyIRTypeEnv(bb_in->tyenv);
2641 bb->next = deepCopyIRExpr(bb_in->next);
2642 bb->jumpkind = bb_in->jumpkind;
2643
2644#if (VG_WORDSIZE == 4)
2645 type = Ity_I32;
2646 mask = ~0x03;
2647#elif (VG_WORDSIZE == 8)
2648 type = Ity_I64;
2649 mask = ~0x07;
2650#endif
2651
2652 for (i = 0; i < bb_in->stmts_used; i++)
2653 {
2654 IRStmt* st = bb_in->stmts[i];
2655 if (!st || st->tag == Ist_NoOp)
2656 {
2657 continue;
2658 }
2659
2660 di = NULL;
2661
2662 switch (st->tag)
2663 {
2664 case Ist_AbiHint:
2665 /*
2666 ** An area just went undefined. There may be pointers in this
2667 ** scratch area that we should now ignore.
2668 ** Make sure that we do so.
2669 */
2670 if(stackReg)
2671 {
2672 addStmtToIRSB( bb, stackReg );
2673 stackReg = NULL;
2674 }
2675 di = unsafeIRDirty_0_N( 2, "o_omegaScratchRemover",
2676 &o_omegaScratchRemover,
2677 mkIRExprVec_2(st->Ist.AbiHint.base,
2678 mkIRExpr_HWord(st->Ist.AbiHint.len)));
2679 /*
2680 ** Add in the original instruction second.
2681 */
2682 addStmtToIRSB( bb, IRStmt_Dirty(di) );
2683 break;
2684
2685 case Ist_Store:
2686 if(stackReg)
2687 {
2688 addStmtToIRSB( bb, stackReg );
2689 stackReg = NULL;
2690 }
2691 if(typeOfIRExpr(bb->tyenv, st->Ist.Store.addr) == type)
2692 {
2693 /*
2694 ** We have an address of native size.
2695 */
2696 if(typeOfIRExpr(bb->tyenv, st->Ist.Store.data) == type)
2697 {
2698 /*
2699 ** We have data of native size - check if this is a pointer being
2700 ** written.
2701 */
2702 di = unsafeIRDirty_0_N( 2, "o_omegaDetector", &o_omegaDetector,
2703 mkIRExprVec_2(st->Ist.Store.addr,
2704 st->Ist.Store.data));
2705 /*
2706 ** Add in the original instruction second.
2707 */
2708 addStmtToIRSB( bb, IRStmt_Dirty(di) );
2709 addStmtToIRSB( bb, st );
2710 st = NULL;
2711 }
2712 else
2713 {
2714 /*
2715 ** There is no way that the data is a pointer but we still have to
2716 ** check if a pointer will be overwritten.
2717 */
2718 di = unsafeIRDirty_0_N( 2, "o_omegaDetector", &o_omegaDetector,
2719 mkIRExprVec_2(st->Ist.Store.addr,
2720 mkIRExpr_HWord(0)));
2721 /*
2722 ** Add in the original instruction first.
2723 */
2724 addStmtToIRSB( bb, st );
2725 addStmtToIRSB( bb, IRStmt_Dirty(di) );
2726 st = NULL;
2727 }
2728 }
2729 else
2730 {
2731 O_GDEBUG("o_instrument address type(%p) not a pointer",
2732 typeOfIRExpr(bb->tyenv, st->Ist.Store.addr));
2733 }
2734
2735 break;
2736
2737 case Ist_IMark:
2738 /*
2739 ** Call the end of instruction callback. This is to check what actually
2740 ** leaked as opposed to what appeared to leak in a transient fashion
2741 ** due to instructions getting broken up into more simple IR
2742 ** instructions. Note that stack register updates are moved to
2743 ** the end of the orginal instruction so that things like 'pop' get
2744 ** the values into registers BEFORE the stack is invalidated.
2745 */
2746 if(stackReg)
2747 {
2748 addStmtToIRSB( bb, stackReg );
2749 stackReg = NULL;
2750 }
2751 di = unsafeIRDirty_0_N( 1, "o_endOfInstruction", &o_endOfInstruction,
2752 mkIRExprVec_1(mkIRExpr_HWord(st->Ist.IMark.addr)));
2753 addStmtToIRSB( bb, IRStmt_Dirty(di) );
2754 addStmtToIRSB( bb, st );
2755#if defined(VGA_x86)
2756 /*
2757 ** Make sure the EIP sim cpu register gets updated or our stack
2758 ** traces go a little Pete Tong...
2759 ** If this duplicates, the ir optimisation will knock one of them out.
2760 */
2761 addStmtToIRSB( bb, IRStmt_Put(OFFSET_x86_EIP,
2762 mkIRExpr_HWord(st->Ist.IMark.addr)));
2763#endif
2764 st = NULL;
2765 break;
2766
2767 case Ist_Put:
2768 /*
2769 ** Track the general purpose registers.
2770 */
2771 switch(st->Ist.Put.offset & mask)
2772 {
2773#if defined(VGA_x86)
2774 case OFFSET_x86_ESP:
2775#elif defined(VGA_amd64)
2776 case OFFSET_amd64_RSP:
2777#endif
2778 /*
2779 ** Save the stack register update - we will add it at the end of
2780 ** the instruction.
2781 */
2782 stackReg = st;
2783 st = NULL;
2784 break;
2785
2786#if defined(VGA_x86)
2787
2788 case OFFSET_x86_EAX:
2789 case OFFSET_x86_EBX:
2790 case OFFSET_x86_ECX:
2791 case OFFSET_x86_EDX:
2792 case OFFSET_x86_ESI:
2793 case OFFSET_x86_EDI:
2794 case OFFSET_x86_EBP:
2795
2796#if 0 //defined(O_MASTER_DEBUG)
2797 case OFFSET_x86_EIP:
2798#endif
2799
2800#elif defined(VGA_amd64)
2801
2802 case OFFSET_amd64_RAX:
2803 case OFFSET_amd64_RBX:
2804 case OFFSET_amd64_RCX:
2805 case OFFSET_amd64_RDX:
2806 case OFFSET_amd64_RSI:
2807 case OFFSET_amd64_RDI:
2808 case OFFSET_amd64_RBP:
2809 case OFFSET_amd64_R8:
2810 case OFFSET_amd64_R9:
2811 case OFFSET_amd64_R10:
2812 case OFFSET_amd64_R11:
2813 case OFFSET_amd64_R12:
2814 case OFFSET_amd64_R13:
2815 case OFFSET_amd64_R14:
2816 case OFFSET_amd64_R15:
2817
2818#if 0 //defined(O_MASTER_DEBUG)
2819 case OFFSET_amd64_RIP:
2820#endif
2821
sewardj4cff56d2007-11-10 13:33:56 +00002822#elif defined(VGA_ppc32) || defined(VGA_ppc64)
2823 default:
sewardjdcbb8d32007-11-26 21:34:30 +00002824 VG_(printf)("\nOmega does not currently work on PowerPC/POWER platforms."
sewardj4cff56d2007-11-10 13:33:56 +00002825 " Sorry.\n\n");
2826 VG_(exit)(0);
sewardj99a2ceb2007-11-09 12:30:36 +00002827#else
2828
2829#error Unknown arch
2830
2831#endif
2832 {
2833 if(typeOfIRExpr(bb->tyenv, st->Ist.Put.data) == type)
2834 {
2835 /*
2836 ** This is a put to a register in the simulated processor of data
2837 ** that could be a pointer.
2838 */
2839 di = unsafeIRDirty_0_N( 2, "o_omegaDetector", &o_omegaDetector,
2840 mkIRExprVec_2(mkIRExpr_HWord(st->Ist.Put.offset),
2841 st->Ist.Put.data));
2842 }
2843 else
2844 {
2845 /*
2846 ** There is no way that the data is a pointer but we still have
2847 ** to check if a pointer in a register will be overwritten.
2848 */
2849 di = unsafeIRDirty_0_N( 2, "o_omegaDetector", &o_omegaDetector,
2850 mkIRExprVec_2(mkIRExpr_HWord(st->Ist.Put.offset),
2851 mkIRExpr_HWord(0)));
2852 }
2853 /*
2854 ** Add in the original instruction first.
2855 */
2856 addStmtToIRSB( bb, st );
2857 addStmtToIRSB( bb, IRStmt_Dirty(di) );
2858 st = NULL;
2859 }
2860 break; // Register Cases
2861 }
2862 break; // Ist_Put
2863
2864#if defined(O_TRACK_LOADS)
2865 case Ist_Tmp:
2866 /*
2867 ** Debug to see how 'leaked' references survive.
2868 ** (From experience, mostly through illegal reads from
2869 ** free()ed blocks.)
2870 */
2871 if(st->Ist.Tmp.data->tag == Iex_Load)
2872 {
2873 if(typeOfIRExpr(bb->tyenv, st->Ist.Tmp.data->Iex.Load.addr) == type)
2874 {
2875 di = unsafeIRDirty_0_N( 1, "o_omegaLoadTracker", &o_omegaLoadTracker,
2876 mkIRExprVec_1(st->Ist.Tmp.data->Iex.Load.addr));
2877 /*
2878 ** Add in the original instruction first.
2879 */
2880 addStmtToIRSB( bb, st );
2881 addStmtToIRSB( bb, IRStmt_Dirty(di) );
2882 st = NULL;
2883 }
2884 }
2885 break;
2886#endif
2887
2888 default:
2889 break;
2890 }
2891
2892 /*
2893 ** Add in the original instruction if we havent already done so.
2894 */
2895 if(st)
2896 {
2897 addStmtToIRSB( bb, st );
2898 }
2899 }
2900
2901 if(stackReg)
2902 {
2903 addStmtToIRSB( bb, stackReg );
2904 stackReg = NULL;
2905 }
2906
2907 if(bb->jumpkind == Ijk_Ret)
2908 {
2909 /*
2910 ** The client is doing a return. This is the point to invalidate
2911 ** registers that belong to the callee, possibly generating a
2912 ** leak report. This is to catch things like foo(malloc(128)).
2913 */
2914
2915 di = unsafeIRDirty_0_N( 0, "o_omegaFunctionReturn",
2916 &o_omegaFunctionReturn,
2917 mkIRExprVec_0());
2918 /*
2919 ** Add in the new instruction.
2920 */
2921 addStmtToIRSB( bb, IRStmt_Dirty(di) );
2922 }
2923
2924 return bb;
2925}
2926
2927/*------------------------------------------------------------*/
2928/*--- Client Request Handling ---*/
2929/*------------------------------------------------------------*/
2930static Bool o_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
2931{
2932 if (!VG_IS_TOOL_USERREQ('O','M',arg[0]) &&
2933 VG_USERREQ__MALLOCLIKE_BLOCK != arg[0] &&
2934 VG_USERREQ__FREELIKE_BLOCK != arg[0])
2935 return False;
2936
2937 switch (arg[0])
2938 {
2939 case VG_USERREQ__ENTERING_MAIN:
2940 {
2941 /*
2942 ** Allow leak reports whilst inside main().
2943 */
2944 o_inhibitLeakDetect = False;
2945 }
2946 break;
2947
2948 case VG_USERREQ__LEAVING_MAIN:
2949 {
2950 /*
2951 ** Stop any more leak reports - they won't be helpfull.
2952 */
2953 o_inhibitLeakDetect = True;
2954
2955O_TRACE_OFF();
2956
2957 }
2958 break;
2959
2960 case VG_USERREQ__MALLOCLIKE_BLOCK:
2961 {
2962 if(o_onlyMallocLike)
2963 {
2964 /*
2965 ** Either we use malloc like block or we don't.
2966 ** Trying to auto track and do malloc like block handling together
2967 ** is asking for trouble.
2968 */
2969 Addr p = (Addr)arg[1];
2970 SizeT size = arg[2];
2971
2972 o_createMemBlock(tid, p, size);
2973 }
2974 }
2975 break;
2976
2977 case VG_USERREQ__FREELIKE_BLOCK:
2978 {
2979 if(o_onlyMallocLike)
2980 {
2981 /*
2982 ** Either we use malloc like block or we don't.
2983 ** Trying to auto track and do malloc like block handling together
2984 ** is asking for trouble.
2985 */
2986 Addr p = (Addr)arg[1];
2987
2988 o_destroyMemBlock(tid, p);
2989 }
2990 }
2991 break;
2992 }
2993
2994 return True;
2995}
2996
2997/*------------------------------------------------------------*/
2998/*--- Circular Reference Detection ---*/
2999/*------------------------------------------------------------*/
3000/*
3001** Check for circular references. This is where a memory block holds a
3002** reference to another memory block and vice versa but there are no
3003** references that are external. Like this:
3004
3005 typedef struct
3006 {
3007 void *linkedBlock;
3008 char padding[120];
3009 } block;
3010
3011 block *p1 = NULL;
3012 block *p2 = NULL;
3013
3014 p1 = (block *)malloc(sizeof(block));
3015 p2 = (block *)malloc(sizeof(block));
3016
3017 p1->linkedBlock = p2;
3018 p2->linkedBlock = p1;
3019
3020** As you can see, the blocks wont be seen to leak because they have a live
3021** reference but the reality is that without an external reference, these
3022** blocks are lost to the system.
3023**
3024** To perform this test, we go through the following stages:
3025**
3026** 1) Generate a binary tree of the memory covered by the allocated blocks
3027** 2) Check every tracked pointer of every allocated block and mark the
3028** block if any of them fall outside of an allocated block.
3029** 3) For each block with an external pointer, recursivly walk through the
3030** internal pointers to other blocks, marking the blocks as also having
3031** an external pointer.
3032** 4) Report any blocks without external references.
3033**
3034*/
3035
3036typedef struct _TreeNode{
3037 Addr start;
3038 Addr end;
3039 MemBlock *block;
3040 struct _TreeNode *left;
3041 struct _TreeNode *right;
3042} TreeNode;
3043
3044static TreeNode *o_treeRoot = NULL;
3045static MemBlock **o_memblockList = NULL;
3046static UInt o_memblockListCount = 0;
3047static BlockRecordList o_circularRecords = {NULL, NULL};
3048
3049static
3050TreeNode *o_findTreeNode(Addr addr, TreeNode *start, TreeNode ***parent)
3051{
3052 /*
3053 ** Find the treenode that this address falls within and return it.
3054 ** Return NULL if no matching node is found and return the parent if it is
3055 ** requested.
3056 */
3057
3058 /*
3059 ** If the treeRoot is NULL, we won't be finding anything.
3060 */
3061 if(!o_treeRoot)
3062 {
3063 if(parent)
3064 {
3065 *parent = &o_treeRoot;
3066 }
3067
3068 return NULL;
3069 }
3070
3071 /*
3072 ** The start should be a valid node.
3073 */
3074 tl_assert(start);
3075
3076 if((addr >= start->start) &&
3077 (addr <= start->end))
3078 {
3079 /*
3080 ** Found it
3081 */
3082 return start;
3083 }
3084
3085 if(addr < start->start)
3086 {
3087 /*
3088 ** Less than - go left if we can, return NULL if we can't.
3089 */
3090 if(start->left)
3091 {
3092 return o_findTreeNode(addr, start->left, parent);
3093 }
3094 else
3095 {
3096 if(parent)
3097 {
3098 *parent = &start->left;
3099 }
3100
3101 return NULL;
3102 }
3103 }
3104 else
3105 {
3106 /*
3107 ** Greater than - go right if we can, return NULL if we can't.
3108 */
3109 if(start->right)
3110 {
3111 return o_findTreeNode(addr, start->right, parent);
3112 }
3113 else
3114 {
3115 if(parent)
3116 {
3117 *parent = &start->right;
3118 }
3119
3120 return NULL;
3121 }
3122 }
3123
3124 VG_(tool_panic)("fell out of the binary tree");
3125}
3126
3127static UInt o_buildMemblockTree(void)
3128{
3129 /*
3130 ** Build a binary tree of the addresses covered by the memory blocks.
3131 ** We dont do anything to balance things so this could decompose to a
3132 ** linear structure. Thankfully, we are not in a time critical section.
3133 */
sewardj1c814ec2007-11-11 05:59:22 +00003134 UInt indx;
sewardj99a2ceb2007-11-09 12:30:36 +00003135
3136 o_memblockList = (MemBlock **)VG_(HT_to_array)(o_MemBlocks,
3137 &o_memblockListCount);
3138
sewardj1c814ec2007-11-11 05:59:22 +00003139 for(indx = 0; indx < o_memblockListCount; indx++)
sewardj99a2ceb2007-11-09 12:30:36 +00003140 {
3141 TreeNode **parent = NULL;
3142 TreeNode *tn = NULL;
sewardj1c814ec2007-11-11 05:59:22 +00003143 MemBlock *mb = o_memblockList[indx];
sewardj99a2ceb2007-11-09 12:30:36 +00003144
3145 /*
3146 ** Only process main blocks that havent leaked.
3147 */
3148 if(!mb->shadowing && !mb->leaked)
3149 {
3150 if(o_findTreeNode(mb->hdr.key, o_treeRoot, &parent))
3151 {
3152 VG_(tool_panic)("Failed to grow the binary tree.");
3153 }
3154
3155 /*
3156 ** We should have a pointer to the parent
3157 */
3158 tl_assert(parent);
3159
3160 /*
3161 ** Create and populate the new node
3162 */
3163 tn = VG_(malloc)(sizeof(TreeNode));
3164 VG_(memset)(tn, 0, sizeof(TreeNode));
3165
3166 tn->start = mb->hdr.key;
3167 tn->end = tn->start + mb->length;
3168 tn->block = mb;
3169
3170 /*
3171 ** Add this node into the parent node
3172 */
3173 *parent = tn;
3174 }
3175 }
3176
3177 return o_memblockListCount;
3178}
3179
3180static void o_checkExternalPointers(void)
3181{
sewardj1c814ec2007-11-11 05:59:22 +00003182 UInt indx;
sewardj99a2ceb2007-11-09 12:30:36 +00003183
sewardj1c814ec2007-11-11 05:59:22 +00003184 for(indx = 0; indx < o_memblockListCount; indx++)
sewardj99a2ceb2007-11-09 12:30:36 +00003185 {
sewardj1c814ec2007-11-11 05:59:22 +00003186 MemBlock *mb = o_memblockList[indx];
sewardj99a2ceb2007-11-09 12:30:36 +00003187
3188 /*
3189 ** Only check blocks that haven't leaked.
3190 ** We process through shadow blocks because we want the back references
3191 ** as they still point within the shadowed block.
3192 */
3193 if(!mb->leaked)
3194 {
3195 UInt pointerIndex;
3196
3197 for(pointerIndex = 0; pointerIndex < mb->refNum; pointerIndex++)
3198 {
3199 if(!o_findTreeNode(FROM_TRACKED_KEY(mb->pointers[pointerIndex]->hdr.key),
3200 o_treeRoot, NULL))
3201 {
3202 /*
3203 ** External reference. Mark the block and stop checking.
3204 */
3205 mb->external = 1;
3206 break;
3207 }
3208 }
3209 }
3210 }
3211}
3212
3213static void o_rippleExternelPointers(MemBlock *mb)
3214{
sewardj1c814ec2007-11-11 05:59:22 +00003215 UInt indx;
sewardj99a2ceb2007-11-09 12:30:36 +00003216
3217 if(!mb)
3218 {
3219 /*
3220 ** Iterate through the memory block list marking external blocks
3221 ** so that we dont process the same blocks twice.
3222 */
sewardj1c814ec2007-11-11 05:59:22 +00003223 for(indx = 0; indx < o_memblockListCount; indx++)
sewardj99a2ceb2007-11-09 12:30:36 +00003224 {
sewardj1c814ec2007-11-11 05:59:22 +00003225 if(o_memblockList[indx]->external > 0)
sewardj99a2ceb2007-11-09 12:30:36 +00003226 {
sewardj1c814ec2007-11-11 05:59:22 +00003227 o_memblockList[indx]->external = -1;
3228 o_rippleExternelPointers(o_memblockList[indx]);
sewardj99a2ceb2007-11-09 12:30:36 +00003229 }
3230 }
3231 }
3232 else
3233 {
3234 /*
3235 ** We are recursing.
3236 ** Follow any tracked pointers within our block, marking the target
3237 ** blocks as external and recursing on those blocks.
3238 */
3239 PBitContext pb;
3240 Addr a;
3241 TreeNode *tn = NULL;
3242
3243 a = o_firstPBit(&pb, mb->hdr.key, mb->length);
3244 while(a)
3245 {
3246 tn = o_findTreeNode(a, o_treeRoot, NULL);
3247
3248 /*
3249 ** We really should have a node
3250 */
3251 tl_assert(tn);
3252
3253 /*
3254 ** If we havent already done so, mark the block as external and
3255 ** processed then recurse on it.
3256 */
3257 if(tn->block->external >= 0)
3258 {
3259 tn->block->external = -1;
3260 o_rippleExternelPointers(tn->block);
3261 }
3262
3263 /*
3264 ** Get the next tracked pointer within this block.
3265 */
3266 a = o_nextPBit(&pb);
3267 }
3268 }
3269}
3270
3271static int o_reportCircularBlocks(void)
3272{
3273 int count = 0;
3274 BlockRecord *block = NULL;
sewardj1c814ec2007-11-11 05:59:22 +00003275 int indx;
sewardj99a2ceb2007-11-09 12:30:36 +00003276
3277 /*
3278 ** Iterate through the memory block list reporting any blocks not marked
3279 ** as external.
3280 ** We aggregate the list of blocks as many could come from the same context.
3281 */
sewardj1c814ec2007-11-11 05:59:22 +00003282 for(indx = 0; indx < o_memblockListCount; indx++)
sewardj99a2ceb2007-11-09 12:30:36 +00003283 {
sewardj1c814ec2007-11-11 05:59:22 +00003284 MemBlock * mb = o_memblockList[indx];
sewardj99a2ceb2007-11-09 12:30:36 +00003285 if(!mb->shadowing && !mb->leaked && mb->external == 0)
3286 {
3287 block = o_findBlockRecord(&o_circularRecords, mb->where, NULL);
3288
3289 if(block)
3290 {
3291 /*
3292 ** Just increment the counts.
3293 */
3294 block->bytes += mb->length;
3295 block->count++;
3296 }
3297 else
3298 {
3299 /*
3300 ** Create a new block and add it to the circular records list.
3301 */
3302 BlockRecord *item = VG_(malloc)(sizeof(BlockRecord));
3303 tl_assert(item);
3304
3305 item->count = 1;
3306 item->bytes = mb->length;
3307 item->next = item->prev = NULL;
3308 item->allocated = mb->where;
3309 item->leaked = NULL;
3310
3311 o_addBlockRecord(&o_circularRecords, item);
3312 }
3313 }
3314 }
3315
3316 /*
3317 ** Now report the blocks.
3318 */
3319 block = o_circularRecords.start;
3320 while(block)
3321 {
3322 if(!count)
3323 {
3324 VG_(message)(Vg_UserMsg, "The following blocks only have circular references from other blocks");
3325 }
3326 count++;
3327
3328 VG_(message)(Vg_UserMsg, " Circular loss record %d", count);
3329 VG_(message)(Vg_UserMsg, " Leaked %d (%p) bytes in %d block%sallocated",
3330 block->bytes,
3331 block->bytes,
3332 block->count,
3333 (block->count == 1) ? " " : "s ");
3334 VG_(pp_ExeContext)(block->allocated);
3335 VG_(message)(Vg_UserMsg,"");
3336
3337 /*
3338 ** Get the next block, if any.
3339 */
3340 block = block->next;
3341 }
3342
3343 return count;
3344}
3345
3346static int o_checkCircular(void)
3347{
3348 int count = 0;
3349
3350 /*
3351 ** If there is nothing in the tree, there is nothing to check.
3352 */
3353 if(o_buildMemblockTree())
3354 {
3355 o_checkExternalPointers();
3356 o_rippleExternelPointers(NULL);
3357 count = o_reportCircularBlocks();
3358 }
3359
3360 return count;
3361}
3362
3363static void o_fini(Int exitcode)
3364{
3365 /*
3366 ** Iterate through the leaked block record list,
3367 ** printing out the stats as we go.
3368 */
3369 UInt count = 1;
3370 BlockRecord *record = o_leakRecords.start;
3371
3372 VG_(message)(Vg_UserMsg,"");
3373 VG_(message)(Vg_UserMsg,"");
3374 VG_(message)(Vg_UserMsg,"Omega Leak Summary");
3375 VG_(message)(Vg_UserMsg,"==================");
3376
3377 while(record)
3378 {
3379 VG_(message)(Vg_UserMsg,
3380 "Loss Record %d: Leaked %d (%p) bytes in %d block%s",
3381 count, record->bytes, record->bytes, record->count,
3382 (record->count > 1) ? "s" : "");
3383 VG_(pp_ExeContext)(record->leaked);
3384 VG_(message)(Vg_UserMsg, " Block%s allocated",
3385 (record->count > 1) ? "s" : "");
3386 VG_(pp_ExeContext)(record->allocated);
3387 VG_(message)(Vg_UserMsg,"");
3388
3389 count++;
3390 record = record->next;
3391 }
3392
3393 if(o_showCircular)
3394 {
3395 /*
3396 ** Now check for circular references.
3397 */
3398 count += o_checkCircular();
3399 }
3400
3401 if(count == 1)
3402 {
3403 /*
3404 ** Nothing leaked - assure the user.
3405 */
3406 VG_(message)(Vg_UserMsg,"No leaks to report.");
3407 VG_(message)(Vg_UserMsg,"");
3408 }
3409
3410 /*
3411 ** Remove the leaked blocks from the live blocks count - they wont be
3412 ** coming back now...
3413 */
3414 o_stats.liveMemoryBlocks -= o_stats.memoryBlocksLeaked;
3415
3416 if(o_showInternStats)
3417 {
3418 VG_(printf)("\n\n\n"
3419 "Omega internal statistics summary:\n"
3420 " Tracked Pointers still live: %ld\n"
3421 " Tracked Pointers Allocated: %ld\n"
3422 " Memory Blocks still live: %ld\n"
3423 " Memory Blocks Allocated: %ld\n"
3424 " Shadow Memory Blocks Allocated: %ld\n"
3425 " Memory Blocks Leaked: %ld\n"
3426 " Memory Blocks Lost and Found: %ld\n"
3427 " pbitNodes: %ld\n\n",
3428 o_stats.liveTrackedPointers,
3429 o_stats.trackedPointersAllocated,
3430 o_stats.liveMemoryBlocks,
3431 o_stats.memoryBlocksAllocated,
3432 o_stats.shadowMemoryBlocksAllocated,
3433 o_stats.memoryBlocksLeaked,
3434 o_stats.memoryBlocksLostAndFound,
3435 o_stats.pbitNodes);
3436 }
3437}
3438
3439static Bool o_process_cmd_line_option(Char *arg)
3440{
3441 /*
3442 ** Setup our processing state based upon what the user would like us to do.
3443 */
3444 Int pbithash = 0;
3445 Int mbhash = 0;
3446 Int tphash = 0;
3447
3448 /*
3449 ** Expose the hash sizes for simple performance tweaking.
3450 */
3451 VG_NUM_CLO(arg, "--pbithashsize", pbithash);
3452 VG_NUM_CLO(arg, "--mbhashsize", mbhash);
3453 VG_NUM_CLO(arg, "--tphashsize", tphash);
3454
3455 /*
3456 ** Only tweak upwards for now.
3457 */
3458 if(pbithash > o_pbitNodeHashSize)
3459 o_pbitNodeHashSize = pbithash;
3460
3461 if(mbhash > o_memblockHashSize)
3462 o_memblockHashSize = mbhash;
3463
3464 if(tphash > o_trackedPointerHashSize)
3465 o_trackedPointerHashSize = tphash;
3466
3467 /*
3468 ** Check the flags.
3469 */
3470 if(VG_CLO_STREQ(arg, "--only-malloclike"))
3471 o_onlyMallocLike = True;
3472 else if(VG_CLO_STREQ(arg, "--show-indirect"))
3473 o_showIndirect = True;
3474 else if(VG_CLO_STREQ(arg, "--show-circular"))
3475 o_showCircular = True;
3476 else if(VG_CLO_STREQ(arg, "--show-hanging"))
3477 o_showHanging = True;
3478 else if(VG_CLO_STREQ(arg, "--show-intern-stats"))
3479 o_showInternStats = True;
3480 else if(VG_CLO_STREQ(arg, "--instant-reports"))
3481 o_showSummaryOnly = False;
3482 else if(VG_CLO_STREQ(arg, "--poison"))
3483 o_poison = True;
3484 else
3485 return VG_(replacement_malloc_process_cmd_line_option)(arg);
3486
3487 return True;
3488}
3489
3490static void o_print_usage(void)
3491{
3492 /*
3493 ** Tell the average user what we support.
3494 */
3495 VG_(printf)("");
3496 VG_(printf)(
3497" --only-malloclike only track blocks passed through the\n"
3498" MALLOCLIKE_BLOCK user request.\n"
3499" --show-indirect show indirect leaks from leaked blocks.\n"
3500" --show-circular show blocks that just have circular references.\n"
3501" --instant-reports show leaks as they happen, not just a summary.\n"
3502" --show-hanging show hanging pointers to the block being\n"
3503" deallocated.\n"
3504 );
3505
3506}
3507
3508static void o_print_debug_usage(void)
3509{
3510 /*
3511 ** Tell the inquisitive user what else we support.
3512 */
3513 VG_(printf)("");
3514 VG_(printf)(
3515" --show-intern-stats show some internal statistics from the run.\n"
3516"\n"
3517" IMPORTANT! These next settings must be PRIME NUMBERS\n"
3518"\n"
3519" --pbithashsize=<number> number of pbit nodes to allocate [%d]\n"
3520" --mbhashsize=<number> number of mem block nodes to allocate [%d]\n"
3521" --tphashsize=<number> number of tracked pointer nodes to allocate [%d]\n",
3522 o_pbitNodeHashSize,
3523 o_memblockHashSize,
3524 o_trackedPointerHashSize
3525 );
3526}
3527
3528static void o_memRemapSupport(Addr src, Addr dst, SizeT length)
3529{
3530 /*
3531 ** The track_copy_mem_remap callback has the src and dst the opposite
3532 ** way around to our duplicate tracked pointers function so this tiny
3533 ** wrapper twizzles them around.
3534 */
3535 o_duplicateTrackedPointers(dst, src, length);
3536}
3537
3538static void o_pre_clo_init(void)
3539{
3540 // Details
3541 VG_(details_name) ("exp-omega");
3542 VG_(details_version) ("RC1");
3543 VG_(details_description) ("an instant memory leak detector");
3544 VG_(details_copyright_author)("Copyright (C) 2006-2007, and GNU GPL'd, "
3545 "by Bryan Meredith.");
sewardj5b8ab5b2007-11-30 21:52:27 +00003546 VG_(details_bug_reports_to) ("richard.coe@med.ge.com");
sewardj99a2ceb2007-11-09 12:30:36 +00003547
3548 // Basic functions
3549 VG_(basic_tool_funcs) (o_post_clo_init,
3550 o_instrument,
3551 o_fini);
3552 // Needs
3553 VG_(needs_malloc_replacement) (o_malloc,
3554 o__builtin_new,
3555 o__builtin_vec_new,
3556 o_memalign,
3557 o_calloc,
3558 o_free,
3559 o__builtin_delete,
3560 o__builtin_vec_delete,
3561 o_realloc,
3562 0 );
3563 // Want stack unwinds
3564 VG_(track_die_mem_stack) (o_dieMemStack);
3565 // Need command line input
3566 VG_(needs_command_line_options) (o_process_cmd_line_option,
3567 o_print_usage,
3568 o_print_debug_usage);
3569 // Support MALLOCLIKE and FREELIKE
3570 VG_(needs_client_requests) (o_handle_client_request);
3571
3572 // Wholesale destruction of memory ranges
3573 VG_(track_copy_mem_remap) (o_memRemapSupport );
3574 VG_(track_die_mem_stack_signal)(o_killRange);
3575 VG_(track_die_mem_brk) (o_killRange);
3576 VG_(track_die_mem_munmap) (o_killRange);
3577
3578}
3579
3580VG_DETERMINE_INTERFACE_VERSION(o_pre_clo_init);
3581
3582/*--------------------------------------------------------------------*/
3583/*--- end ---*/
3584/*--------------------------------------------------------------------*/