
/*--------------------------------------------------------------------*/
/*--- The leak checker.                             mc_leakcheck.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2008 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_vki.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_execontext.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_libcsignal.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_signals.h"
#include "pub_tool_tooliface.h"     // Needed for mc_include.h

#include "mc_include.h"

#include <setjmp.h>                 // For jmp_buf

/*------------------------------------------------------------*/
/*--- An overview of leak checking.                        ---*/
/*------------------------------------------------------------*/

// Leak-checking is a directed-graph traversal problem.  The graph has
// two kinds of nodes:
// - root-set nodes:
//   - GP registers of all threads;
//   - valid, aligned, pointer-sized data words in valid client memory,
//     including stacks, but excluding words within client heap-allocated
//     blocks (they are excluded so that later on we can differentiate
//     between heap blocks that are indirectly leaked vs. directly leaked).
// - heap-allocated blocks.  A block is a mempool chunk or a malloc chunk
//   that doesn't contain a mempool chunk.  Nb: the terms "blocks" and
//   "chunks" are used interchangeably below.
//
// There are two kinds of edges:
// - start-pointers, i.e. pointers to the start of a block;
// - interior-pointers, i.e. pointers to the interior of a block.
//
// We use "pointers" rather than "edges" below.
//
// Root set nodes only point to blocks.  Blocks only point to blocks;
// a block can point to itself.
//
// The aim is to traverse the graph and determine the status of each block.
//
// There are 9 distinct cases.  See memcheck/docs/mc-manual.xml for details.
// Presenting all nine categories to the user is probably too much.
// Currently we do this:
// - definitely lost:  case 3
// - indirectly lost:  cases 4, 9
// - possibly lost:    cases 5..8
// - still reachable:  cases 1, 2
//
// It's far from clear that this is the best possible categorisation;  it's
// accreted over time without any central guiding principle.

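// As a concrete illustration of how the common categories arise in client
// code, here is a hypothetical sketch (not part of this file's build,
// hence the #if 0).  The 'mk' helper is modelled on the leak-cases.c test
// quoted further below; the rest is made up for this sketch.
#if 0
#include <stdlib.h>

typedef struct Node { struct Node* next; } Node;

static Node* mk(Node* next)     // allocate one heap block
{
   Node* n = malloc(sizeof(Node));
   n->next = next;
   return n;
}

static Node* g;                 // a root-set word (in the data segment)

int main(void)
{
   Node* p;
   g = mk(NULL);      // still reachable: start-pointer from a root-set word
   p = mk(mk(NULL));  // 'p' is a stack word, also in the root set
   p = NULL;          // the outer block is now definitely lost (no pointers
                      // to it); the inner one is indirectly lost (pointed
                      // to only by a lost block)
   return 0;
}
#endif
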
/*------------------------------------------------------------*/
/*--- XXX: Thoughts for improvement.                       ---*/
/*------------------------------------------------------------*/

// From the user's point of view:
// - If they aren't using interior-pointers, they just have to fix the
//   directly lost blocks, and the indirectly lost ones will be fixed as
//   part of that.  Any possibly lost blocks will just be due to random
//   pointer garbage and can be ignored.
//
// - If they are using interior-pointers, the fact that they currently are not
//   being told which ones might be directly lost vs. indirectly lost makes
//   it hard to know where to begin.
//
102// All this makes me wonder if new option is warranted:
103// --follow-interior-pointers. By default it would be off, the leak checker
104// wouldn't follow interior-pointers and there would only be 3 categories:
105// R, DL, IL.
106//
107// If turned on, then it would show 7 categories (R, DL, IL, DR/DL, IR/IL,
108// IR/IL/DL, IL/DL). That output is harder to understand but it's your own
109// damn fault for using interior-pointers...
110//
111// ----
112//
113// Also, why are two blank lines printed between each loss record?
114//
115// ----
116//
117// Also, --show-reachable is a bad name because it also turns on the showing
118// of indirectly leaked blocks(!) It would be better named --show-all or
119// --show-all-heap-blocks, because that's the end result.
120//
121// ----
122//
123// Also, the VALGRIND_LEAK_CHECK and VALGRIND_QUICK_LEAK_CHECK aren't great
124// names. VALGRIND_FULL_LEAK_CHECK and VALGRIND_SUMMARY_LEAK_CHECK would be
125// better.
126//
127// ----
128//
129// Also, VALGRIND_COUNT_LEAKS and VALGRIND_COUNT_LEAK_BLOCKS aren't great as
130// they combine direct leaks and indirect leaks into one. New, more precise
131// ones (they'll need new names) would be good. If more categories are
132// used, as per the --follow-interior-pointers option, they should be
133// updated accordingly. And they should use a struct to return the values.
134//
135// ----
136//
137// Also, for this case:
138//
139// (4) p4 BBB ---> AAA
140//
141// BBB is definitely directly lost. AAA is definitely indirectly lost.
142// Here's the relevant loss records printed for a full check (each block is
143// 16 bytes):
144//
145// ==20397== 16 bytes in 1 blocks are indirectly lost in loss record 9 of 15
146// ==20397== at 0x4C2694E: malloc (vg_replace_malloc.c:177)
147// ==20397== by 0x400521: mk (leak-cases.c:49)
148// ==20397== by 0x400578: main (leak-cases.c:72)
149//
150// ==20397== 32 (16 direct, 16 indirect) bytes in 1 blocks are definitely
151// lost in loss record 14 of 15
152// ==20397== at 0x4C2694E: malloc (vg_replace_malloc.c:177)
153// ==20397== by 0x400521: mk (leak-cases.c:49)
154// ==20397== by 0x400580: main (leak-cases.c:72)
155//
156// The first one is fine -- it describes AAA.
157//
158// The second one is for BBB. It's correct in that 16 bytes in 1 block are
159// directly lost. It's also correct that 16 are indirectly lost as a result,
160// but it means that AAA is being counted twice in the loss records. (It's
161// not, thankfully, counted twice in the summary counts). Argh.
162//
163// This would be less confusing for the second one:
164//
165// ==20397== 16 bytes in 1 blocks are definitely lost in loss record 14
166// of 15 (and 16 bytes in 1 block are indirectly lost as a result; they
167// are mentioned elsewhere (if --show-reachable=yes is given!))
168// ==20397== at 0x4C2694E: malloc (vg_replace_malloc.c:177)
169// ==20397== by 0x400521: mk (leak-cases.c:49)
170// ==20397== by 0x400580: main (leak-cases.c:72)
171//
172// But ideally we'd present the loss record for the directly lost block and
173// then the resultant indirectly lost blocks and make it clear the
174// dependence. Double argh.
175
/*------------------------------------------------------------*/
/*--- The actual algorithm.                                ---*/
/*------------------------------------------------------------*/

// - Find all the blocks (a.k.a. chunks) to check.  Mempool chunks require
//   some special treatment because they can be within malloc'd blocks.
// - Scan every word in the root set (GP registers and valid
//   non-heap memory words).
//   - First, we skip if it doesn't point to valid memory.
//   - Then, we see if it points to the start or interior of a block.  If
//     so, we push the block onto the mark stack and mark it as having been
//     reached.
// - Then, we process the mark stack, repeating the scanning for each block;
//   this can push more blocks onto the mark stack.  We repeat until the
//   mark stack is empty.  Each block is marked as definitely or possibly
//   reachable, depending on whether interior-pointers were required to
//   reach it.
// - At this point we know for every block if it's reachable or not.
// - We then push each unreached block onto the mark stack, using the block
//   number as the "clique" number.
// - We process the mark stack again, this time grouping blocks into cliques
//   in order to facilitate the directly/indirectly lost categorisation.
// - We group blocks by their ExeContexts and categorisation, and print them
//   if --leak-check=full.  We also print summary numbers.
//
// A note on "cliques":
// - A directly lost block is one with no pointers to it.  An indirectly
//   lost block is one that is pointed to by a directly or indirectly lost
//   block.
// - Each directly lost block has zero or more indirectly lost blocks
//   hanging off it.  All these blocks together form a "clique".  The
//   directly lost block is called the "clique leader".  The clique number
//   is the number (in lc_chunks[]) of the clique leader.
// - Actually, a directly lost block may be pointed to if it's part of a
//   cycle.  In that case, there may be more than one choice for the clique
//   leader, and the choice is arbitrary.  Eg. if you have A-->B and B-->A
//   either A or B could be the clique leader.
// - Cliques cannot overlap, and will be truncated to avoid this.  Eg. if we
//   have A-->C and B-->C, the two cliques will be {A,C} and {B}, or {A} and
//   {B,C} (again the choice is arbitrary).  This is because we don't want
//   to count a block as indirectly lost more than once.
//
// A note on 'is_prior_definite':
// - This is a boolean used in various places that indicates if the chain
//   up to the prior node (prior to the one being considered) is definite.
// - In the clique == -1 case:
//   - if True it means that the prior node is a root-set node, or that the
//     prior node is a block which is reachable from the root-set via
//     start-pointers.
//   - if False it means that the prior node is a block that is only
//     reachable from the root-set via a path including at least one
//     interior-pointer.
// - In the clique != -1 case, currently it's always True because we treat
//   start-pointers and interior-pointers the same for direct/indirect leak
//   checking.  If we added a PossibleIndirectLeak state then this would
//   change.

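// A minimal sketch (not compiled; guarded out with #if 0) of the two
// phases just described, using the helpers defined below.  'scan_root_set'
// is a hypothetical stand-in for the segment-scanning loop that actually
// lives in MC_(detect_memory_leaks):
#if 0
static void leak_check_sketch(void)
{
   Int i;

   // Phase 1: trace everything reachable from the root set.  Clique -1
   // means "no clique"; traced blocks end up Reachable or Possible.
   scan_root_set(/*is_prior_definite*/True, /*clique*/-1);
   lc_process_markstack(/*clique*/-1);

   // Phase 2: each block still Unreached becomes a clique leader; the
   // as-yet-Unreached blocks found from it become IndirectLeak and their
   // sizes accumulate in the leader's indirect_szB.
   for (i = 0; i < lc_n_chunks; i++) {
      if (lc_extras[i].state == Unreached) {
         lc_push(i, lc_chunks[i]);
         lc_process_markstack(/*clique*/i);
      }
   }
}
#endif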

// Define to debug the memory-leak-detector.
#define VG_DEBUG_LEAKCHECK 0
#define VG_DEBUG_CLIQUE    0

#define UMSG(args...) VG_(message)(Vg_UserMsg, ##args)

/*------------------------------------------------------------*/
/*--- Getting the initial chunks, and searching them.      ---*/
/*------------------------------------------------------------*/

// Compare the MC_Chunks by 'data' (i.e. the address of the block).
static Int compare_MC_Chunks(void* n1, void* n2)
{
   MC_Chunk* mc1 = *(MC_Chunk**)n1;
   MC_Chunk* mc2 = *(MC_Chunk**)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}

#if VG_DEBUG_LEAKCHECK
// Used to sanity-check the fast binary-search mechanism.
static
Int find_chunk_for_OLD ( Addr       ptr,
                         MC_Chunk** chunks,
                         Int        n_chunks )

{
   Int  i;
   Addr a_lo, a_hi;
   PROF_EVENT(70, "find_chunk_for_OLD");
   for (i = 0; i < n_chunks; i++) {
      PROF_EVENT(71, "find_chunk_for_OLD(loop)");
      a_lo = chunks[i]->data;
      a_hi = ((Addr)chunks[i]->data) + chunks[i]->szB;
      if (a_lo <= ptr && ptr < a_hi)
         return i;
   }
   return -1;
}
#endif

// Find the i such that ptr points at or inside the block described by
// chunks[i].  Return -1 if none found.  This assumes that chunks[]
// has been sorted on the 'data' field.
static
Int find_chunk_for ( Addr       ptr,
                     MC_Chunk** chunks,
                     Int        n_chunks )
{
   Addr a_mid_lo, a_mid_hi;
   Int lo, mid, hi, retVal;
   // VG_(printf)("find chunk for %p = ", ptr);
   retVal = -1;
   lo = 0;
   hi = n_chunks-1;
   while (True) {
      // Invariant: current unsearched space is from lo to hi, inclusive.
      if (lo > hi) break; // not found

      mid      = (lo + hi) / 2;
      a_mid_lo = chunks[mid]->data;
      a_mid_hi = chunks[mid]->data + chunks[mid]->szB;
      // Extent of block 'mid' is [a_mid_lo .. a_mid_hi).
      // Special-case zero-sized blocks - treat them as if they had
      // size 1.  Not doing so causes them to not cover any address
      // range at all and so will never be identified as the target of
      // any pointer, which causes them to be incorrectly reported as
      // definitely leaked.
      if (chunks[mid]->szB == 0)
         a_mid_hi++;

      if (ptr < a_mid_lo) {
         hi = mid-1;
         continue;
      }
      if (ptr >= a_mid_hi) {
         lo = mid+1;
         continue;
      }
      tl_assert(ptr >= a_mid_lo && ptr < a_mid_hi);
      retVal = mid;
      break;
   }

#  if VG_DEBUG_LEAKCHECK
   tl_assert(retVal == find_chunk_for_OLD ( ptr, chunks, n_chunks ));
#  endif
   // VG_(printf)("%d\n", retVal);
   return retVal;
}


static MC_Chunk**
find_active_chunks(UInt* pn_chunks)
{
   // Our goal is to construct a set of chunks that includes every
   // mempool chunk, and every malloc region that *doesn't* contain a
   // mempool chunk.
   MC_Mempool *mp;
   MC_Chunk **mallocs, **chunks, *mc;
   UInt n_mallocs, n_chunks, m, s;
   Bool *malloc_chunk_holds_a_pool_chunk;

   // First we collect all the malloc chunks into an array and sort it.
   // We do this because we want to query the chunks by interior
   // pointers, requiring binary search.
   mallocs = (MC_Chunk**) VG_(HT_to_array)( MC_(malloc_list), &n_mallocs );
   if (n_mallocs == 0) {
      tl_assert(mallocs == NULL);
      *pn_chunks = 0;
      return NULL;
   }
   VG_(ssort)(mallocs, n_mallocs, sizeof(VgHashNode*), compare_MC_Chunks);

   // Then we build an array containing a Bool for each malloc chunk,
   // indicating whether it contains any mempools.
   malloc_chunk_holds_a_pool_chunk = VG_(calloc)( "mc.fas.1",
                                                  n_mallocs, sizeof(Bool) );
   n_chunks = n_mallocs;

   // Then we loop over the mempool tables.  For each chunk in each
   // pool, we set the entry in the Bool array corresponding to the
   // malloc chunk containing the mempool chunk.
   VG_(HT_ResetIter)(MC_(mempool_list));
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      VG_(HT_ResetIter)(mp->chunks);
      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {

         // We'll need to record this chunk.
         n_chunks++;

         // Possibly invalidate the malloc holding the beginning of this chunk.
         m = find_chunk_for(mc->data, mallocs, n_mallocs);
         if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
            tl_assert(n_chunks > 0);
            n_chunks--;
            malloc_chunk_holds_a_pool_chunk[m] = True;
         }

         // Possibly invalidate the malloc holding the end of this chunk.
         if (mc->szB > 1) {
            m = find_chunk_for(mc->data + (mc->szB - 1), mallocs, n_mallocs);
            if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
               tl_assert(n_chunks > 0);
               n_chunks--;
               malloc_chunk_holds_a_pool_chunk[m] = True;
            }
         }
      }
   }
   tl_assert(n_chunks > 0);

   // Create final chunk array.
   chunks = VG_(malloc)("mc.fas.2", sizeof(VgHashNode*) * (n_chunks));
   s = 0;

   // Copy the mempool chunks and the non-marked malloc chunks into a
   // combined array of chunks.
   VG_(HT_ResetIter)(MC_(mempool_list));
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      VG_(HT_ResetIter)(mp->chunks);
      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
         tl_assert(s < n_chunks);
         chunks[s++] = mc;
      }
   }
   for (m = 0; m < n_mallocs; ++m) {
      if (!malloc_chunk_holds_a_pool_chunk[m]) {
         tl_assert(s < n_chunks);
         chunks[s++] = mallocs[m];
      }
   }
   tl_assert(s == n_chunks);

   // Free temporaries.
   VG_(free)(mallocs);
   VG_(free)(malloc_chunk_holds_a_pool_chunk);

   *pn_chunks = n_chunks;

   return chunks;
}

/*------------------------------------------------------------*/
/*--- The leak detector proper.                            ---*/
/*------------------------------------------------------------*/

// Holds extra info about each block during leak checking.
typedef
   struct {
      UInt  state:2;    // Reachedness.
      SizeT indirect_szB : (sizeof(SizeT)*8)-2;  // If Unreached, how many
                                                 //   bytes are unreachable
                                                 //   from here.
   }
   LC_Extra;

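// A hypothetical compile-time check (not in the build) of the intent of
// the bitfield above: on common ABIs the two fields should pack into a
// single SizeT-sized word, keeping the per-block overhead at one word.
#if 0
typedef char LC_Extra_packs_into_one_word
             [ sizeof(LC_Extra) == sizeof(SizeT) ? 1 : -1 ];
#endif
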
// An array holding pointers to every chunk we're checking.  Sorted by address.
static MC_Chunk** lc_chunks;
// How many chunks we're dealing with.
static Int        lc_n_chunks;

// This has the same number of entries as lc_chunks, and each entry
// in lc_chunks corresponds with the entry here (ie. lc_chunks[i] and
// lc_extras[i] describe the same block).
static LC_Extra* lc_extras;

// Records chunks that are currently being processed.  Each element in the
// stack is an index into lc_chunks and lc_extras.  Its size is
// 'lc_n_chunks' because in the worst case that's how many chunks could be
// pushed onto it (actually I think the maximum is lc_n_chunks-1 but let's
// be conservative).
static Int* lc_markstack;
// The index of the top element of the stack; -1 if the stack is empty, 0 if
// the stack has one element, 1 if it has two, etc.
static Int  lc_markstack_top;

// Keeps track of how many bytes of memory we've scanned, for printing.
// (Nb: We don't keep track of how many register bytes we've scanned.)
static SizeT lc_scanned_szB;


SizeT MC_(bytes_leaked)     = 0;
SizeT MC_(bytes_indirect)   = 0;
SizeT MC_(bytes_dubious)    = 0;
SizeT MC_(bytes_reachable)  = 0;
SizeT MC_(bytes_suppressed) = 0;

SizeT MC_(blocks_leaked)     = 0;
SizeT MC_(blocks_indirect)   = 0;
SizeT MC_(blocks_dubious)    = 0;
SizeT MC_(blocks_reachable)  = 0;
SizeT MC_(blocks_suppressed) = 0;

/* TODO: GIVE THIS A PROPER HOME
   TODO: MERGE THIS WITH DUPLICATE IN m_main.c and coredump-elf.c.
   Extract from aspacem a vector of the current segment start
   addresses.  The vector is dynamically allocated and should be freed
   by the caller when done.  REQUIRES m_mallocfree to be running.
   Writes the number of addresses required into *n_acquired. */

static Addr* get_seg_starts ( /*OUT*/Int* n_acquired )
{
   Addr* starts;
   Int   n_starts, r = 0;

   n_starts = 1;
   while (True) {
      starts = VG_(malloc)( "mc.gss.1", n_starts * sizeof(Addr) );
      if (starts == NULL)
         break;
      r = VG_(am_get_segment_starts)( starts, n_starts );
      if (r >= 0)
         break;
      VG_(free)(starts);
      n_starts *= 2;
   }

   if (starts == NULL) {
      *n_acquired = 0;
      return NULL;
   }

   *n_acquired = r;
   return starts;
}

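// A hypothetical caller sketch (the real caller is in
// MC_(detect_memory_leaks) below); it mainly shows the documented
// contract that the returned vector must be freed by the caller:
#if 0
{
   Int   n_seg_starts, i;
   Addr* seg_starts = get_seg_starts( &n_seg_starts );
   for (i = 0; i < n_seg_starts; i++) {
      NSegment const* seg = VG_(am_find_nsegment)( seg_starts[i] );
      /* ... examine 'seg' ... */
   }
   VG_(free)(seg_starts);
}
#endif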

// Determines if a pointer is to a chunk.  Returns the chunk number et al
// via call-by-reference.
static Bool
lc_is_a_chunk_ptr(Addr ptr, Int* pch_no, MC_Chunk** pch, LC_Extra** pex)
{
   Int ch_no;
   MC_Chunk* ch;
   LC_Extra* ex;

   // Quick filter.
   if (!VG_(am_is_valid_for_client)(ptr, 1, VKI_PROT_READ)) {
      return False;
   } else {
      ch_no = find_chunk_for(ptr, lc_chunks, lc_n_chunks);
      tl_assert(ch_no >= -1 && ch_no < lc_n_chunks);

      if (ch_no == -1) {
         return False;
      } else {
         // Ok, we've found a pointer to a chunk.  Get the MC_Chunk and its
         // LC_Extra.
         ch = lc_chunks[ch_no];
         ex = &(lc_extras[ch_no]);

         tl_assert(ptr >= ch->data);
         tl_assert(ptr < ch->data + ch->szB + (ch->szB==0 ? 1 : 0));

         if (VG_DEBUG_LEAKCHECK)
            VG_(printf)("ptr=%#lx -> block %d\n", ptr, ch_no);

         *pch_no = ch_no;
         *pch    = ch;
         *pex    = ex;

         return True;
      }
   }
}

// Push a chunk (well, just its index) onto the mark stack.
static void lc_push(Int ch_no, MC_Chunk* ch)
{
   if (0) {
      VG_(printf)("pushing %#lx-%#lx\n", ch->data, ch->data + ch->szB);
   }
   lc_markstack_top++;
   tl_assert(lc_markstack_top < lc_n_chunks);
   lc_markstack[lc_markstack_top] = ch_no;
}

// Pop the index of the chunk on the top of the mark stack into *ret.
// Returns False if the stack is empty, True otherwise.
static Bool lc_pop(Int* ret)
{
   if (-1 == lc_markstack_top) {
      return False;
   } else {
      tl_assert(0 <= lc_markstack_top && lc_markstack_top < lc_n_chunks);
      *ret = lc_markstack[lc_markstack_top];
      lc_markstack_top--;
      return True;
   }
}


// If 'ptr' is pointing to a heap-allocated block which hasn't been seen
// before, push it onto the mark stack.
static void
lc_push_without_clique_if_a_chunk_ptr(Addr ptr, Bool is_prior_definite)
{
   Int ch_no;
   MC_Chunk* ch;
   LC_Extra* ex;

   if ( ! lc_is_a_chunk_ptr(ptr, &ch_no, &ch, &ex) )
      return;

   // Only push it if it hasn't been seen previously.
   if (ex->state == Unreached) {
      lc_push(ch_no, ch);
   }

   // Possibly upgrade the state, ie. one of:
   // - Unreached --> Possible
   // - Unreached --> Reachable
   // - Possible  --> Reachable
   if (ptr == ch->data && is_prior_definite) {
      // 'ptr' points to the start of the block, and the prior node is
      // definite, which means that this block is definitely reachable.
      ex->state = Reachable;

   } else if (ex->state == Unreached) {
      // Either 'ptr' is an interior-pointer, or the prior node isn't definite,
596 // which means that we can only mark this block as possibly reachable.
597 ex->state = Possible;
598 }
599}
600
601static void
602lc_push_if_a_chunk_ptr_register(Addr ptr)
603{
604 lc_push_without_clique_if_a_chunk_ptr(ptr, /*is_prior_definite*/True);
605}
606
607// If ptr is pointing to a heap-allocated block which hasn't been seen
608// before, push it onto the mark stack. Clique is the index of the
609// clique leader.
610static void
611lc_push_with_clique_if_a_chunk_ptr(Addr ptr, Int clique)
612{
613 Int ch_no;
614 MC_Chunk* ch;
615 LC_Extra* ex;
616
617 tl_assert(0 <= clique && clique < lc_n_chunks);
618
619 if ( ! lc_is_a_chunk_ptr(ptr, &ch_no, &ch, &ex) )
620 return;
621
622 // If it's not Unreached, it's already been handled so ignore it.
623 // If ch_no==clique, it's the clique leader, which means this is a cyclic
624 // structure; again ignore it because it's already been handled.
625 if (ex->state == Unreached && ch_no != clique) {
626 // Note that, unlike reachable blocks, we currently don't distinguish
627 // between start-pointers and interior-pointers here. We probably
628 // should, though.
629 ex->state = IndirectLeak;
630 lc_push(ch_no, ch);
631
632 // Add the block to the clique, and add its size to the
633 // clique-leader's indirect size. Also, if the new block was
634 // itself a clique leader, it isn't any more, so add its
635 // indirect_szB to the new clique leader.
636 if (VG_DEBUG_CLIQUE) {
637 if (ex->indirect_szB > 0)
638 VG_(printf)(" clique %d joining clique %d adding %lu+%lu\n",
639 ch_no, clique, (SizeT)ch->szB, (SizeT)ex->indirect_szB);
640 else
641 VG_(printf)(" block %d joining clique %d adding %lu\n",
642 ch_no, clique, (SizeT)ch->szB);
643 }
644
645 lc_extras[clique].indirect_szB += ch->szB;
646 lc_extras[clique].indirect_szB += ex->indirect_szB;
647 ex->indirect_szB = 0; // Shouldn't matter.
648 }
649}
650
651static void
652lc_push_if_a_chunk_ptr(Addr ptr, Int clique, Bool is_prior_definite)
653{
654 if (-1 == clique)
655 lc_push_without_clique_if_a_chunk_ptr(ptr, is_prior_definite);
656 else
657 lc_push_with_clique_if_a_chunk_ptr(ptr, clique);
sewardjb5f6f512005-03-10 23:59:00 +0000658}
659
sewardj45d94cc2005-04-20 14:44:11 +0000660
njn8225cc02009-03-09 22:52:24 +0000661static jmp_buf memscan_jmpbuf;
sewardjb5f6f512005-03-10 23:59:00 +0000662
njn8225cc02009-03-09 22:52:24 +0000663static
664void scan_all_valid_memory_catcher ( Int sigNo, Addr addr )
sewardjb5f6f512005-03-10 23:59:00 +0000665{
njn8225cc02009-03-09 22:52:24 +0000666 if (0)
667 VG_(printf)("OUCH! sig=%d addr=%#lx\n", sigNo, addr);
668 if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS)
669 __builtin_longjmp(memscan_jmpbuf, 1);
670}
671
// Scan a block of memory between [start, start+len).  This range may
// be bogus, inaccessible, or otherwise strange; we deal with it.  For each
// valid aligned word we check if it's a pointer to a chunk, and push the
// chunk onto the mark stack if so.
static void
lc_scan_memory(Addr start, SizeT len, Bool is_prior_definite, Int clique)
{
   Addr ptr = VG_ROUNDUP(start,     sizeof(Addr));
   Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
   vki_sigset_t sigmask;

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("scan %#lx-%#lx (%lu)\n", start, end, len);

   VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
   VG_(set_fault_catcher)(scan_all_valid_memory_catcher);

   // We might be in the middle of a page.  Do a cheap check to see if
   // it's valid;  if not, skip onto the next page.
   if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ))
      ptr = VG_PGROUNDUP(ptr+1);        // First page is bad - skip it.

   while (ptr < end) {
      Addr addr;

      // Skip invalid chunks.
      if ( ! MC_(is_within_valid_secondary)(ptr) ) {
         ptr = VG_ROUNDUP(ptr+1, SM_SIZE);
         continue;
      }

      // Look to see if this page seems reasonable.
      if ((ptr % VKI_PAGE_SIZE) == 0) {
         if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ)) {
            ptr += VKI_PAGE_SIZE;      // Bad page - skip it.
            continue;
         }
      }

      if (__builtin_setjmp(memscan_jmpbuf) == 0) {
         if ( MC_(is_valid_aligned_word)(ptr) ) {
            lc_scanned_szB += sizeof(Addr);
            addr = *(Addr *)ptr;
            // If we get here, the scanned word is in valid memory.  Now
            // let's see if its contents point to a chunk.
            lc_push_if_a_chunk_ptr(addr, clique, is_prior_definite);
         } else if (0 && VG_DEBUG_LEAKCHECK) {
            VG_(printf)("%#lx not valid\n", ptr);
         }
         ptr += sizeof(Addr);
      } else {
         // We need to restore the signal mask, because we were
         // longjmped out of a signal handler.
         VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);

         ptr = VG_PGROUNDUP(ptr+1);     // Bad page - skip it.
      }
   }

   VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
   VG_(set_fault_catcher)(NULL);
}


// Process the mark stack until empty.
static void lc_process_markstack(Int clique)
{
   Int  top;
   Bool is_prior_definite;

   while (lc_pop(&top)) {
      tl_assert(top >= 0 && top < lc_n_chunks);

      // See comment about 'is_prior_definite' at the top to understand this.
      is_prior_definite = ( Possible != lc_extras[top].state );

      lc_scan_memory(lc_chunks[top]->data, lc_chunks[top]->szB,
                     is_prior_definite, clique);
   }
}

static void print_results(ThreadId tid, Bool is_full_check)
{
   Int         i, n_lossrecords;
   LossRecord* errlist;
   LossRecord* p;
   Bool        is_suppressed;

   // Common up the lost blocks so we can print sensible error messages.
   n_lossrecords = 0;
   errlist       = NULL;
   for (i = 0; i < lc_n_chunks; i++) {
      MC_Chunk* ch = lc_chunks[i];
      LC_Extra* ex = &(lc_extras)[i];

      for (p = errlist; p != NULL; p = p->next) {
         if (p->loss_mode == ex->state
             && VG_(eq_ExeContext) ( MC_(clo_leak_resolution),
                                     p->allocated_at,
                                     ch->where) ) {
            break;
         }
      }
      if (p != NULL) {
         p->num_blocks++;
         p->total_bytes   += ch->szB;
         p->indirect_szB  += ex->indirect_szB;
      } else {
         n_lossrecords++;
         p = VG_(malloc)( "mc.fr.1", sizeof(LossRecord));
         p->loss_mode     = ex->state;
         p->allocated_at  = ch->where;
         p->total_bytes   = ch->szB;
         p->indirect_szB  = ex->indirect_szB;
         p->num_blocks    = 1;
         p->next          = errlist;
         errlist          = p;
      }
   }

   MC_(blocks_leaked)     = MC_(bytes_leaked)     = 0;
   MC_(blocks_indirect)   = MC_(bytes_indirect)   = 0;
   MC_(blocks_dubious)    = MC_(bytes_dubious)    = 0;
   MC_(blocks_reachable)  = MC_(bytes_reachable)  = 0;
   MC_(blocks_suppressed) = MC_(bytes_suppressed) = 0;

   // Print out the commoned-up blocks and collect summary stats.
   for (i = 0; i < n_lossrecords; i++) {
      Bool        print_record;
      LossRecord* p_min = NULL;
      SizeT       n_min = ~(0x0L);
      for (p = errlist; p != NULL; p = p->next) {
         if (p->num_blocks > 0 && p->total_bytes < n_min) {
            n_min = p->total_bytes + p->indirect_szB;
            p_min = p;
         }
      }
      tl_assert(p_min != NULL);

      // Rules for printing:
      // - We don't show suppressed loss records ever (and that's controlled
      //   within the error manager).
      // - We show non-suppressed loss records that are not "reachable" if
      //   --leak-check=yes.
      // - We show all non-suppressed loss records if --leak-check=yes and
      //   --show-reachable=yes.
      //
      // Nb: here "reachable" means Reachable *or* IndirectLeak;  note that
      // this is different to "still reachable" used elsewhere because it
      // includes indirectly lost blocks!
      //
      print_record = is_full_check &&
                     ( MC_(clo_show_reachable) ||
                       Unreached == p_min->loss_mode ||
                       Possible  == p_min->loss_mode );
      is_suppressed =
         MC_(record_leak_error) ( tid, i+1, n_lossrecords, p_min,
                                  print_record );

      if (is_suppressed) {
         MC_(blocks_suppressed) += p_min->num_blocks;
         MC_(bytes_suppressed)  += p_min->total_bytes;

      } else if (Unreached == p_min->loss_mode) {
         MC_(blocks_leaked) += p_min->num_blocks;
         MC_(bytes_leaked)  += p_min->total_bytes;

      } else if (IndirectLeak == p_min->loss_mode) {
         MC_(blocks_indirect) += p_min->num_blocks;
         MC_(bytes_indirect)  += p_min->total_bytes;

      } else if (Possible == p_min->loss_mode) {
         MC_(blocks_dubious) += p_min->num_blocks;
         MC_(bytes_dubious)  += p_min->total_bytes;

      } else if (Reachable == p_min->loss_mode) {
         MC_(blocks_reachable) += p_min->num_blocks;
         MC_(bytes_reachable)  += p_min->total_bytes;

      } else {
         VG_(tool_panic)("unknown loss mode");
      }
      p_min->num_blocks = 0;
   }

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
      UMSG("");
      UMSG("LEAK SUMMARY:");
      UMSG("   definitely lost: %'lu bytes in %'lu blocks.",
           MC_(bytes_leaked), MC_(blocks_leaked) );
      UMSG("   indirectly lost: %'lu bytes in %'lu blocks.",
           MC_(bytes_indirect), MC_(blocks_indirect) );
      UMSG("     possibly lost: %'lu bytes in %'lu blocks.",
           MC_(bytes_dubious), MC_(blocks_dubious) );
      UMSG("   still reachable: %'lu bytes in %'lu blocks.",
           MC_(bytes_reachable), MC_(blocks_reachable) );
      UMSG("        suppressed: %'lu bytes in %'lu blocks.",
           MC_(bytes_suppressed), MC_(blocks_suppressed) );
      if (!is_full_check &&
          (MC_(blocks_leaked) + MC_(blocks_indirect) +
           MC_(blocks_dubious) + MC_(blocks_reachable)) > 0) {
         UMSG("Rerun with --leak-check=full to see details of leaked memory.");
      }
      if (is_full_check &&
          MC_(blocks_reachable) > 0 && !MC_(clo_show_reachable))
      {
         UMSG("Reachable blocks (those to which a pointer was found) are not shown.");
         UMSG("To see them, rerun with: --leak-check=full --show-reachable=yes");
      }
   }
}

/*------------------------------------------------------------*/
/*--- Top-level entry point.                               ---*/
/*------------------------------------------------------------*/

void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckMode mode )
{
   Int i;

   tl_assert(mode != LC_Off);

   // Get the chunks, stop if there were none.
   lc_chunks = find_active_chunks(&lc_n_chunks);
   if (lc_n_chunks == 0) {
      tl_assert(lc_chunks == NULL);
      if (VG_(clo_verbosity) >= 1 && !VG_(clo_xml)) {
         UMSG("All heap blocks were freed -- no leaks are possible.");
      }
      return;
   }

   // Sort the array so blocks are in ascending order in memory.
   VG_(ssort)(lc_chunks, lc_n_chunks, sizeof(VgHashNode*), compare_MC_Chunks);

   // Sanity check -- make sure they're in order.
   for (i = 0; i < lc_n_chunks-1; i++) {
      tl_assert( lc_chunks[i]->data <= lc_chunks[i+1]->data);
   }

   // Sanity check -- make sure they don't overlap.  But do allow exact
   // duplicates.  If this assertion fails, it may mean that the application
   // has done something stupid with VALGRIND_MALLOCLIKE_BLOCK client
   // requests, specifically, has made overlapping requests (which are
   // nonsensical).  Another way to screw up is to use
   // VALGRIND_MALLOCLIKE_BLOCK for stack locations;  again nonsensical.
   for (i = 0; i < lc_n_chunks-1; i++) {
      MC_Chunk* ch1 = lc_chunks[i];
      MC_Chunk* ch2 = lc_chunks[i+1];
      Bool nonsense_overlap = ! (
            // Normal case - no overlap.
            (ch1->data + ch1->szB <= ch2->data) ||
            // Degenerate case: exact duplicates.
            (ch1->data == ch2->data && ch1->szB == ch2->szB)
         );
      if (nonsense_overlap) {
         UMSG("Block [0x%lx, 0x%lx) overlaps with block [0x%lx, 0x%lx)",
              ch1->data, (ch1->data + ch1->szB),
              ch2->data, (ch2->data + ch2->szB));
      }
      tl_assert (!nonsense_overlap);
   }

   // Initialise lc_extras.
   lc_extras = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(LC_Extra) );
   for (i = 0; i < lc_n_chunks; i++) {
      lc_extras[i].state        = Unreached;
      lc_extras[i].indirect_szB = 0;
   }

   // Initialise lc_markstack.
   lc_markstack = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(Int) );
   for (i = 0; i < lc_n_chunks; i++) {
      lc_markstack[i] = -1;
   }
   lc_markstack_top = -1;

   // Verbosity.
   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
      UMSG( "searching for pointers to %'d not-freed blocks.", lc_n_chunks );

   // Scan the memory root-set, pushing onto the mark stack any blocks
   // pointed to.
   {
      Int   n_seg_starts;
      Addr* seg_starts = get_seg_starts( &n_seg_starts );

      tl_assert(seg_starts && n_seg_starts > 0);

      lc_scanned_szB = 0;

      // VG_(am_show_nsegments)( 0, "leakcheck");
      for (i = 0; i < n_seg_starts; i++) {
         SizeT seg_size;
         NSegment const* seg = VG_(am_find_nsegment)( seg_starts[i] );
         tl_assert(seg);

         if (seg->kind != SkFileC && seg->kind != SkAnonC) continue;
         if (!(seg->hasR && seg->hasW))                    continue;
         if (seg->isCH)                                    continue;

         // Don't poke around in device segments as this may cause
         // hangs.  Exclude /dev/zero just in case someone allocated
         // memory by explicitly mapping /dev/zero.
         if (seg->kind == SkFileC
             && (VKI_S_ISCHR(seg->mode) || VKI_S_ISBLK(seg->mode))) {
            HChar* dev_name = VG_(am_get_filename)( (NSegment*)seg );
            if (dev_name && 0 == VG_(strcmp)(dev_name, "/dev/zero")) {
               // Don't skip /dev/zero.
            } else {
               // Skip this device mapping.
               continue;
            }
         }

         if (0)
            VG_(printf)("ACCEPT %2d  %#lx %#lx\n", i, seg->start, seg->end);

         // Scan the segment.  We use -1 for the clique number, because this
         // is a root-set.
         seg_size = seg->end - seg->start + 1;
         if (VG_(clo_verbosity) > 2) {
            VG_(message)(Vg_DebugMsg,
                         "  Scanning root segment: %#lx..%#lx (%lu)",
                         seg->start, seg->end, seg_size);
         }
         lc_scan_memory(seg->start, seg_size, /*is_prior_definite*/True, -1);
      }
   }

   // Scan GP registers for chunk pointers.
   VG_(apply_to_GP_regs)(lc_push_if_a_chunk_ptr_register);

   // Process the pushed blocks.  After this, every block that is reachable
   // from the root-set has been traced.
   lc_process_markstack(/*clique*/-1);

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
      UMSG("checked %'lu bytes.", lc_scanned_szB);

   // Trace all the leaked blocks to determine which are directly leaked and
   // which are indirectly leaked.  For each Unreached block, push it onto
   // the mark stack, and find all the as-yet-Unreached blocks reachable
   // from it.  These form a clique and are marked IndirectLeak, and their
   // size is added to the clique leader's indirect size.  If one of the
   // found blocks was itself a clique leader (from a previous clique), then
   // the cliques are merged.
   for (i = 0; i < lc_n_chunks; i++) {
      MC_Chunk* ch = lc_chunks[i];
      LC_Extra* ex = &(lc_extras[i]);

      if (VG_DEBUG_CLIQUE)
         VG_(printf)("cliques: %d at %#lx -> Loss state %d\n",
                     i, ch->data, ex->state);

      tl_assert(lc_markstack_top == -1);

      if (ex->state == Unreached) {
         if (VG_DEBUG_CLIQUE)
            VG_(printf)("%d: gathering clique %#lx\n", i, ch->data);

         // Push this Unreached block onto the stack and process it.
         lc_push(i, ch);
         lc_process_markstack(i);

         tl_assert(lc_markstack_top == -1);
         tl_assert(ex->state == Unreached);
      }
   }

   print_results( tid, ( mode == LC_Full ? True : False ) );

   VG_(free) ( lc_chunks );
   VG_(free) ( lc_extras );
   VG_(free) ( lc_markstack );
}

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/
