
/*--------------------------------------------------------------------*/
/*--- The leak checker.                             mc_leakcheck.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2009 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_vki.h"
#include "pub_tool_aspacehl.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_execontext.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_libcsignal.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_oset.h"
#include "pub_tool_signals.h"
#include "pub_tool_tooliface.h"     // Needed for mc_include.h

#include "mc_include.h"

#include <setjmp.h>                 // For jmp_buf

/*------------------------------------------------------------*/
/*--- An overview of leak checking.                        ---*/
/*------------------------------------------------------------*/

// Leak-checking is a directed-graph traversal problem.  The graph has
// two kinds of nodes:
// - root-set nodes:
//   - GP registers of all threads;
//   - valid, aligned, pointer-sized data words in valid client memory,
//     including stacks, but excluding words within client heap-allocated
//     blocks (they are excluded so that later on we can differentiate
//     between heap blocks that are indirectly leaked vs. directly leaked).
// - heap-allocated blocks.  A block is a mempool chunk or a malloc chunk
//   that doesn't contain a mempool chunk.  Nb: the terms "blocks" and
//   "chunks" are used interchangeably below.
//
// There are two kinds of edges:
// - start-pointers, i.e. pointers to the start of a block;
// - interior-pointers, i.e. pointers to the interior of a block.
//
// We use "pointers" rather than "edges" below.
//
// Root set nodes only point to blocks.  Blocks only point to blocks;
// a block can point to itself.
//
// The aim is to traverse the graph and determine the status of each block.
//
// There are 9 distinct cases.  See memcheck/docs/mc-manual.xml for details.
// Presenting all nine categories to the user is probably too much.
// Currently we do this:
// - definitely lost:  case 3
// - indirectly lost:  cases 4, 9
// - possibly lost:    cases 5..8
// - still reachable:  cases 1, 2
//
// It's far from clear that this is the best possible categorisation;  it's
// accreted over time without any central guiding principle.
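//
// As a concrete (made-up) illustration of the four user-facing categories:
//
//    root word --S--> block A --I--> block B        block C --S--> block D
//
// where S is a start-pointer, I is an interior-pointer, and nothing at all
// points to C.  Then A is "still reachable", B is "possibly lost" (only an
// interior-pointer reaches it), C is "definitely lost", and D is
// "indirectly lost" (reachable only via the lost block C).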

/*------------------------------------------------------------*/
/*--- XXX: Thoughts for improvement.                       ---*/
/*------------------------------------------------------------*/

// From the user's point of view:
// - If they aren't using interior-pointers, they just have to fix the
//   directly lost blocks, and the indirectly lost ones will be fixed as
//   part of that.  Any possibly lost blocks will just be due to random
//   pointer garbage and can be ignored.
//
// - If they are using interior-pointers, the fact that they currently are
//   not being told which ones might be directly lost vs. indirectly lost
//   makes it hard to know where to begin.
//
// All this makes me wonder if a new option is warranted:
// --follow-interior-pointers.  By default it would be off, the leak checker
// wouldn't follow interior-pointers and there would only be 3 categories:
// R, DL, IL.
//
// If turned on, then it would show 7 categories (R, DL, IL, DR/DL, IR/IL,
// IR/IL/DL, IL/DL).  That output is harder to understand but it's your own
// damn fault for using interior-pointers...
//
// ----
//
// Also, why are two blank lines printed between each loss record?
//
// ----
//
// Also, --show-reachable is a bad name because it also turns on the showing
// of indirectly leaked blocks(!)  It would be better named --show-all or
// --show-all-heap-blocks, because that's the end result.
//
// ----
//
// Also, the VALGRIND_LEAK_CHECK and VALGRIND_QUICK_LEAK_CHECK aren't great
// names.  VALGRIND_FULL_LEAK_CHECK and VALGRIND_SUMMARY_LEAK_CHECK would be
// better.
//
// ----
//
// Also, VALGRIND_COUNT_LEAKS and VALGRIND_COUNT_LEAK_BLOCKS aren't great as
// they combine direct leaks and indirect leaks into one.  New, more precise
// ones (they'll need new names) would be good.  If more categories are
// used, as per the --follow-interior-pointers option, they should be
// updated accordingly.  And they should use a struct to return the values.
//
// ----
//
// Also, for this case:
//
//    (4)  p4      BBB ---> AAA
//
// BBB is definitely directly lost.  AAA is definitely indirectly lost.
// Here are the relevant loss records printed for a full check (each block
// is 16 bytes):
//
// ==20397== 16 bytes in 1 blocks are indirectly lost in loss record 9 of 15
// ==20397==    at 0x4C2694E: malloc (vg_replace_malloc.c:177)
// ==20397==    by 0x400521: mk (leak-cases.c:49)
// ==20397==    by 0x400578: main (leak-cases.c:72)
//
// ==20397== 32 (16 direct, 16 indirect) bytes in 1 blocks are definitely
// lost in loss record 14 of 15
// ==20397==    at 0x4C2694E: malloc (vg_replace_malloc.c:177)
// ==20397==    by 0x400521: mk (leak-cases.c:49)
// ==20397==    by 0x400580: main (leak-cases.c:72)
//
// The first one is fine -- it describes AAA.
//
// The second one is for BBB.  It's correct in that 16 bytes in 1 block are
// directly lost.  It's also correct that 16 are indirectly lost as a result,
// but it means that AAA is being counted twice in the loss records.  (It's
// not, thankfully, counted twice in the summary counts.)  Argh.
//
// This would be less confusing for the second one:
//
// ==20397== 16 bytes in 1 blocks are definitely lost in loss record 14
// of 15 (and 16 bytes in 1 block are indirectly lost as a result;  they
// are mentioned elsewhere (if --show-reachable=yes is given!))
// ==20397==    at 0x4C2694E: malloc (vg_replace_malloc.c:177)
// ==20397==    by 0x400521: mk (leak-cases.c:49)
// ==20397==    by 0x400580: main (leak-cases.c:72)
//
// But ideally we'd present the loss record for the directly lost block and
// then the resultant indirectly lost blocks and make clear the
// dependence.  Double argh.

/*------------------------------------------------------------*/
/*--- The actual algorithm.                                ---*/
/*------------------------------------------------------------*/

// - Find all the blocks (a.k.a. chunks) to check.  Mempool chunks require
//   some special treatment because they can be within malloc'd blocks.
// - Scan every word in the root set (GP registers and valid
//   non-heap memory words).
//   - First, we skip if it doesn't point to valid memory.
//   - Then, we see if it points to the start or interior of a block.  If
//     so, we push the block onto the mark stack and mark it as having been
//     reached.
// - Then, we process the mark stack, repeating the scanning for each block;
//   this can push more blocks onto the mark stack.  We repeat until the
//   mark stack is empty.  Each block is marked as definitely or possibly
//   reachable, depending on whether interior-pointers were required to
//   reach it.
// - At this point we know for every block if it's reachable or not.
// - We then push each unreached block onto the mark stack, using the block
//   number as the "clique" number.
// - We process the mark stack again, this time grouping blocks into cliques
//   in order to facilitate the directly/indirectly lost categorisation.
// - We group blocks by their ExeContexts and categorisation, and print them
//   if --leak-check=full.  We also print summary numbers.
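//
// As a rough call-structure sketch of the code below (simplified, added
// here for orientation):
//
//    MC_(detect_memory_leaks)()
//       find_active_chunks()                  // gather the blocks
//       lc_scan_memory(<each root segment>)   // scan the root set
//       VG_(apply_to_GP_regs)(...)            // scan the registers
//       lc_process_markstack(-1)              // trace reachability
//       for each still-Unreached block i:
//          lc_push(i, ...); lc_process_markstack(i)    // build cliques
//       print_results()                       // categorise and print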
//
// A note on "cliques":
// - A directly lost block is one with no pointers to it.  An indirectly
//   lost block is one that is pointed to by a directly or indirectly lost
//   block.
// - Each directly lost block has zero or more indirectly lost blocks
//   hanging off it.  All these blocks together form a "clique".  The
//   directly lost block is called the "clique leader".  The clique number
//   is the number (in lc_chunks[]) of the clique leader.
// - Actually, a directly lost block may be pointed to if it's part of a
//   cycle.  In that case, there may be more than one choice for the clique
//   leader, and the choice is arbitrary.  Eg. if you have A-->B and B-->A,
//   either A or B could be the clique leader.
// - Cliques cannot overlap, and will be truncated to avoid this.  Eg. if we
//   have A-->C and B-->C, the two cliques will be {A,C} and {B}, or {A} and
//   {B,C} (again the choice is arbitrary).  This is because we don't want
//   to count a block as indirectly lost more than once.
//
// A note on 'is_prior_definite':
// - This is a boolean used in various places that indicates if the chain
//   up to the prior node (prior to the one being considered) is definite.
// - In the clique == -1 case:
//   - if True it means that the prior node is a root-set node, or that the
//     prior node is a block which is reachable from the root-set via
//     start-pointers.
//   - if False it means that the prior node is a block that is only
//     reachable from the root-set via a path including at least one
//     interior-pointer.
// - In the clique != -1 case, currently it's always True because we treat
//   start-pointers and interior-pointers the same for direct/indirect leak
//   checking.  If we added a PossibleIndirectLeak state then this would
//   change.
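//
// As a quick reference -- this merely restates the code in
// lc_push_without_clique_if_a_chunk_ptr() below -- the state transitions
// on finding a pointer to a block are:
//
//    pointer kind       is_prior_definite   resulting state
//    start-pointer      True                Reachable
//    start-pointer      False               Possible (if still Unreached)
//    interior-pointer   True or False       Possible (if still Unreached)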


// Define to debug the memory-leak-detector.
#define VG_DEBUG_LEAKCHECK 0
#define VG_DEBUG_CLIQUE    0


/*------------------------------------------------------------*/
/*--- Getting the initial chunks, and searching them.      ---*/
/*------------------------------------------------------------*/

// Compare the MC_Chunks by 'data' (i.e. the address of the block).
static Int compare_MC_Chunks(void* n1, void* n2)
{
   MC_Chunk* mc1 = *(MC_Chunk**)n1;
   MC_Chunk* mc2 = *(MC_Chunk**)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}

#if VG_DEBUG_LEAKCHECK
// Used to sanity-check the fast binary-search mechanism.
static
Int find_chunk_for_OLD ( Addr       ptr,
                         MC_Chunk** chunks,
                         Int        n_chunks )

{
   Int  i;
   Addr a_lo, a_hi;
   PROF_EVENT(70, "find_chunk_for_OLD");
   for (i = 0; i < n_chunks; i++) {
      PROF_EVENT(71, "find_chunk_for_OLD(loop)");
      a_lo = chunks[i]->data;
      a_hi = ((Addr)chunks[i]->data) + chunks[i]->szB;
      if (a_lo <= ptr && ptr < a_hi)
         return i;
   }
   return -1;
}
#endif

// Find the i such that ptr points at or inside the block described by
// chunks[i].  Return -1 if none found.  This assumes that chunks[]
// has been sorted on the 'data' field.
static
Int find_chunk_for ( Addr       ptr,
                     MC_Chunk** chunks,
                     Int        n_chunks )
{
   Addr a_mid_lo, a_mid_hi;
   Int lo, mid, hi, retVal;
   // VG_(printf)("find chunk for %p = ", ptr);
   retVal = -1;
   lo = 0;
   hi = n_chunks-1;
   while (True) {
      // Invariant: current unsearched space is from lo to hi, inclusive.
      if (lo > hi) break; // not found

      mid      = (lo + hi) / 2;
      a_mid_lo = chunks[mid]->data;
      a_mid_hi = chunks[mid]->data + chunks[mid]->szB;
      // Extent of block 'mid' is [a_mid_lo .. a_mid_hi).
      // Special-case zero-sized blocks - treat them as if they had
      // size 1.  Not doing so causes them to not cover any address
      // range at all and so will never be identified as the target of
      // any pointer, which causes them to be incorrectly reported as
      // definitely leaked.
      if (chunks[mid]->szB == 0)
         a_mid_hi++;

      if (ptr < a_mid_lo) {
         hi = mid-1;
         continue;
      }
      if (ptr >= a_mid_hi) {
         lo = mid+1;
         continue;
      }
      tl_assert(ptr >= a_mid_lo && ptr < a_mid_hi);
      retVal = mid;
      break;
   }

#  if VG_DEBUG_LEAKCHECK
   tl_assert(retVal == find_chunk_for_OLD ( ptr, chunks, n_chunks ));
#  endif
   // VG_(printf)("%d\n", retVal);
   return retVal;
}
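
// For example (addresses purely illustrative): given two sorted chunks
// covering [0x1000,0x1010) and [0x2000,0x2020),
//
//    find_chunk_for(0x1008, chunks, 2) == 0    // interior of chunk 0
//    find_chunk_for(0x2000, chunks, 2) == 1    // start of chunk 1
//    find_chunk_for(0x1010, chunks, 2) == -1   // just past chunk 0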


static MC_Chunk**
find_active_chunks(UInt* pn_chunks)
{
   // Our goal is to construct a set of chunks that includes every
   // mempool chunk, and every malloc region that *doesn't* contain a
   // mempool chunk.
   MC_Mempool *mp;
   MC_Chunk **mallocs, **chunks, *mc;
   UInt n_mallocs, n_chunks, m, s;
   Bool *malloc_chunk_holds_a_pool_chunk;

   // First we collect all the malloc chunks into an array and sort it.
   // We do this because we want to query the chunks by interior
   // pointers, requiring binary search.
   mallocs = (MC_Chunk**) VG_(HT_to_array)( MC_(malloc_list), &n_mallocs );
   if (n_mallocs == 0) {
      tl_assert(mallocs == NULL);
      *pn_chunks = 0;
      return NULL;
   }
   VG_(ssort)(mallocs, n_mallocs, sizeof(VgHashNode*), compare_MC_Chunks);

   // Then we build an array containing a Bool for each malloc chunk,
   // indicating whether it contains any mempools.
   malloc_chunk_holds_a_pool_chunk = VG_(calloc)( "mc.fas.1",
                                                  n_mallocs, sizeof(Bool) );
   n_chunks = n_mallocs;

   // Then we loop over the mempool tables.  For each chunk in each
   // pool, we set the entry in the Bool array corresponding to the
   // malloc chunk containing the mempool chunk.
   VG_(HT_ResetIter)(MC_(mempool_list));
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      VG_(HT_ResetIter)(mp->chunks);
      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {

         // We'll need to record this chunk.
         n_chunks++;

         // Possibly invalidate the malloc holding the beginning of this chunk.
         m = find_chunk_for(mc->data, mallocs, n_mallocs);
         if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
            tl_assert(n_chunks > 0);
            n_chunks--;
            malloc_chunk_holds_a_pool_chunk[m] = True;
         }

         // Possibly invalidate the malloc holding the end of this chunk.
         if (mc->szB > 1) {
            m = find_chunk_for(mc->data + (mc->szB - 1), mallocs, n_mallocs);
            if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
               tl_assert(n_chunks > 0);
               n_chunks--;
               malloc_chunk_holds_a_pool_chunk[m] = True;
            }
         }
      }
   }
   tl_assert(n_chunks > 0);

   // Create final chunk array.
   chunks = VG_(malloc)("mc.fas.2", sizeof(VgHashNode*) * (n_chunks));
   s = 0;

   // Copy the mempool chunks and the non-marked malloc chunks into a
   // combined array of chunks.
   VG_(HT_ResetIter)(MC_(mempool_list));
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      VG_(HT_ResetIter)(mp->chunks);
      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
         tl_assert(s < n_chunks);
         chunks[s++] = mc;
      }
   }
   for (m = 0; m < n_mallocs; ++m) {
      if (!malloc_chunk_holds_a_pool_chunk[m]) {
         tl_assert(s < n_chunks);
         chunks[s++] = mallocs[m];
      }
   }
   tl_assert(s == n_chunks);

   // Free temporaries.
   VG_(free)(mallocs);
   VG_(free)(malloc_chunk_holds_a_pool_chunk);

   *pn_chunks = n_chunks;

   return chunks;
}
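
// For example (a hypothetical layout): if a malloc'd block spanning
// [0x5000,0x5100) has a mempool chunk registered at [0x5010,0x5020)
// inside it, the returned array contains the mempool chunk but not the
// enclosing malloc'd block, whereas a malloc'd block containing no
// mempool chunks is returned as-is.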

/*------------------------------------------------------------*/
/*--- The leak detector proper.                            ---*/
/*------------------------------------------------------------*/

// Holds extra info about each block during leak checking.
typedef
   struct {
      UInt  state:2;    // Reachedness.
      SizeT indirect_szB : (sizeof(SizeT)*8)-2; // If Unreached, how many bytes
                                                //   are unreachable from here.
   }
   LC_Extra;
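
// Nb: the bit-field packing above keeps each LC_Extra down to one word --
// e.g. on a typical 64-bit host, 2 bits of state plus a 62-bit byte count
// fit in 8 bytes -- which keeps the per-block overhead small when there
// are many blocks.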

// An array holding pointers to every chunk we're checking.  Sorted by address.
static MC_Chunk** lc_chunks;
// How many chunks we're dealing with.
static Int        lc_n_chunks;

// This has the same number of entries as lc_chunks, and each entry
// in lc_chunks corresponds with the entry here (ie. lc_chunks[i] and
// lc_extras[i] describe the same block).
static LC_Extra* lc_extras;

// Records chunks that are currently being processed.  Each element in the
// stack is an index into lc_chunks and lc_extras.  Its size is
// 'lc_n_chunks' because in the worst case that's how many chunks could be
// pushed onto it (actually I think the maximum is lc_n_chunks-1 but let's
// be conservative).
static Int* lc_markstack;
// The index of the top element of the stack; -1 if the stack is empty, 0 if
// the stack has one element, 1 if it has two, etc.
static Int  lc_markstack_top;

// Keeps track of how many bytes of memory we've scanned, for printing.
// (Nb: We don't keep track of how many register bytes we've scanned.)
static SizeT lc_scanned_szB;


SizeT MC_(bytes_leaked)     = 0;
SizeT MC_(bytes_indirect)   = 0;
SizeT MC_(bytes_dubious)    = 0;
SizeT MC_(bytes_reachable)  = 0;
SizeT MC_(bytes_suppressed) = 0;

SizeT MC_(blocks_leaked)     = 0;
SizeT MC_(blocks_indirect)   = 0;
SizeT MC_(blocks_dubious)    = 0;
SizeT MC_(blocks_reachable)  = 0;
SizeT MC_(blocks_suppressed) = 0;


// Determines if a pointer is to a chunk.  If so, returns the chunk number,
// the chunk itself, and its LC_Extra via call-by-reference.
static Bool
lc_is_a_chunk_ptr(Addr ptr, Int* pch_no, MC_Chunk** pch, LC_Extra** pex)
{
   Int ch_no;
   MC_Chunk* ch;
   LC_Extra* ex;

   // Quick filter.
   if (!VG_(am_is_valid_for_client)(ptr, 1, VKI_PROT_READ)) {
      return False;
   } else {
      ch_no = find_chunk_for(ptr, lc_chunks, lc_n_chunks);
      tl_assert(ch_no >= -1 && ch_no < lc_n_chunks);

      if (ch_no == -1) {
         return False;
      } else {
         // Ok, we've found a pointer to a chunk.  Get the MC_Chunk and its
         // LC_Extra.
         ch = lc_chunks[ch_no];
         ex = &(lc_extras[ch_no]);

         tl_assert(ptr >= ch->data);
         tl_assert(ptr < ch->data + ch->szB + (ch->szB==0 ? 1 : 0));

         if (VG_DEBUG_LEAKCHECK)
            VG_(printf)("ptr=%#lx -> block %d\n", ptr, ch_no);

         *pch_no = ch_no;
         *pch    = ch;
         *pex    = ex;

         return True;
      }
   }
}

// Push a chunk (well, just its index) onto the mark stack.
static void lc_push(Int ch_no, MC_Chunk* ch)
{
   if (0) {
      VG_(printf)("pushing %#lx-%#lx\n", ch->data, ch->data + ch->szB);
   }
   lc_markstack_top++;
   tl_assert(lc_markstack_top < lc_n_chunks);
   lc_markstack[lc_markstack_top] = ch_no;
}

// Pop the index of the chunk on the top of the mark stack into *ret.
// Returns False (leaving *ret untouched) if the stack is empty.
static Bool lc_pop(Int* ret)
{
   if (-1 == lc_markstack_top) {
      return False;
   } else {
      tl_assert(0 <= lc_markstack_top && lc_markstack_top < lc_n_chunks);
      *ret = lc_markstack[lc_markstack_top];
      lc_markstack_top--;
      return True;
   }
}
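
// Typical usage of the two functions above -- a sketch of the pattern
// used by lc_process_markstack() below:
//
//    Int top;
//    lc_push(ch_no, ch);                  // seed the stack
//    while (lc_pop(&top)) {
//       // ... scan lc_chunks[top], possibly pushing more chunks ...
//    }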


// If 'ptr' is pointing to a heap-allocated block which hasn't been seen
// before, push it onto the mark stack.
static void
lc_push_without_clique_if_a_chunk_ptr(Addr ptr, Bool is_prior_definite)
{
   Int ch_no;
   MC_Chunk* ch;
   LC_Extra* ex;

   if ( ! lc_is_a_chunk_ptr(ptr, &ch_no, &ch, &ex) )
      return;

   // Only push it if it hasn't been seen previously.
   if (ex->state == Unreached) {
      lc_push(ch_no, ch);
   }

   // Possibly upgrade the state, ie. one of:
   // - Unreached --> Possible
   // - Unreached --> Reachable
   // - Possible  --> Reachable
   if (ptr == ch->data && is_prior_definite) {
      // 'ptr' points to the start of the block, and the prior node is
      // definite, which means that this block is definitely reachable.
      ex->state = Reachable;

   } else if (ex->state == Unreached) {
      // Either 'ptr' is an interior-pointer, or the prior node isn't
      // definite, which means that we can only mark this block as possibly
      // reachable.
      ex->state = Possible;
   }
}

static void
lc_push_if_a_chunk_ptr_register(Addr ptr)
{
   lc_push_without_clique_if_a_chunk_ptr(ptr, /*is_prior_definite*/True);
}

// If ptr is pointing to a heap-allocated block which hasn't been seen
// before, push it onto the mark stack.  Clique is the index of the
// clique leader.
static void
lc_push_with_clique_if_a_chunk_ptr(Addr ptr, Int clique)
{
   Int ch_no;
   MC_Chunk* ch;
   LC_Extra* ex;

   tl_assert(0 <= clique && clique < lc_n_chunks);

   if ( ! lc_is_a_chunk_ptr(ptr, &ch_no, &ch, &ex) )
      return;

   // If it's not Unreached, it's already been handled so ignore it.
   // If ch_no==clique, it's the clique leader, which means this is a cyclic
   // structure;  again ignore it because it's already been handled.
   if (ex->state == Unreached && ch_no != clique) {
      // Note that, unlike reachable blocks, we currently don't distinguish
      // between start-pointers and interior-pointers here.  We probably
      // should, though.
      ex->state = IndirectLeak;
      lc_push(ch_no, ch);

      // Add the block to the clique, and add its size to the
      // clique-leader's indirect size.  Also, if the new block was
      // itself a clique leader, it isn't any more, so add its
      // indirect_szB to the new clique leader.
      if (VG_DEBUG_CLIQUE) {
         if (ex->indirect_szB > 0)
            VG_(printf)("  clique %d joining clique %d adding %lu+%lu\n",
                        ch_no, clique, (SizeT)ch->szB, (SizeT)ex->indirect_szB);
         else
            VG_(printf)("  block %d joining clique %d adding %lu\n",
                        ch_no, clique, (SizeT)ch->szB);
      }

      lc_extras[clique].indirect_szB += ch->szB;
      lc_extras[clique].indirect_szB += ex->indirect_szB;
      ex->indirect_szB = 0;    // Shouldn't matter.
   }
}

static void
lc_push_if_a_chunk_ptr(Addr ptr, Int clique, Bool is_prior_definite)
{
   if (-1 == clique)
      lc_push_without_clique_if_a_chunk_ptr(ptr, is_prior_definite);
   else
      lc_push_with_clique_if_a_chunk_ptr(ptr, clique);
}


static jmp_buf memscan_jmpbuf;

static
void scan_all_valid_memory_catcher ( Int sigNo, Addr addr )
{
   if (0)
      VG_(printf)("OUCH! sig=%d addr=%#lx\n", sigNo, addr);
   if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS)
      __builtin_longjmp(memscan_jmpbuf, 1);
}

// Scan a block of memory between [start, start+len).  This range may
// be bogus, inaccessible, or otherwise strange; we deal with it.  For each
// valid aligned word we check whether it points to a chunk, and push the
// chunk onto the mark stack if so.
static void
lc_scan_memory(Addr start, SizeT len, Bool is_prior_definite, Int clique)
{
   Addr ptr = VG_ROUNDUP(start,     sizeof(Addr));
   Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
   vki_sigset_t sigmask;

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("scan %#lx-%#lx (%lu)\n", start, end, len);

   VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
   VG_(set_fault_catcher)(scan_all_valid_memory_catcher);

   // We might be in the middle of a page.  Do a cheap check to see if
   // it's valid;  if not, skip onto the next page.
   if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ))
      ptr = VG_PGROUNDUP(ptr+1);        // First page is bad - skip it.

   while (ptr < end) {
      Addr addr;

      // Skip invalid chunks.
      if ( ! MC_(is_within_valid_secondary)(ptr) ) {
         ptr = VG_ROUNDUP(ptr+1, SM_SIZE);
         continue;
      }

      // Look to see if this page seems reasonable.
      if ((ptr % VKI_PAGE_SIZE) == 0) {
         if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ)) {
            ptr += VKI_PAGE_SIZE;      // Bad page - skip it.
            continue;
         }
      }

      if (__builtin_setjmp(memscan_jmpbuf) == 0) {
         if ( MC_(is_valid_aligned_word)(ptr) ) {
            lc_scanned_szB += sizeof(Addr);
            addr = *(Addr *)ptr;
            // If we get here, the scanned word is in valid memory.  Now
            // let's see if its contents point to a chunk.
            lc_push_if_a_chunk_ptr(addr, clique, is_prior_definite);
         } else if (0 && VG_DEBUG_LEAKCHECK) {
            VG_(printf)("%#lx not valid\n", ptr);
         }
         ptr += sizeof(Addr);
      } else {
         // We need to restore the signal mask, because we were
         // longjmped out of a signal handler.
         VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);

         ptr = VG_PGROUNDUP(ptr+1);     // Bad page - skip it.
      }
   }

   VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
   VG_(set_fault_catcher)(NULL);
}
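
// Note the three levels of skipping in the loop above: whole secondary
// maps (SM_SIZE at a time) when the shadow memory says nothing there is
// valid, whole pages when the address-space manager says a page is
// unreadable (or when reading it faults), and a single word when the
// word itself is not valid aligned data.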


// Process the mark stack until empty.
static void lc_process_markstack(Int clique)
{
   Int  top = -1;    // shut gcc up
   Bool is_prior_definite;

   while (lc_pop(&top)) {
      tl_assert(top >= 0 && top < lc_n_chunks);

      // See comment about 'is_prior_definite' at the top to understand this.
      is_prior_definite = ( Possible != lc_extras[top].state );

      lc_scan_memory(lc_chunks[top]->data, lc_chunks[top]->szB,
                     is_prior_definite, clique);
   }
}

static Word cmp_LossRecordKey_LossRecord(const void* key, const void* elem)
{
   LossRecordKey* a = (LossRecordKey*)key;
   LossRecordKey* b = &(((LossRecord*)elem)->key);

   // Compare on states first because that's fast.
   if (a->state < b->state) return -1;
   if (a->state > b->state) return  1;
   // Ok, the states are equal.  Now compare the locations, which is slower.
   if (VG_(eq_ExeContext)(
            MC_(clo_leak_resolution), a->allocated_at, b->allocated_at))
      return 0;
   // Different locations.  Ordering is arbitrary, just use the ec pointer.
   if (a->allocated_at < b->allocated_at) return -1;
   if (a->allocated_at > b->allocated_at) return  1;
   VG_(tool_panic)("bad LossRecord comparison");
}

static Int cmp_LossRecords(void* va, void* vb)
{
   LossRecord* lr_a = *(LossRecord**)va;
   LossRecord* lr_b = *(LossRecord**)vb;
   SizeT total_szB_a = lr_a->szB + lr_a->indirect_szB;
   SizeT total_szB_b = lr_b->szB + lr_b->indirect_szB;

   // First compare by sizes.
   if (total_szB_a < total_szB_b) return -1;
   if (total_szB_a > total_szB_b) return  1;
   // If sizes are equal, compare by states.
   if (lr_a->key.state < lr_b->key.state) return -1;
   if (lr_a->key.state > lr_b->key.state) return  1;
   // If they're still equal here, it doesn't matter that much, but we keep
   // comparing other things so that regtests are as deterministic as
   // possible.  So: compare num_blocks.
   if (lr_a->num_blocks < lr_b->num_blocks) return -1;
   if (lr_a->num_blocks > lr_b->num_blocks) return  1;
   // Finally, compare ExeContext addresses... older ones are likely to have
   // lower addresses.
   if (lr_a->key.allocated_at < lr_b->key.allocated_at) return -1;
   if (lr_a->key.allocated_at > lr_b->key.allocated_at) return  1;
   return 0;
}
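
// So, for example, a record totalling 100 bytes sorts after one totalling
// 20 bytes regardless of their states (total size dominates), and ties on
// total size are broken by state, then by block count, then by ExeContext
// address.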

static void print_results(ThreadId tid, Bool is_full_check)
{
   Int          i, n_lossrecords;
   OSet*        lr_table;
   LossRecord** lr_array;
   LossRecord*  lr;
   Bool         is_suppressed;

   // Create the lr_table, which holds the loss records.
   lr_table =
      VG_(OSetGen_Create)(offsetof(LossRecord, key),
                          cmp_LossRecordKey_LossRecord,
                          VG_(malloc), "mc.pr.1",
                          VG_(free));

   // Convert the chunks into loss records, merging them where appropriate.
   for (i = 0; i < lc_n_chunks; i++) {
      MC_Chunk*     ch = lc_chunks[i];
      LC_Extra*     ex = &(lc_extras)[i];
      LossRecord*   old_lr;
      LossRecordKey lrkey;
      lrkey.state        = ex->state;
      lrkey.allocated_at = ch->where;

      old_lr = VG_(OSetGen_Lookup)(lr_table, &lrkey);
      if (old_lr) {
         // We found an existing loss record matching this chunk.  Update the
         // loss record's details in-situ.  This is safe because we don't
         // change the elements used as the OSet key.
         old_lr->szB          += ch->szB;
         old_lr->indirect_szB += ex->indirect_szB;
         old_lr->num_blocks++;
      } else {
         // No existing loss record matches this chunk.  Create a new loss
         // record, initialise it from the chunk, and insert it into lr_table.
         lr = VG_(OSetGen_AllocNode)(lr_table, sizeof(LossRecord));
         lr->key          = lrkey;
         lr->szB          = ch->szB;
         lr->indirect_szB = ex->indirect_szB;
         lr->num_blocks   = 1;
         VG_(OSetGen_Insert)(lr_table, lr);
      }
   }
   n_lossrecords = VG_(OSetGen_Size)(lr_table);

   // Create an array of pointers to the loss records.
   lr_array = VG_(malloc)("mc.pr.2", n_lossrecords * sizeof(LossRecord*));
   i = 0;
   VG_(OSetGen_ResetIter)(lr_table);
   while ( (lr = VG_(OSetGen_Next)(lr_table)) ) {
      lr_array[i++] = lr;
   }
   tl_assert(i == n_lossrecords);

   // Sort the array by loss record sizes.
   VG_(ssort)(lr_array, n_lossrecords, sizeof(LossRecord*),
              cmp_LossRecords);

   // Zero totals.
   MC_(blocks_leaked)     = MC_(bytes_leaked)     = 0;
   MC_(blocks_indirect)   = MC_(bytes_indirect)   = 0;
   MC_(blocks_dubious)    = MC_(bytes_dubious)    = 0;
   MC_(blocks_reachable)  = MC_(bytes_reachable)  = 0;
   MC_(blocks_suppressed) = MC_(bytes_suppressed) = 0;

   // Print the loss records (in size order) and collect summary stats.
   for (i = 0; i < n_lossrecords; i++) {
      Bool print_record;
      // Rules for printing:
      // - We don't show suppressed loss records ever (and that's controlled
      //   within the error manager).
      // - We show non-suppressed loss records that are not "reachable" if
      //   --leak-check=yes.
      // - We show all non-suppressed loss records if --leak-check=yes and
      //   --show-reachable=yes.
      //
      // Nb: here "reachable" means Reachable *or* IndirectLeak;  note that
      // this is different to "still reachable" used elsewhere because it
      // includes indirectly lost blocks!
      //
      lr = lr_array[i];
      print_record = is_full_check &&
                     ( MC_(clo_show_reachable) ||
                       Unreached == lr->key.state ||
                       Possible  == lr->key.state );
      is_suppressed =
         MC_(record_leak_error) ( tid, i+1, n_lossrecords, lr, print_record );

      if (is_suppressed) {
         MC_(blocks_suppressed) += lr->num_blocks;
         MC_(bytes_suppressed)  += lr->szB;

      } else if (Unreached == lr->key.state) {
         MC_(blocks_leaked) += lr->num_blocks;
         MC_(bytes_leaked)  += lr->szB;

      } else if (IndirectLeak == lr->key.state) {
         MC_(blocks_indirect) += lr->num_blocks;
         MC_(bytes_indirect)  += lr->szB;

      } else if (Possible == lr->key.state) {
         MC_(blocks_dubious) += lr->num_blocks;
         MC_(bytes_dubious)  += lr->szB;

      } else if (Reachable == lr->key.state) {
         MC_(blocks_reachable) += lr->num_blocks;
         MC_(bytes_reachable)  += lr->szB;

      } else {
         VG_(tool_panic)("unknown loss mode");
      }
   }

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
      VG_(umsg)("\n");
      VG_(umsg)("LEAK SUMMARY:\n");
      VG_(umsg)("   definitely lost: %'lu bytes in %'lu blocks.\n",
                MC_(bytes_leaked), MC_(blocks_leaked) );
      VG_(umsg)("   indirectly lost: %'lu bytes in %'lu blocks.\n",
                MC_(bytes_indirect), MC_(blocks_indirect) );
      VG_(umsg)("     possibly lost: %'lu bytes in %'lu blocks.\n",
                MC_(bytes_dubious), MC_(blocks_dubious) );
      VG_(umsg)("   still reachable: %'lu bytes in %'lu blocks.\n",
                MC_(bytes_reachable), MC_(blocks_reachable) );
      VG_(umsg)("        suppressed: %'lu bytes in %'lu blocks.\n",
                MC_(bytes_suppressed), MC_(blocks_suppressed) );
      if (!is_full_check &&
          (MC_(blocks_leaked) + MC_(blocks_indirect) +
           MC_(blocks_dubious) + MC_(blocks_reachable)) > 0) {
         VG_(umsg)("Rerun with --leak-check=full to see details "
                   "of leaked memory.\n");
      }
      if (is_full_check &&
          MC_(blocks_reachable) > 0 && !MC_(clo_show_reachable))
      {
         VG_(umsg)("Reachable blocks (those to which a pointer "
                   "was found) are not shown.\n");
         VG_(umsg)("To see them, rerun with: --leak-check=full "
                   "--show-reachable=yes\n");
      }
   }
}

/*------------------------------------------------------------*/
/*--- Top-level entry point.                               ---*/
/*------------------------------------------------------------*/

void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckMode mode )
{
   Int i;

   tl_assert(mode != LC_Off);

   // Get the chunks, stop if there were none.
   lc_chunks = find_active_chunks(&lc_n_chunks);
   if (lc_n_chunks == 0) {
      tl_assert(lc_chunks == NULL);
      if (VG_(clo_verbosity) >= 1 && !VG_(clo_xml)) {
         VG_(umsg)("\n");
         VG_(umsg)("All heap blocks were freed -- no leaks are possible.\n");
      }
      return;
   }

   // Sort the array so blocks are in ascending order in memory.
   VG_(ssort)(lc_chunks, lc_n_chunks, sizeof(VgHashNode*), compare_MC_Chunks);

   // Sanity check -- make sure they're in order.
   for (i = 0; i < lc_n_chunks-1; i++) {
      tl_assert( lc_chunks[i]->data <= lc_chunks[i+1]->data);
   }

   // Sanity check -- make sure they don't overlap.  But do allow exact
   // duplicates.  If this assertion fails, it may mean that the application
   // has done something stupid with VALGRIND_MALLOCLIKE_BLOCK client
   // requests, specifically, has made overlapping requests (which are
   // nonsensical).  Another way to screw up is to use
   // VALGRIND_MALLOCLIKE_BLOCK for stack locations;  again nonsensical.
   for (i = 0; i < lc_n_chunks-1; i++) {
      MC_Chunk* ch1 = lc_chunks[i];
      MC_Chunk* ch2 = lc_chunks[i+1];
      Bool nonsense_overlap = ! (
            // Normal case - no overlap.
            (ch1->data + ch1->szB <= ch2->data) ||
            // Degenerate case: exact duplicates.
            (ch1->data == ch2->data && ch1->szB == ch2->szB)
         );
      if (nonsense_overlap) {
         VG_(umsg)("Block [0x%lx, 0x%lx) overlaps with block [0x%lx, 0x%lx)\n",
                   ch1->data, (ch1->data + ch1->szB),
                   ch2->data, (ch2->data + ch2->szB));
      }
      tl_assert (!nonsense_overlap);
   }

   // Initialise lc_extras.
   lc_extras = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(LC_Extra) );
   for (i = 0; i < lc_n_chunks; i++) {
      lc_extras[i].state        = Unreached;
      lc_extras[i].indirect_szB = 0;
   }

   // Initialise lc_markstack.
   lc_markstack = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(Int) );
   for (i = 0; i < lc_n_chunks; i++) {
      lc_markstack[i] = -1;
   }
   lc_markstack_top = -1;

   // Verbosity.
   if (VG_(clo_verbosity) > 1 && !VG_(clo_xml)) {
      VG_(umsg)( "\n" );
      VG_(umsg)( "Searching for pointers to %'d not-freed blocks.\n",
                 lc_n_chunks );
   }

   // Scan the memory root-set, pushing onto the mark stack any blocks
   // pointed to.
   {
      Int   n_seg_starts;
      Addr* seg_starts = VG_(get_segment_starts)( &n_seg_starts );

      tl_assert(seg_starts && n_seg_starts > 0);

      lc_scanned_szB = 0;

      // VG_(am_show_nsegments)( 0, "leakcheck");
      for (i = 0; i < n_seg_starts; i++) {
         SizeT seg_size;
         NSegment const* seg = VG_(am_find_nsegment)( seg_starts[i] );
         tl_assert(seg);

         if (seg->kind != SkFileC && seg->kind != SkAnonC) continue;
         if (!(seg->hasR && seg->hasW))                    continue;
         if (seg->isCH)                                    continue;

         // Don't poke around in device segments as this may cause
         // hangs.  Exclude /dev/zero just in case someone allocated
         // memory by explicitly mapping /dev/zero.
         if (seg->kind == SkFileC
             && (VKI_S_ISCHR(seg->mode) || VKI_S_ISBLK(seg->mode))) {
            HChar* dev_name = VG_(am_get_filename)( (NSegment*)seg );
            if (dev_name && 0 == VG_(strcmp)(dev_name, "/dev/zero")) {
               // Don't skip /dev/zero.
            } else {
               // Skip this device mapping.
               continue;
            }
         }

         if (0)
            VG_(printf)("ACCEPT %2d  %#lx %#lx\n", i, seg->start, seg->end);

         // Scan the segment.  We use -1 for the clique number, because this
         // is a root-set.
         seg_size = seg->end - seg->start + 1;
         if (VG_(clo_verbosity) > 2) {
            VG_(message)(Vg_DebugMsg,
                         "  Scanning root segment: %#lx..%#lx (%lu)\n",
                         seg->start, seg->end, seg_size);
         }
         lc_scan_memory(seg->start, seg_size, /*is_prior_definite*/True, -1);
      }
   }

   // Scan GP registers for chunk pointers.
   VG_(apply_to_GP_regs)(lc_push_if_a_chunk_ptr_register);

   // Process the pushed blocks.  After this, every block that is reachable
   // from the root-set has been traced.
   lc_process_markstack(/*clique*/-1);

   if (VG_(clo_verbosity) > 1 && !VG_(clo_xml))
      VG_(umsg)("Checked %'lu bytes.\n", lc_scanned_szB);

   // Trace all the leaked blocks to determine which are directly leaked and
   // which are indirectly leaked.  For each Unreached block, push it onto
   // the mark stack, and find all the as-yet-Unreached blocks reachable
   // from it.  These form a clique and are marked IndirectLeak, and their
   // size is added to the clique leader's indirect size.  If one of the
   // found blocks was itself a clique leader (from a previous clique), then
   // the cliques are merged.
   for (i = 0; i < lc_n_chunks; i++) {
      MC_Chunk* ch = lc_chunks[i];
      LC_Extra* ex = &(lc_extras[i]);

      if (VG_DEBUG_CLIQUE)
         VG_(printf)("cliques: %d at %#lx -> Loss state %d\n",
                     i, ch->data, ex->state);

      tl_assert(lc_markstack_top == -1);

      if (ex->state == Unreached) {
         if (VG_DEBUG_CLIQUE)
            VG_(printf)("%d: gathering clique %#lx\n", i, ch->data);

         // Push this Unreached block onto the stack and process it.
         lc_push(i, ch);
         lc_process_markstack(i);

         tl_assert(lc_markstack_top == -1);
         tl_assert(ex->state == Unreached);
      }
   }

   print_results( tid, ( mode == LC_Full ? True : False ) );

   VG_(free) ( lc_chunks );
   VG_(free) ( lc_extras );
   VG_(free) ( lc_markstack );
}

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/
