
/*--------------------------------------------------------------------*/
/*--- The leak checker.                             mc_leakcheck.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2012 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/
#include "pub_tool_basics.h"
#include "pub_tool_vki.h"
#include "pub_tool_aspacehl.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_execontext.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_libcsignal.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_oset.h"
#include "pub_tool_poolalloc.h"
#include "pub_tool_signals.h"       // Needed for mc_include.h
#include "pub_tool_libcsetjmp.h"    // setjmp facilities
#include "pub_tool_tooliface.h"     // Needed for mc_include.h

#include "mc_include.h"

/*------------------------------------------------------------*/
/*--- An overview of leak checking.                        ---*/
/*------------------------------------------------------------*/

// Leak-checking is a directed-graph traversal problem.  The graph has
// two kinds of nodes:
// - root-set nodes:
//   - GP registers of all threads;
//   - valid, aligned, pointer-sized data words in valid client memory,
//     including stacks, but excluding words within client heap-allocated
//     blocks (they are excluded so that later on we can differentiate
//     between heap blocks that are indirectly leaked vs. directly leaked).
// - heap-allocated blocks.  A block is a mempool chunk or a malloc chunk
//   that doesn't contain a mempool chunk.  Nb: the terms "blocks" and
//   "chunks" are used interchangeably below.
//
// There are two kinds of edges:
// - start-pointers, i.e. pointers to the start of a block;
// - interior-pointers, i.e. pointers to the interior of a block.
//
// We use "pointers" rather than "edges" below.
//
// Root set nodes only point to blocks.  Blocks only point to blocks;
// a block can point to itself.
//
// The aim is to traverse the graph and determine the status of each block.
//
// There are 9 distinct cases.  See memcheck/docs/mc-manual.xml for details.
// Presenting all nine categories to the user is probably too much.
// Currently we do this:
// - definitely lost:  case 3
// - indirectly lost:  cases 4, 9
// - possibly lost:    cases 5..8
// - still reachable:  cases 1, 2
//
// It's far from clear that this is the best possible categorisation;  it's
// accreted over time without any central guiding principle.

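// For concreteness, here is a minimal, hypothetical client program (not
// part of Memcheck; names invented for illustration only) that produces
// three of the above categories when leak-checked:
//
//    #include <stdlib.h>
//
//    static void* reachable;            // root-set pointer: still reachable
//
//    int main(void)
//    {
//       char*  direct = malloc(16);
//       char** holder = malloc(sizeof(char*));
//       *holder   = malloc(16);         // only pointer to it is in 'holder'
//       holder    = NULL;               // 'holder' block: definitely lost;
//                                       //    its payload: indirectly lost
//       direct    = NULL;               // first block: definitely lost
//       reachable = malloc(16);         // still reachable at exit
//       return 0;
//    }
//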
/*------------------------------------------------------------*/
/*--- XXX: Thoughts for improvement.                       ---*/
/*------------------------------------------------------------*/

// From the user's point of view:
// - If they aren't using interior-pointers, they just have to fix the
//   directly lost blocks, and the indirectly lost ones will be fixed as
//   part of that.  Any possibly lost blocks will just be due to random
//   pointer garbage and can be ignored.
//
// - If they are using interior-pointers, the fact that they currently are not
//   being told which ones might be directly lost vs. indirectly lost makes
//   it hard to know where to begin.
//
// All this makes me wonder if a new option is warranted:
// --follow-interior-pointers.  By default it would be off, the leak checker
// wouldn't follow interior-pointers and there would only be 3 categories:
// R, DL, IL.
//
// If turned on, then it would show 7 categories (R, DL, IL, DR/DL, IR/IL,
// IR/IL/DL, IL/DL).  That output is harder to understand but it's your own
// damn fault for using interior-pointers...
//
// ----
//
// Also, why are two blank lines printed between each loss record?
// [bug 197930]
//
// ----
//
// Also, --show-reachable is a bad name because it also turns on the showing
// of indirectly leaked blocks(!)  It would be better named --show-all or
// --show-all-heap-blocks, because that's the end result.
// We now have the option --show-leak-kinds=... which allows specifying =all.
//
// ----
//
// Also, the VALGRIND_LEAK_CHECK and VALGRIND_QUICK_LEAK_CHECK aren't great
// names.  VALGRIND_FULL_LEAK_CHECK and VALGRIND_SUMMARY_LEAK_CHECK would be
// better.
//
// ----
//
// Also, VALGRIND_COUNT_LEAKS and VALGRIND_COUNT_LEAK_BLOCKS aren't great as
// they combine direct leaks and indirect leaks into one.  New, more precise
// ones (they'll need new names) would be good.  If more categories are
// used, as per the --follow-interior-pointers option, they should be
// updated accordingly.  And they should use a struct to return the values.
//
// ----
//
// Also, for this case:
//
//  (4)  p4      BBB ---> AAA
//
// BBB is definitely directly lost.  AAA is definitely indirectly lost.
// Here are the relevant loss records printed for a full check (each block
// is 16 bytes):
//
// ==20397== 16 bytes in 1 blocks are indirectly lost in loss record 9 of 15
// ==20397==    at 0x4C2694E: malloc (vg_replace_malloc.c:177)
// ==20397==    by 0x400521: mk (leak-cases.c:49)
// ==20397==    by 0x400578: main (leak-cases.c:72)
//
// ==20397== 32 (16 direct, 16 indirect) bytes in 1 blocks are definitely
// lost in loss record 14 of 15
// ==20397==    at 0x4C2694E: malloc (vg_replace_malloc.c:177)
// ==20397==    by 0x400521: mk (leak-cases.c:49)
// ==20397==    by 0x400580: main (leak-cases.c:72)
//
// The first one is fine -- it describes AAA.
//
// The second one is for BBB.  It's correct in that 16 bytes in 1 block are
// directly lost.  It's also correct that 16 are indirectly lost as a result,
// but it means that AAA is being counted twice in the loss records.  (It's
// not, thankfully, counted twice in the summary counts.)  Argh.
//
// This would be less confusing for the second one:
//
// ==20397== 16 bytes in 1 blocks are definitely lost in loss record 14
// of 15 (and 16 bytes in 1 block are indirectly lost as a result;  they
// are mentioned elsewhere (if --show-reachable=yes or indirect is given
// in --show-leak-kinds=... !))
// ==20397==    at 0x4C2694E: malloc (vg_replace_malloc.c:177)
// ==20397==    by 0x400521: mk (leak-cases.c:49)
// ==20397==    by 0x400580: main (leak-cases.c:72)
//
// But ideally we'd present the loss record for the directly lost block and
// then the resultant indirectly lost blocks and make clear the
// dependence.  Double argh.

/*------------------------------------------------------------*/
/*--- The actual algorithm.                                ---*/
/*------------------------------------------------------------*/

// - Find all the blocks (a.k.a. chunks) to check.  Mempool chunks require
//   some special treatment because they can be within malloc'd blocks.
// - Scan every word in the root set (GP registers and valid
//   non-heap memory words).
//   - First, we skip if it doesn't point to valid memory.
//   - Then, we see if it points to the start or interior of a block.  If
//     so, we push the block onto the mark stack and mark it as having been
//     reached.
// - Then, we process the mark stack, repeating the scanning for each block;
//   this can push more blocks onto the mark stack.  We repeat until the
//   mark stack is empty.  Each block is marked as definitely or possibly
//   reachable, depending on whether interior-pointers were required to
//   reach it.
// - At this point we know for every block if it's reachable or not.
// - We then push each unreached block onto the mark stack, using the block
//   number as the "clique" number.
// - We process the mark stack again, this time grouping blocks into cliques
//   in order to facilitate the directly/indirectly lost categorisation.
// - We group blocks by their ExeContexts and categorisation, and print them
//   if --leak-check=full.  We also print summary numbers.
//
// A note on "cliques":
// - A directly lost block is one with no pointers to it.  An indirectly
//   lost block is one that is pointed to by a directly or indirectly lost
//   block.
// - Each directly lost block has zero or more indirectly lost blocks
//   hanging off it.  All these blocks together form a "clique".  The
//   directly lost block is called the "clique leader".  The clique number
//   is the number (in lc_chunks[]) of the clique leader.
// - Actually, a directly lost block may be pointed to if it's part of a
//   cycle.  In that case, there may be more than one choice for the clique
//   leader, and the choice is arbitrary.  Eg. if you have A-->B and B-->A
//   either A or B could be the clique leader.
// - Cliques cannot overlap, and will be truncated to avoid this.  Eg. if we
//   have A-->C and B-->C, the two cliques will be {A,C} and {B}, or {A} and
//   {B,C} (again the choice is arbitrary).  This is because we don't want
//   to count a block as indirectly lost more than once.  (A worked example
//   follows this comment block.)
//
// A note on 'is_prior_definite':
// - This is a boolean used in various places that indicates if the chain
//   up to the prior node (prior to the one being considered) is definite.
// - In the clique == -1 case:
//   - if True it means that the prior node is a root-set node, or that the
//     prior node is a block which is reachable from the root-set via
//     start-pointers.
//   - if False it means that the prior node is a block that is only
//     reachable from the root-set via a path including at least one
//     interior-pointer.
// - In the clique != -1 case, currently it's always True because we treat
//   start-pointers and interior-pointers the same for direct/indirect leak
//   checking.  If we added a PossibleIndirectLeak state then this would
//   change.

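// A worked clique example (block names invented for illustration).
// Suppose the unreached blocks point at each other like this:
//
//    A --> C     B --> C     A --> D
//
// If A is processed first, A becomes a clique leader; scanning A reaches
// C and D, so the clique is {A,C,D} and A's indirect_szB accounts for C
// and D.  When B is processed, scanning B reaches C, but C is no longer
// Unreached, so it is not added again: B's clique is just {B}.  Had B been
// processed first, the cliques would have been {B,C} and {A,D} -- the
// choice is arbitrary, but each block is counted as indirectly lost
// exactly once.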

// Define to debug the memory-leak-detector.
#define VG_DEBUG_LEAKCHECK 0
#define VG_DEBUG_CLIQUE    0


/*------------------------------------------------------------*/
/*--- Getting the initial chunks, and searching them.      ---*/
/*------------------------------------------------------------*/

// Compare the MC_Chunks by 'data' (i.e. the address of the block).
static Int compare_MC_Chunks(const void* n1, const void* n2)
{
   const MC_Chunk* mc1 = *(const MC_Chunk *const *)n1;
   const MC_Chunk* mc2 = *(const MC_Chunk *const *)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}

#if VG_DEBUG_LEAKCHECK
// Used to sanity-check the fast binary-search mechanism.
static
Int find_chunk_for_OLD ( Addr       ptr,
                         MC_Chunk** chunks,
                         Int        n_chunks )

{
   Int  i;
   Addr a_lo, a_hi;
   PROF_EVENT(70, "find_chunk_for_OLD");
   for (i = 0; i < n_chunks; i++) {
      PROF_EVENT(71, "find_chunk_for_OLD(loop)");
      a_lo = chunks[i]->data;
      a_hi = ((Addr)chunks[i]->data) + chunks[i]->szB;
      if (a_lo <= ptr && ptr < a_hi)
         return i;
   }
   return -1;
}
#endif

// Find the i such that ptr points at or inside the block described by
// chunks[i].  Return -1 if none found.  This assumes that chunks[]
// has been sorted on the 'data' field.
static
Int find_chunk_for ( Addr       ptr,
                     MC_Chunk** chunks,
                     Int        n_chunks )
{
   Addr a_mid_lo, a_mid_hi;
   Int lo, mid, hi, retVal;
   // VG_(printf)("find chunk for %p = ", ptr);
   retVal = -1;
   lo = 0;
   hi = n_chunks-1;
   while (True) {
      // Invariant: current unsearched space is from lo to hi, inclusive.
      if (lo > hi) break; // not found

      mid      = (lo + hi) / 2;
      a_mid_lo = chunks[mid]->data;
      a_mid_hi = chunks[mid]->data + chunks[mid]->szB;
      // Extent of block 'mid' is [a_mid_lo .. a_mid_hi).
      // Special-case zero-sized blocks - treat them as if they had
      // size 1.  Not doing so causes them to not cover any address
      // range at all and so will never be identified as the target of
      // any pointer, which causes them to be incorrectly reported as
      // definitely leaked.
      if (chunks[mid]->szB == 0)
         a_mid_hi++;

      if (ptr < a_mid_lo) {
         hi = mid-1;
         continue;
      }
      if (ptr >= a_mid_hi) {
         lo = mid+1;
         continue;
      }
      tl_assert(ptr >= a_mid_lo && ptr < a_mid_hi);
      retVal = mid;
      break;
   }

#  if VG_DEBUG_LEAKCHECK
   tl_assert(retVal == find_chunk_for_OLD ( ptr, chunks, n_chunks ));
#  endif
   // VG_(printf)("%d\n", retVal);
   return retVal;
}

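// A usage sketch (hypothetical caller, for illustration): given an array
// 'chunks' sorted by 'data', a scanned word 'w' is resolved to a block by:
//
//    Int i = find_chunk_for(w, chunks, n_chunks);
//    if (i != -1) {
//       // 'w' points at or inside chunks[i]; it is a start-pointer
//       // exactly when w == chunks[i]->data, else an interior-pointer.
//    }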

static MC_Chunk**
find_active_chunks(Int* pn_chunks)
{
   // Our goal is to construct a set of chunks that includes every
   // mempool chunk, and every malloc region that *doesn't* contain a
   // mempool chunk.
   MC_Mempool *mp;
   MC_Chunk **mallocs, **chunks, *mc;
   UInt n_mallocs, n_chunks, m, s;
   Bool *malloc_chunk_holds_a_pool_chunk;

   // First we collect all the malloc chunks into an array and sort it.
   // We do this because we want to query the chunks by interior
   // pointers, requiring binary search.
   mallocs = (MC_Chunk**) VG_(HT_to_array)( MC_(malloc_list), &n_mallocs );
   if (n_mallocs == 0) {
      tl_assert(mallocs == NULL);
      *pn_chunks = 0;
      return NULL;
   }
   VG_(ssort)(mallocs, n_mallocs, sizeof(VgHashNode*), compare_MC_Chunks);

   // Then we build an array containing a Bool for each malloc chunk,
   // indicating whether it contains any mempool chunks.
   malloc_chunk_holds_a_pool_chunk = VG_(calloc)( "mc.fas.1",
                                                  n_mallocs, sizeof(Bool) );
   n_chunks = n_mallocs;

   // Then we loop over the mempool tables.  For each chunk in each
   // pool, we set the entry in the Bool array corresponding to the
   // malloc chunk containing the mempool chunk.
   VG_(HT_ResetIter)(MC_(mempool_list));
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      VG_(HT_ResetIter)(mp->chunks);
      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {

         // We'll need to record this chunk.
         n_chunks++;

         // Possibly invalidate the malloc holding the beginning of this chunk.
         m = find_chunk_for(mc->data, mallocs, n_mallocs);
         if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
            tl_assert(n_chunks > 0);
            n_chunks--;
            malloc_chunk_holds_a_pool_chunk[m] = True;
         }

         // Possibly invalidate the malloc holding the end of this chunk.
         if (mc->szB > 1) {
            m = find_chunk_for(mc->data + (mc->szB - 1), mallocs, n_mallocs);
            if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
               tl_assert(n_chunks > 0);
               n_chunks--;
               malloc_chunk_holds_a_pool_chunk[m] = True;
            }
         }
      }
   }
   tl_assert(n_chunks > 0);

   // Create final chunk array.
   chunks = VG_(malloc)("mc.fas.2", sizeof(VgHashNode*) * (n_chunks));
   s = 0;

   // Copy the mempool chunks and the non-marked malloc chunks into a
   // combined array of chunks.
   VG_(HT_ResetIter)(MC_(mempool_list));
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      VG_(HT_ResetIter)(mp->chunks);
      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
         tl_assert(s < n_chunks);
         chunks[s++] = mc;
      }
   }
   for (m = 0; m < n_mallocs; ++m) {
      if (!malloc_chunk_holds_a_pool_chunk[m]) {
         tl_assert(s < n_chunks);
         chunks[s++] = mallocs[m];
      }
   }
   tl_assert(s == n_chunks);

   // Free temporaries.
   VG_(free)(mallocs);
   VG_(free)(malloc_chunk_holds_a_pool_chunk);

   *pn_chunks = n_chunks;

   return chunks;
}
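
// For illustration (hypothetical layout): if malloc block M spans
// [0x1000, 0x1100) and a mempool carved chunks P1 and P2 out of it at
// 0x1010 and 0x1080, M is marked as holding pool chunks and dropped, and
// the final array contains P1, P2 and every malloc block that holds no
// pool chunk.  This prevents the same memory from being checked twice.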

/*------------------------------------------------------------*/
/*--- The leak detector proper.                            ---*/
/*------------------------------------------------------------*/

// Holds extra info about each block during leak checking.
typedef
   struct {
      UInt  state:2;    // Reachedness.
      UInt  pending:1;  // Scan pending.
      union {
         SizeT indirect_szB : (sizeof(SizeT)*8)-3; // If Unreached, how many bytes
                                                   // are unreachable from here.
         SizeT clique : (sizeof(SizeT)*8)-3;       // if IndirectLeak, clique leader
                                                   // to which it belongs.
      } IorC;
   }
   LC_Extra;

// An array holding pointers to every chunk we're checking.  Sorted by address.
// lc_chunks is initialised during leak search.  It is kept after leak search
// to support printing the list of blocks belonging to a loss record.
// The lc_chunks array can only be used validly till the next "free" operation
// (as a free operation potentially destroys one or more chunks).
// To detect whether lc_chunks is still valid, we store the number of free
// operations done when lc_chunks was built: lc_chunks (and lc_extras) stay
// valid as long as no free operation has been done since they were built.
static MC_Chunk** lc_chunks;
// How many chunks we're dealing with.
static Int        lc_n_chunks;
static SizeT lc_chunks_n_frees_marker;
// This has the same number of entries as lc_chunks, and each entry
// in lc_chunks corresponds with the entry here (ie. lc_chunks[i] and
// lc_extras[i] describe the same block).
static LC_Extra* lc_extras;

// Chunks are converted and merged into loss records, maintained in lr_table.
// lr_table elements are kept from one leak_search to another to implement
// the "print new/changed leaks" client request.
static OSet*        lr_table;
// Array of sorted loss records (produced during last leak search).
static LossRecord** lr_array;


// DeltaMode used the last time we called detect_memory_leaks.
// The recorded leak errors must be output using logic based on this delta_mode.
// The below avoids replicating the delta_mode in each LossRecord.
LeakCheckDeltaMode MC_(detect_memory_leaks_last_delta_mode);


// Records chunks that are currently being processed.  Each element in the
// stack is an index into lc_chunks and lc_extras.  Its size is
// 'lc_n_chunks' because in the worst case that's how many chunks could be
// pushed onto it (actually I think the maximum is lc_n_chunks-1 but let's
// be conservative).
static Int* lc_markstack;
// The index of the top element of the stack; -1 if the stack is empty, 0 if
// the stack has one element, 1 if it has two, etc.
static Int  lc_markstack_top;

// Keeps track of how many bytes of memory we've scanned, for printing.
// (Nb: We don't keep track of how many register bytes we've scanned.)
static SizeT lc_scanned_szB;


SizeT MC_(bytes_leaked)     = 0;
SizeT MC_(bytes_indirect)   = 0;
SizeT MC_(bytes_dubious)    = 0;
SizeT MC_(bytes_reachable)  = 0;
SizeT MC_(bytes_suppressed) = 0;

SizeT MC_(blocks_leaked)     = 0;
SizeT MC_(blocks_indirect)   = 0;
SizeT MC_(blocks_dubious)    = 0;
SizeT MC_(blocks_reachable)  = 0;
SizeT MC_(blocks_suppressed) = 0;

// Determines if a pointer is to a chunk.  Returns the chunk number et al
// via call-by-reference.
static Bool
lc_is_a_chunk_ptr(Addr ptr, Int* pch_no, MC_Chunk** pch, LC_Extra** pex)
{
   Int ch_no;
   MC_Chunk* ch;
   LC_Extra* ex;

   // Quick filter.  Note: implemented with am, not with get_vabits2
   // as ptr might be random data pointing anywhere.  On 64 bit
   // platforms, getting va bits for random data can be quite costly
   // due to the secondary map.
   if (!VG_(am_is_valid_for_client)(ptr, 1, VKI_PROT_READ)) {
      return False;
   } else {
      ch_no = find_chunk_for(ptr, lc_chunks, lc_n_chunks);
      tl_assert(ch_no >= -1 && ch_no < lc_n_chunks);

      if (ch_no == -1) {
         return False;
      } else {
         // Ok, we've found a pointer to a chunk.  Get the MC_Chunk and its
         // LC_Extra.
         ch = lc_chunks[ch_no];
         ex = &(lc_extras[ch_no]);

         tl_assert(ptr >= ch->data);
         tl_assert(ptr < ch->data + ch->szB + (ch->szB==0 ? 1 : 0));

         if (VG_DEBUG_LEAKCHECK)
            VG_(printf)("ptr=%#lx -> block %d\n", ptr, ch_no);

         *pch_no = ch_no;
         *pch    = ch;
         *pex    = ex;

         return True;
      }
   }
}

// Push a chunk (well, just its index) onto the mark stack.
static void lc_push(Int ch_no, MC_Chunk* ch)
{
   if (!lc_extras[ch_no].pending) {
      if (0) {
         VG_(printf)("pushing %#lx-%#lx\n", ch->data, ch->data + ch->szB);
      }
      lc_markstack_top++;
      tl_assert(lc_markstack_top < lc_n_chunks);
      lc_markstack[lc_markstack_top] = ch_no;
      tl_assert(!lc_extras[ch_no].pending);
      lc_extras[ch_no].pending = True;
   }
}

// Pop the index of the chunk on top of the mark stack into *ret.
// Returns False if the stack is empty, True otherwise.
static Bool lc_pop(Int* ret)
{
   if (-1 == lc_markstack_top) {
      return False;
   } else {
      tl_assert(0 <= lc_markstack_top && lc_markstack_top < lc_n_chunks);
      *ret = lc_markstack[lc_markstack_top];
      lc_markstack_top--;
      tl_assert(lc_extras[*ret].pending);
      lc_extras[*ret].pending = False;
      return True;
   }
}


// If 'ptr' is pointing to a heap-allocated block which hasn't been seen
// before, push it onto the mark stack.
static void
lc_push_without_clique_if_a_chunk_ptr(Addr ptr, Bool is_prior_definite)
{
   Int ch_no;
   MC_Chunk* ch;
   LC_Extra* ex;

   if ( ! lc_is_a_chunk_ptr(ptr, &ch_no, &ch, &ex) )
      return;

   // Possibly upgrade the state, ie. one of:
   // - Unreached --> Possible
   // - Unreached --> Reachable
   // - Possible  --> Reachable
   if (ptr == ch->data && is_prior_definite && ex->state != Reachable) {
      // 'ptr' points to the start of the block, and the prior node is
      // definite, which means that this block is definitely reachable.
      ex->state = Reachable;

      // State has changed to Reachable so (re)scan the block to make
      // sure any blocks it points to are correctly marked.
      lc_push(ch_no, ch);

   } else if (ex->state == Unreached) {
      // Either 'ptr' is an interior-pointer, or the prior node isn't definite,
      // which means that we can only mark this block as possibly reachable.
      ex->state = Possible;

      // State has changed to Possible so (re)scan the block to make
      // sure any blocks it points to are correctly marked.
      lc_push(ch_no, ch);
   }
}

static void
lc_push_if_a_chunk_ptr_register(ThreadId tid, const HChar* regname, Addr ptr)
{
   lc_push_without_clique_if_a_chunk_ptr(ptr, /*is_prior_definite*/True);
}

// If ptr is pointing to a heap-allocated block which hasn't been seen
// before, push it onto the mark stack.  Clique is the index of the
// clique leader.
static void
lc_push_with_clique_if_a_chunk_ptr(Addr ptr, Int clique, Int cur_clique)
{
   Int ch_no;
   MC_Chunk* ch;
   LC_Extra* ex;

   tl_assert(0 <= clique && clique < lc_n_chunks);

   if ( ! lc_is_a_chunk_ptr(ptr, &ch_no, &ch, &ex) )
      return;

   // If it's not Unreached, it's already been handled so ignore it.
   // If ch_no==clique, it's the clique leader, which means this is a cyclic
   // structure;  again ignore it because it's already been handled.
   if (ex->state == Unreached && ch_no != clique) {
      // Note that, unlike reachable blocks, we currently don't distinguish
      // between start-pointers and interior-pointers here.  We probably
      // should, though.
      lc_push(ch_no, ch);

      // Add the block to the clique, and add its size to the
      // clique-leader's indirect size.  Also, if the new block was
      // itself a clique leader, it isn't any more, so add its
      // indirect_szB to the new clique leader.
      if (VG_DEBUG_CLIQUE) {
         if (ex->IorC.indirect_szB > 0)
            VG_(printf)("  clique %d joining clique %d adding %lu+%lu\n",
                        ch_no, clique, (unsigned long)ch->szB,
                        (unsigned long)ex->IorC.indirect_szB);
         else
            VG_(printf)("  block %d joining clique %d adding %lu\n",
                        ch_no, clique, (unsigned long)ch->szB);
      }

      lc_extras[clique].IorC.indirect_szB += ch->szB;
      lc_extras[clique].IorC.indirect_szB += ex->IorC.indirect_szB;
      ex->state = IndirectLeak;
      ex->IorC.clique = (SizeT) cur_clique;
   }
}

static void
lc_push_if_a_chunk_ptr(Addr ptr, Int clique, Int cur_clique, Bool is_prior_definite)
{
   if (-1 == clique)
      lc_push_without_clique_if_a_chunk_ptr(ptr, is_prior_definite);
   else
      lc_push_with_clique_if_a_chunk_ptr(ptr, clique, cur_clique);
}


static VG_MINIMAL_JMP_BUF(memscan_jmpbuf);

static
void scan_all_valid_memory_catcher ( Int sigNo, Addr addr )
{
   if (0)
      VG_(printf)("OUCH! sig=%d addr=%#lx\n", sigNo, addr);
   if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS)
      VG_MINIMAL_LONGJMP(memscan_jmpbuf);
}

// lc_scan_memory has 2 modes:
//
// 1. Leak check mode (searched == 0).
// -----------------------------------
// Scan a block of memory between [start, start+len).  This range may
// be bogus, inaccessible, or otherwise strange; we deal with it.  For each
// valid aligned word we check whether it points to a chunk, and push the
// chunk onto the mark stack if so.
// clique is the "highest level clique" in which indirectly leaked blocks have
// to be collected.  cur_clique is the current "lower" level clique through which
// the memory to be scanned has been found.
// Example: in the below tree, if A is leaked, the top level clique will
// be A, while lower level cliques will be B and C.
/*
           A
          / \
         B   C
        / \ / \
       D  E F  G
*/
// Proper handling of the top and lowest level cliques allows block_list of a
// loss record to describe the hierarchy of indirectly leaked blocks.
//
// 2. Search ptr mode (searched != 0).
// -----------------------------------
// In this mode, lc_scan_memory searches for pointers to a specific address
// range: it just scans [start..start+len[ for pointers to searched and
// outputs the places where searched is found.  It does not recursively
// scan the found memory.
static void
lc_scan_memory(Addr start, SizeT len, Bool is_prior_definite, Int clique, Int cur_clique,
               Addr searched, SizeT szB)
{
   /* The memory scan is based on the assumption that valid pointers are
      aligned on a multiple of sizeof(Addr).  So, we can (and must) skip the
      beginning and end portions of the block if they are not aligned on
      sizeof(Addr): these cannot be valid pointers, and calls to
      MC_(is_valid_aligned_word) will assert for a non-aligned address. */
   Addr ptr = VG_ROUNDUP(start, sizeof(Addr));
   Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
   vki_sigset_t sigmask;

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("scan %#lx-%#lx (%lu)\n", start, end, len);

   VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
   VG_(set_fault_catcher)(scan_all_valid_memory_catcher);

   /* Optimisation: the loop below will check, at each beginning of an SM
      chunk, if the chunk is fully unaddressable.  The idea is to skip
      efficiently such fully unaddressable SM chunks.
      So, we preferably start the loop on a chunk boundary.
      If the chunk is not fully unaddressable, we might be in
      an unaddressable page.  Again, the idea is to skip efficiently
      such an unaddressable page: this is the "else" part.
      We use an "else" so that two consecutive fully unaddressable
      SM chunks will be skipped efficiently: the first one is skipped
      by this piece of code.  The next SM chunk will be skipped inside
      the loop. */
   if ( ! MC_(is_within_valid_secondary)(ptr) ) {
      // Skip an invalid SM chunk till the beginning of the next SM Chunk.
      ptr = VG_ROUNDUP(ptr+1, SM_SIZE);
   } else if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ)) {
      // else we are in a (at least partially) valid SM chunk.
      // We might be in the middle of an unreadable page.
      // Do a cheap check to see if it's valid;
      // if not, skip onto the next page.
      ptr = VG_PGROUNDUP(ptr+1);        // First page is bad - skip it.
   }
   /* The above optimisation and the below loop are based on some
      relationships between VKI_PAGE_SIZE, SM_SIZE and sizeof(Addr) which
      are asserted in MC_(detect_memory_leaks). */

   while (ptr < end) {
      Addr addr;

      // Skip invalid chunks.
      if (UNLIKELY((ptr % SM_SIZE) == 0)) {
         if (! MC_(is_within_valid_secondary)(ptr) ) {
            ptr = VG_ROUNDUP(ptr+1, SM_SIZE);
            continue;
         }
      }

      // Look to see if this page seems reasonable.
      if (UNLIKELY((ptr % VKI_PAGE_SIZE) == 0)) {
         if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ)) {
            ptr += VKI_PAGE_SIZE;      // Bad page - skip it.
            continue;
         }
         // aspacemgr indicates the page is readable and belongs to client.
         // We still probe the page explicitly in case aspacemgr is
         // desynchronised with the real page mappings.
         // Such a desynchronisation can happen due to an aspacemgr bug.
         // Note that if the application is using mprotect(NONE), then
         // a page can be unreadable but have addressable and defined
         // VA bits (see mc_main.c function mc_new_mem_mprotect).
         if (VG_MINIMAL_SETJMP(memscan_jmpbuf) == 0) {
            // Try a read in the beginning of the page ...
            Addr test = *(volatile Addr *)ptr;
            __asm__ __volatile__("": :"r"(test) : "cc","memory");
         } else {
            // Catch read error ...
            // We need to restore the signal mask, because we were
            // longjmped out of a signal handler.
            VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
            ptr += VKI_PAGE_SIZE;      // Bad page - skip it.
            continue;
         }
      }

      if ( MC_(is_valid_aligned_word)(ptr) ) {
         lc_scanned_szB += sizeof(Addr);
         addr = *(Addr *)ptr;
         // If we get here, the scanned word is in valid memory.  Now
         // let's see if its contents point to a chunk.
         if (UNLIKELY(searched)) {
            if (addr >= searched && addr < searched + szB) {
               if (addr == searched)
                  VG_(umsg)("*%#lx points at %#lx\n", ptr, searched);
               else
                  VG_(umsg)("*%#lx interior points at %lu bytes inside %#lx\n",
                            ptr, (long unsigned) addr - searched, searched);
               MC_(pp_describe_addr) (ptr);
            }
         } else {
            lc_push_if_a_chunk_ptr(addr, clique, cur_clique, is_prior_definite);
         }
      } else if (0 && VG_DEBUG_LEAKCHECK) {
         VG_(printf)("%#lx not valid\n", ptr);
      }
      ptr += sizeof(Addr);
   }

   VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
   VG_(set_fault_catcher)(NULL);
}
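
// For illustration, the two calling patterns used in this file (sketch;
// argument values are hypothetical):
//
//    // 1. Leak check mode: scan a root segment; no clique yet.
//    lc_scan_memory(seg_start, seg_len, /*is_prior_definite*/True,
//                   /*clique*/-1, /*cur_clique*/-1, /*searched*/0, 0);
//
//    // 2. Search ptr mode: report pointers into [target, target+szB[.
//    lc_scan_memory(seg_start, seg_len, True, -1, -1, target, szB);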


// Process the mark stack until empty.
static void lc_process_markstack(Int clique)
{
   Int  top = -1;    // shut gcc up
   Bool is_prior_definite;

   while (lc_pop(&top)) {
      tl_assert(top >= 0 && top < lc_n_chunks);

      // See comment about 'is_prior_definite' at the top to understand this.
      is_prior_definite = ( Possible != lc_extras[top].state );

      lc_scan_memory(lc_chunks[top]->data, lc_chunks[top]->szB,
                     is_prior_definite, clique, (clique == -1 ? -1 : top),
                     /*searched*/ 0, 0);
   }
}

static Word cmp_LossRecordKey_LossRecord(const void* key, const void* elem)
{
   const LossRecordKey* a = key;
   const LossRecordKey* b = &(((const LossRecord*)elem)->key);

   // Compare on states first because that's fast.
   if (a->state < b->state) return -1;
   if (a->state > b->state) return  1;
   // Ok, the states are equal.  Now compare the locations, which is slower.
   if (VG_(eq_ExeContext)(
            MC_(clo_leak_resolution), a->allocated_at, b->allocated_at))
      return 0;
   // Different locations.  Ordering is arbitrary, just use the ec pointer.
   if (a->allocated_at < b->allocated_at) return -1;
   if (a->allocated_at > b->allocated_at) return  1;
   VG_(tool_panic)("bad LossRecord comparison");
}

static Int cmp_LossRecords(const void* va, const void* vb)
{
   const LossRecord* lr_a = *(const LossRecord *const *)va;
   const LossRecord* lr_b = *(const LossRecord *const *)vb;
   SizeT total_szB_a = lr_a->szB + lr_a->indirect_szB;
   SizeT total_szB_b = lr_b->szB + lr_b->indirect_szB;

   // First compare by sizes.
   if (total_szB_a < total_szB_b) return -1;
   if (total_szB_a > total_szB_b) return  1;
   // If sizes are equal, compare by states.
   if (lr_a->key.state < lr_b->key.state) return -1;
   if (lr_a->key.state > lr_b->key.state) return  1;
   // If they're still equal here, it doesn't matter that much, but we keep
   // comparing other things so that regtests are as deterministic as
   // possible.  So: compare num_blocks.
   if (lr_a->num_blocks < lr_b->num_blocks) return -1;
   if (lr_a->num_blocks > lr_b->num_blocks) return  1;
   // Finally, compare ExeContext addresses... older ones are likely to have
   // lower addresses.
   if (lr_a->key.allocated_at < lr_b->key.allocated_at) return -1;
   if (lr_a->key.allocated_at > lr_b->key.allocated_at) return  1;
   return 0;
}

// Allocates or reallocates lr_array, and sets its elements to the loss
// records contained in lr_table.
static Int get_lr_array_from_lr_table(void) {
   Int          i, n_lossrecords;
   LossRecord*  lr;

   n_lossrecords = VG_(OSetGen_Size)(lr_table);

   // (re-)create the array of pointers to the loss records.
   // lr_array is kept to allow producing the block list from gdbserver.
   if (lr_array != NULL)
      VG_(free)(lr_array);
   lr_array = VG_(malloc)("mc.pr.2", n_lossrecords * sizeof(LossRecord*));
   i = 0;
   VG_(OSetGen_ResetIter)(lr_table);
   while ( (lr = VG_(OSetGen_Next)(lr_table)) ) {
      lr_array[i++] = lr;
   }
   tl_assert(i == n_lossrecords);
   return n_lossrecords;
}


static void get_printing_rules(LeakCheckParams* lcp,
                               LossRecord*  lr,
                               Bool* count_as_error,
                               Bool* print_record)
{
   // Rules for printing:
   // - We don't show suppressed loss records ever (and that's controlled
   //   within the error manager).
   // - We show non-suppressed loss records that are specified in
   //   --show-leak-kinds=... if --leak-check=yes.

   Bool delta_considered;

   switch (lcp->deltamode) {
   case LCD_Any:
      delta_considered = lr->num_blocks > 0;
      break;
   case LCD_Increased:
      delta_considered
         = lr->szB > lr->old_szB
         || lr->indirect_szB > lr->old_indirect_szB
         || lr->num_blocks > lr->old_num_blocks;
      break;
   case LCD_Changed:
      delta_considered = lr->szB != lr->old_szB
         || lr->indirect_szB != lr->old_indirect_szB
         || lr->num_blocks != lr->old_num_blocks;
      break;
   default:
      tl_assert(0);
   }

   *print_record = lcp->mode == LC_Full && delta_considered
      && RiS(lr->key.state,lcp->show_leak_kinds);
   // We don't count leaks as errors with lcp->mode==LC_Summary.
   // Otherwise you can get high error counts with few or no error
   // messages, which can be confusing.  Apart from that, we count as
   // errors the leak kinds requested by --errors-for-leak-kinds=...
   *count_as_error = lcp->mode == LC_Full && delta_considered
      && RiS(lr->key.state,lcp->errors_for_leak_kinds);
}

static void print_results(ThreadId tid, LeakCheckParams* lcp)
{
   Int          i, n_lossrecords, start_lr_output_scan;
   LossRecord*  lr;
   Bool         is_suppressed;
   SizeT        old_bytes_leaked      = MC_(bytes_leaked); /* to report delta in summary */
   SizeT        old_bytes_indirect    = MC_(bytes_indirect);
   SizeT        old_bytes_dubious     = MC_(bytes_dubious);
   SizeT        old_bytes_reachable   = MC_(bytes_reachable);
   SizeT        old_bytes_suppressed  = MC_(bytes_suppressed);
   SizeT        old_blocks_leaked     = MC_(blocks_leaked);
   SizeT        old_blocks_indirect   = MC_(blocks_indirect);
   SizeT        old_blocks_dubious    = MC_(blocks_dubious);
   SizeT        old_blocks_reachable  = MC_(blocks_reachable);
   SizeT        old_blocks_suppressed = MC_(blocks_suppressed);

   if (lr_table == NULL)
      // Create the lr_table, which holds the loss records.
      // If the lr_table already exists, it means it contains
      // loss_records from the previous leak search.  The old_*
      // values in these records are used to implement the
      // leak check delta mode.
      lr_table =
         VG_(OSetGen_Create)(offsetof(LossRecord, key),
                             cmp_LossRecordKey_LossRecord,
                             VG_(malloc), "mc.pr.1",
                             VG_(free));

   // If we have loss records from a previous search, reset values to have
   // proper printing of the deltas between previous search and this search.
   n_lossrecords = get_lr_array_from_lr_table();
   for (i = 0; i < n_lossrecords; i++) {
      if (lr_array[i]->num_blocks == 0) {
         // remove from lr_table the old loss_records with 0 bytes found
         VG_(OSetGen_Remove) (lr_table, &lr_array[i]->key);
         VG_(OSetGen_FreeNode)(lr_table, lr_array[i]);
      } else {
         // move the leak sizes to old_* and zero the current sizes
         // for next leak search
         lr_array[i]->old_szB          = lr_array[i]->szB;
         lr_array[i]->old_indirect_szB = lr_array[i]->indirect_szB;
         lr_array[i]->old_num_blocks   = lr_array[i]->num_blocks;
         lr_array[i]->szB              = 0;
         lr_array[i]->indirect_szB     = 0;
         lr_array[i]->num_blocks       = 0;
      }
   }
   // lr_array now contains "invalid" loss records => free it.
   // lr_array will be re-created below with the kept and new loss records.
   VG_(free) (lr_array);
   lr_array = NULL;

   // Convert the chunks into loss records, merging them where appropriate.
   for (i = 0; i < lc_n_chunks; i++) {
      MC_Chunk*     ch = lc_chunks[i];
      LC_Extra*     ex = &(lc_extras)[i];
      LossRecord*   old_lr;
      LossRecordKey lrkey;
      lrkey.state        = ex->state;
      lrkey.allocated_at = ch->where;

      old_lr = VG_(OSetGen_Lookup)(lr_table, &lrkey);
      if (old_lr) {
         // We found an existing loss record matching this chunk.  Update the
         // loss record's details in-situ.  This is safe because we don't
         // change the elements used as the OSet key.
         old_lr->szB          += ch->szB;
         if (ex->state == Unreached)
            old_lr->indirect_szB += ex->IorC.indirect_szB;
         old_lr->num_blocks++;
      } else {
         // No existing loss record matches this chunk.  Create a new loss
         // record, initialise it from the chunk, and insert it into lr_table.
         lr = VG_(OSetGen_AllocNode)(lr_table, sizeof(LossRecord));
         lr->key              = lrkey;
         lr->szB              = ch->szB;
         if (ex->state == Unreached)
            lr->indirect_szB  = ex->IorC.indirect_szB;
         else
            lr->indirect_szB  = 0;
         lr->num_blocks       = 1;
         lr->old_szB          = 0;
         lr->old_indirect_szB = 0;
         lr->old_num_blocks   = 0;
         VG_(OSetGen_Insert)(lr_table, lr);
      }
   }

   // (re-)create the array of pointers to the (new) loss records.
   n_lossrecords = get_lr_array_from_lr_table ();
   tl_assert(VG_(OSetGen_Size)(lr_table) == n_lossrecords);

   // Sort the array by loss record sizes.
   VG_(ssort)(lr_array, n_lossrecords, sizeof(LossRecord*),
              cmp_LossRecords);

   // Zero totals.
   MC_(blocks_leaked)     = MC_(bytes_leaked)     = 0;
   MC_(blocks_indirect)   = MC_(bytes_indirect)   = 0;
   MC_(blocks_dubious)    = MC_(bytes_dubious)    = 0;
   MC_(blocks_reachable)  = MC_(bytes_reachable)  = 0;
   MC_(blocks_suppressed) = MC_(bytes_suppressed) = 0;

   // If there is a maximum nr of loss records we can output, then first
   // compute from where the output scan has to start.
   // By default, start from the first loss record.  Compute a higher
   // value if there is a maximum to respect.  We need to print the last
   // records, as the ones with the biggest sizes are more interesting.
   start_lr_output_scan = 0;
   if (lcp->mode == LC_Full && lcp->max_loss_records_output < n_lossrecords) {
      Int nr_printable_records = 0;
      for (i = n_lossrecords - 1; i >= 0 && start_lr_output_scan == 0; i--) {
         Bool count_as_error, print_record;
         lr = lr_array[i];
         get_printing_rules (lcp, lr, &count_as_error, &print_record);
         // Do not use get_printing_rules results for is_suppressed, as we
         // only want to check if the record would be suppressed.
         is_suppressed =
            MC_(record_leak_error) ( tid, i+1, n_lossrecords, lr,
                                     False /* print_record */,
                                     False /* count_as_error */);
         if (print_record && !is_suppressed) {
            nr_printable_records++;
            if (nr_printable_records == lcp->max_loss_records_output)
               start_lr_output_scan = i;
         }
      }
   }

   // Print the loss records (in size order) and collect summary stats.
   for (i = start_lr_output_scan; i < n_lossrecords; i++) {
      Bool count_as_error, print_record;
      lr = lr_array[i];
      get_printing_rules(lcp, lr, &count_as_error, &print_record);
      is_suppressed =
         MC_(record_leak_error) ( tid, i+1, n_lossrecords, lr, print_record,
                                  count_as_error );

      if (is_suppressed) {
         MC_(blocks_suppressed) += lr->num_blocks;
         MC_(bytes_suppressed)  += lr->szB;

      } else if (Unreached == lr->key.state) {
         MC_(blocks_leaked) += lr->num_blocks;
         MC_(bytes_leaked)  += lr->szB;

      } else if (IndirectLeak == lr->key.state) {
         MC_(blocks_indirect) += lr->num_blocks;
         MC_(bytes_indirect)  += lr->szB;

      } else if (Possible == lr->key.state) {
         MC_(blocks_dubious) += lr->num_blocks;
         MC_(bytes_dubious)  += lr->szB;

      } else if (Reachable == lr->key.state) {
         MC_(blocks_reachable) += lr->num_blocks;
         MC_(bytes_reachable)  += lr->szB;

      } else {
         VG_(tool_panic)("unknown loss mode");
      }
   }

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
      HChar d_bytes[20];
      HChar d_blocks[20];

      VG_(umsg)("LEAK SUMMARY:\n");
      VG_(umsg)("   definitely lost: %'lu%s bytes in %'lu%s blocks\n",
                MC_(bytes_leaked),
                MC_(snprintf_delta) (d_bytes, 20, MC_(bytes_leaked), old_bytes_leaked, lcp->deltamode),
                MC_(blocks_leaked),
                MC_(snprintf_delta) (d_blocks, 20, MC_(blocks_leaked), old_blocks_leaked, lcp->deltamode));
      VG_(umsg)("   indirectly lost: %'lu%s bytes in %'lu%s blocks\n",
                MC_(bytes_indirect),
                MC_(snprintf_delta) (d_bytes, 20, MC_(bytes_indirect), old_bytes_indirect, lcp->deltamode),
                MC_(blocks_indirect),
                MC_(snprintf_delta) (d_blocks, 20, MC_(blocks_indirect), old_blocks_indirect, lcp->deltamode) );
      VG_(umsg)("     possibly lost: %'lu%s bytes in %'lu%s blocks\n",
                MC_(bytes_dubious),
                MC_(snprintf_delta) (d_bytes, 20, MC_(bytes_dubious), old_bytes_dubious, lcp->deltamode),
                MC_(blocks_dubious),
                MC_(snprintf_delta) (d_blocks, 20, MC_(blocks_dubious), old_blocks_dubious, lcp->deltamode) );
      VG_(umsg)("   still reachable: %'lu%s bytes in %'lu%s blocks\n",
                MC_(bytes_reachable),
                MC_(snprintf_delta) (d_bytes, 20, MC_(bytes_reachable), old_bytes_reachable, lcp->deltamode),
                MC_(blocks_reachable),
                MC_(snprintf_delta) (d_blocks, 20, MC_(blocks_reachable), old_blocks_reachable, lcp->deltamode) );
      VG_(umsg)("        suppressed: %'lu%s bytes in %'lu%s blocks\n",
                MC_(bytes_suppressed),
                MC_(snprintf_delta) (d_bytes, 20, MC_(bytes_suppressed), old_bytes_suppressed, lcp->deltamode),
                MC_(blocks_suppressed),
                MC_(snprintf_delta) (d_blocks, 20, MC_(blocks_suppressed), old_blocks_suppressed, lcp->deltamode) );
      if (lcp->mode != LC_Full &&
          (MC_(blocks_leaked) + MC_(blocks_indirect) +
           MC_(blocks_dubious) + MC_(blocks_reachable)) > 0) {
         if (lcp->requested_by_monitor_command)
            VG_(umsg)("To see details of leaked memory, give 'full' arg to leak_check\n");
         else
            VG_(umsg)("Rerun with --leak-check=full to see details "
                      "of leaked memory\n");
      }
      if (lcp->mode == LC_Full &&
          MC_(blocks_reachable) > 0 && !RiS(Reachable,lcp->show_leak_kinds))
      {
         VG_(umsg)("Reachable blocks (those to which a pointer "
                   "was found) are not shown.\n");
         if (lcp->requested_by_monitor_command)
            VG_(umsg)("To see them, add 'reachable any' args to leak_check\n");
         else
            VG_(umsg)("To see them, rerun with: --leak-check=full "
                      "--show-leak-kinds=all\n");
      }
      VG_(umsg)("\n");
   }
}

// Print recursively all indirectly leaked blocks collected in a clique.
static void print_clique (Int clique, UInt level)
{
   Int ind;
   Int i, n_lossrecords;

   n_lossrecords = VG_(OSetGen_Size)(lr_table);

   for (ind = 0; ind < lc_n_chunks; ind++) {
      LC_Extra* ind_ex = &(lc_extras)[ind];
      if (ind_ex->state == IndirectLeak && ind_ex->IorC.clique == (SizeT) clique) {
         MC_Chunk*    ind_ch = lc_chunks[ind];
         LossRecord*  ind_lr;
         LossRecordKey ind_lrkey;
         Int lr_i;
         ind_lrkey.state = ind_ex->state;
         ind_lrkey.allocated_at = ind_ch->where;
         ind_lr = VG_(OSetGen_Lookup)(lr_table, &ind_lrkey);
         for (lr_i = 0; lr_i < n_lossrecords; lr_i++)
            if (ind_lr == lr_array[lr_i])
               break;
         for (i = 0; i < level; i++)
            VG_(umsg)("  ");
         VG_(umsg)("%p[%lu] indirect loss record %d\n",
                   (void *)ind_ch->data, (unsigned long)ind_ch->szB,
                   lr_i+1); // lr_i+1 for user numbering.
         if (lr_i >= n_lossrecords)
            VG_(umsg)
               ("error: no indirect loss record found for %p[%lu]?????\n",
                (void *)ind_ch->data, (unsigned long)ind_ch->szB);
         print_clique(ind, level+1);
      }
   }
}

Bool MC_(print_block_list) ( UInt loss_record_nr)
{
   Int          i, n_lossrecords;
   LossRecord*  lr;

   if (lr_table == NULL || lc_chunks == NULL || lc_extras == NULL) {
      VG_(umsg)("Can't print block list : no valid leak search result\n");
      return False;
   }

   if (lc_chunks_n_frees_marker != MC_(get_cmalloc_n_frees)()) {
      VG_(umsg)("Can't print obsolete block list : redo a leak search first\n");
      return False;
   }

   n_lossrecords = VG_(OSetGen_Size)(lr_table);
   if (loss_record_nr >= n_lossrecords)
      return False; // Invalid loss record nr.

   tl_assert (lr_array);
   lr = lr_array[loss_record_nr];

   // (re-)print the loss record details.
   // (+1 on loss_record_nr as user numbering for loss records starts at 1).
   MC_(pp_LossRecord)(loss_record_nr+1, n_lossrecords, lr);

   // Match the chunks with loss records.
   for (i = 0; i < lc_n_chunks; i++) {
      MC_Chunk*     ch = lc_chunks[i];
      LC_Extra*     ex = &(lc_extras)[i];
      LossRecord*   old_lr;
      LossRecordKey lrkey;
      lrkey.state        = ex->state;
      lrkey.allocated_at = ch->where;

      old_lr = VG_(OSetGen_Lookup)(lr_table, &lrkey);
      if (old_lr) {
         // We found an existing loss record matching this chunk.
         // If this is the loss record we are looking for, then output the pointer.
         if (old_lr == lr_array[loss_record_nr]) {
            VG_(umsg)("%p[%lu]\n",
                      (void *)ch->data, (unsigned long) ch->szB);
            if (ex->state != Reachable) {
               // We can print the clique in all states, except Reachable.
               // In Unreached state, lc_chunk[i] is the clique leader.
               // In IndirectLeak, lc_chunk[i] might have been a clique leader
               // which was later collected in another clique.
               // For Possible, lc_chunk[i] might be the top of a clique
               // or an intermediate clique.
               print_clique(i, 1);
            }
         }
      } else {
         // No existing loss record matches this chunk ???
         VG_(umsg)("error: no loss record found for %p[%lu]?????\n",
                   (void *)ch->data, (unsigned long) ch->szB);
      }
   }
   return True;
}
1257
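// Sketch of intended use, via the gdbserver monitor command (the record
// number is hypothetical):
//    (gdb) monitor block_list 7
// This re-prints loss record 7, then prints each block belonging to it as
// "address[size]", each followed by the clique of blocks it keeps alive.
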
// If searched == 0, scan the memory root set, pushing onto the mark stack
// the blocks encountered.
// Otherwise (searched != 0), scan the memory root set searching for pointers
// pointing inside [searched, searched+szB[.
static void scan_memory_root_set(Addr searched, SizeT szB)
{
   Int   i;
   Int   n_seg_starts;
   Addr* seg_starts = VG_(get_segment_starts)( &n_seg_starts );

   tl_assert(seg_starts && n_seg_starts > 0);

   lc_scanned_szB = 0;

   // VG_(am_show_nsegments)( 0, "leakcheck");
   for (i = 0; i < n_seg_starts; i++) {
      SizeT seg_size;
      NSegment const* seg = VG_(am_find_nsegment)( seg_starts[i] );
      tl_assert(seg);

      if (seg->kind != SkFileC && seg->kind != SkAnonC) continue;
      if (!(seg->hasR && seg->hasW))                    continue;
      if (seg->isCH)                                    continue;

      // Don't poke around in device segments as this may cause
      // hangs.  Exclude /dev/zero just in case someone allocated
      // memory by explicitly mapping /dev/zero.
      if (seg->kind == SkFileC
          && (VKI_S_ISCHR(seg->mode) || VKI_S_ISBLK(seg->mode))) {
         HChar* dev_name = VG_(am_get_filename)( seg );
         if (dev_name && 0 == VG_(strcmp)(dev_name, "/dev/zero")) {
            // Don't skip /dev/zero.
         } else {
            // Skip this device mapping.
            continue;
         }
      }

      if (0)
         VG_(printf)("ACCEPT %2d  %#lx %#lx\n", i, seg->start, seg->end);

      // Scan the segment.  We use -1 for the clique number, because this
      // is a root-set.
      seg_size = seg->end - seg->start + 1;
      if (VG_(clo_verbosity) > 2) {
         VG_(message)(Vg_DebugMsg,
                      "  Scanning root segment: %#lx..%#lx (%lu)\n",
                      seg->start, seg->end, seg_size);
      }
      lc_scan_memory(seg->start, seg_size, /*is_prior_definite*/True,
                     /*clique*/-1, /*cur_clique*/-1,
                     searched, szB);
   }
   VG_(free)(seg_starts);
}
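
// The two calling modes used in this file:
//    scan_memory_root_set(0, 0);          // leak search: push root-pointed blocks
//    scan_memory_root_set(address, szB);  // who_points_at: only report pointers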

/*------------------------------------------------------------*/
/*--- Top-level entry point.                                ---*/
/*------------------------------------------------------------*/

void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckParams* lcp)
{
   Int i, j;

   tl_assert(lcp->mode != LC_Off);

   // Verify some assertions which are used in lc_scan_memory.
   tl_assert((VKI_PAGE_SIZE % sizeof(Addr)) == 0);
   tl_assert((SM_SIZE % sizeof(Addr)) == 0);
   // The above two assertions are critical, while the assertion below
   // ensures that the optimisation in the scan loop is done in the
   // correct order: the loop checks for (big) SM chunk skipping
   // before checking for (smaller) page skipping.
   tl_assert((SM_SIZE % VKI_PAGE_SIZE) == 0);

   MC_(detect_memory_leaks_last_delta_mode) = lcp->deltamode;

   // Get the chunks, stop if there were none.
   if (lc_chunks) {
      VG_(free)(lc_chunks);
      lc_chunks = NULL;
   }
   lc_chunks = find_active_chunks(&lc_n_chunks);
   lc_chunks_n_frees_marker = MC_(get_cmalloc_n_frees)();
   if (lc_n_chunks == 0) {
      tl_assert(lc_chunks == NULL);
      if (lr_table != NULL) {
         // Forget the previously recorded LossRecords, as the next leak
         // search can in any case only create new leaks.
         // Maybe it would be better to call print_results instead?
         // (At least when leak decreases are requested; that would then
         // output all LossRecords with a size decreasing to 0.)
         VG_(OSetGen_Destroy) (lr_table);
         lr_table = NULL;
      }
      if (VG_(clo_verbosity) >= 1 && !VG_(clo_xml)) {
         VG_(umsg)("All heap blocks were freed -- no leaks are possible\n");
         VG_(umsg)("\n");
      }
      return;
   }

   // Sort the array so blocks are in ascending order in memory.
   VG_(ssort)(lc_chunks, lc_n_chunks, sizeof(VgHashNode*), compare_MC_Chunks);

   // Sanity check -- make sure they're in order.
   for (i = 0; i < lc_n_chunks-1; i++) {
      tl_assert( lc_chunks[i]->data <= lc_chunks[i+1]->data);
   }

   // Sanity check -- make sure they don't overlap.  The one exception is that
   // we allow a MALLOCLIKE block to sit entirely within a malloc() block.
   // This is for bug 100628.  If this occurs, we ignore the malloc() block
   // for leak-checking purposes.  This is a hack and probably should be done
   // better, but at least it's consistent with mempools (which are treated
   // like this in find_active_chunks).  Mempools have a separate VgHashTable
   // for mempool chunks, but if custom-allocated blocks are put in a separate
   // table from normal heap blocks it makes free-mismatch checking more
   // difficult.
   //
   // If this check fails, it probably means that the application
   // has done something stupid with VALGRIND_MALLOCLIKE_BLOCK client
   // requests, eg. has made overlapping requests (which are
   // nonsensical), or used VALGRIND_MALLOCLIKE_BLOCK for stack locations;
   // again nonsensical.
   //
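   // (Illustration of the allowed case, with hypothetical client code: a
   // custom allocator carving sub-blocks out of a malloc()ed pool,
   //    pool = malloc(POOL_SIZE);
   //    VALGRIND_MALLOCLIKE_BLOCK(pool + off, sz, /*rzB*/0, /*is_zeroed*/0);
   // in which case the enclosing pool block is dropped from the scan below
   // and only the MALLOCLIKE sub-blocks are leak-checked.)
   //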
   for (i = 0; i < lc_n_chunks-1; i++) {
      MC_Chunk* ch1 = lc_chunks[i];
      MC_Chunk* ch2 = lc_chunks[i+1];

      Addr start1    = ch1->data;
      Addr start2    = ch2->data;
      Addr end1      = ch1->data + ch1->szB - 1;
      Addr end2      = ch2->data + ch2->szB - 1;
      Bool isCustom1 = ch1->allockind == MC_AllocCustom;
      Bool isCustom2 = ch2->allockind == MC_AllocCustom;

      if (end1 < start2) {
         // Normal case - no overlap.

         // We used to allow exact duplicates, I'm not sure why.  --njn
         //} else if (start1 == start2 && end1 == end2) {
         // Degenerate case: exact duplicates.

      } else if (start1 >= start2 && end1 <= end2 && isCustom1 && !isCustom2) {
         // Block i is MALLOCLIKE and entirely within block i+1.
         // Remove block i+1.
         for (j = i+1; j < lc_n_chunks-1; j++) {
            lc_chunks[j] = lc_chunks[j+1];
         }
         lc_n_chunks--;

      } else if (start2 >= start1 && end2 <= end1 && isCustom2 && !isCustom1) {
         // Block i+1 is MALLOCLIKE and entirely within block i.
         // Remove block i.
         for (j = i; j < lc_n_chunks-1; j++) {
            lc_chunks[j] = lc_chunks[j+1];
         }
         lc_n_chunks--;

      } else {
         VG_(umsg)("Block 0x%lx..0x%lx overlaps with block 0x%lx..0x%lx\n",
                   start1, end1, start2, end2);
         VG_(umsg)("Blocks' allocation contexts:\n");
         VG_(pp_ExeContext)( ch1->where);
         VG_(umsg)("\n");
         VG_(pp_ExeContext)( ch2->where);
         VG_(umsg)("This is usually caused by using VALGRIND_MALLOCLIKE_BLOCK "
                   "in an inappropriate way.\n");
         tl_assert (0);
      }
   }

   // Initialise lc_extras.
   if (lc_extras) {
      VG_(free)(lc_extras);
      lc_extras = NULL;
   }
   lc_extras = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(LC_Extra) );
   for (i = 0; i < lc_n_chunks; i++) {
      lc_extras[i].state = Unreached;
      lc_extras[i].pending = False;
      lc_extras[i].IorC.indirect_szB = 0;
   }

   // Initialise lc_markstack.
   lc_markstack = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(Int) );
   for (i = 0; i < lc_n_chunks; i++) {
      lc_markstack[i] = -1;
   }
   lc_markstack_top = -1;

   // Verbosity.
   if (VG_(clo_verbosity) > 1 && !VG_(clo_xml)) {
      VG_(umsg)( "Searching for pointers to %'d not-freed blocks\n",
                 lc_n_chunks );
   }

   // Scan the memory root-set, pushing onto the mark stack any blocks
   // pointed to.
   scan_memory_root_set(/*searched*/0, 0);

   // Scan GP registers for chunk pointers.
   VG_(apply_to_GP_regs)(lc_push_if_a_chunk_ptr_register);

   // Process the pushed blocks.  After this, every block that is reachable
   // from the root-set has been traced.
   lc_process_markstack(/*clique*/-1);

   if (VG_(clo_verbosity) > 1 && !VG_(clo_xml)) {
      VG_(umsg)("Checked %'lu bytes\n", lc_scanned_szB);
      VG_(umsg)( "\n" );
   }

   // Trace all the leaked blocks to determine which are directly leaked and
   // which are indirectly leaked.  For each Unreached block, push it onto
   // the mark stack, and find all the as-yet-Unreached blocks reachable
   // from it.  These form a clique and are marked IndirectLeak, and their
   // size is added to the clique leader's indirect size.  If one of the
   // found blocks was itself a clique leader (from a previous clique), then
   // the cliques are merged.
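   // (Worked example, hypothetical: Unreached blocks A -> B -> C, where "->"
   // means "holds the only pointer to".  Tracing from A marks B and C as
   // IndirectLeak and adds szB(B) + szB(C) to A's indirect_szB; if B had
   // already become a leader in an earlier iteration, its whole clique is
   // absorbed into A's.)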
   for (i = 0; i < lc_n_chunks; i++) {
      MC_Chunk* ch = lc_chunks[i];
      LC_Extra* ex = &(lc_extras[i]);

      if (VG_DEBUG_CLIQUE)
         VG_(printf)("cliques: %d at %#lx -> Loss state %d\n",
                     i, ch->data, ex->state);

      tl_assert(lc_markstack_top == -1);

      if (ex->state == Unreached) {
         if (VG_DEBUG_CLIQUE)
            VG_(printf)("%d: gathering clique %#lx\n", i, ch->data);

         // Push this Unreached block onto the stack and process it.
         lc_push(i, ch);
         lc_process_markstack(/*clique*/i);

         tl_assert(lc_markstack_top == -1);
         tl_assert(ex->state == Unreached);
      }
   }

   print_results( tid, lcp);

   VG_(free) ( lc_markstack );
   lc_markstack = NULL;
   // lc_chunks, lc_extras, lr_array and lr_table are kept (needed if the user
   // calls MC_(print_block_list)).  lr_table is also used for delta leak
   // reporting between this leak search and the next one.
}

static Addr  searched_wpa;
static SizeT searched_szB;
static void
search_address_in_GP_reg(ThreadId tid, const HChar* regname, Addr addr_in_reg)
{
   if (addr_in_reg >= searched_wpa
       && addr_in_reg < searched_wpa + searched_szB) {
      if (addr_in_reg == searched_wpa)
         VG_(umsg)
            ("tid %d register %s pointing at %#lx\n",
             tid, regname, searched_wpa);
      else
         VG_(umsg)
            ("tid %d register %s interior pointing %lu bytes inside %#lx\n",
             tid, regname, (unsigned long)(addr_in_reg - searched_wpa),
             searched_wpa);
   }
}

void MC_(who_points_at) ( Addr address, SizeT szB)
{
   MC_Chunk** chunks;
   Int        n_chunks;
   Int        i;

   if (szB == 1)
      VG_(umsg) ("Searching for pointers to %#lx\n", address);
   else
      VG_(umsg) ("Searching for pointers pointing in %lu bytes from %#lx\n",
                 szB, address);

   // Scan the memory root-set, searching for pointers pointing in address[szB].
   scan_memory_root_set(address, szB);

   // Scan the active malloc-ed chunks.
   chunks = find_active_chunks(&n_chunks);
   for (i = 0; i < n_chunks; i++) {
      lc_scan_memory(chunks[i]->data, chunks[i]->szB,
                     /*is_prior_definite*/True,
                     /*clique*/-1, /*cur_clique*/-1,
                     address, szB);
   }
   VG_(free) ( chunks );

   // Scan GP registers for pointers to the address range.
   searched_wpa = address;
   searched_szB = szB;
   VG_(apply_to_GP_regs)(search_address_in_GP_reg);
}
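
// Sketch of intended use, via the gdbserver monitor command (address and
// length are hypothetical):
//    (gdb) monitor who_points_at 0x4028028 16
// This reports the root-set words, live heap words and GP registers that
// hold a pointer to (or into) the given address range.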

/*--------------------------------------------------------------------*/
/*--- end                                                           ---*/
/*--------------------------------------------------------------------*/