
/*--------------------------------------------------------------------*/
/*--- The leak checker.                             mc_leakcheck.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2015 Julian Seward
   jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_vki.h"
#include "pub_tool_aspacehl.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_execontext.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_libcsignal.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_oset.h"
#include "pub_tool_poolalloc.h"
#include "pub_tool_signals.h"       // Needed for mc_include.h
#include "pub_tool_libcsetjmp.h"    // setjmp facilities
#include "pub_tool_tooliface.h"     // Needed for mc_include.h

#include "mc_include.h"

/*------------------------------------------------------------*/
/*--- An overview of leak checking.                        ---*/
/*------------------------------------------------------------*/

// Leak-checking is a directed-graph traversal problem.  The graph has
// two kinds of nodes:
// - root-set nodes:
//   - GP registers of all threads;
//   - valid, aligned, pointer-sized data words in valid client memory,
//     including stacks, but excluding words within client heap-allocated
//     blocks (they are excluded so that later on we can differentiate
//     between heap blocks that are indirectly leaked vs. directly leaked).
// - heap-allocated blocks.  A block is a mempool chunk or a malloc chunk
//   that doesn't contain a mempool chunk.  Nb: the terms "blocks" and
//   "chunks" are used interchangeably below.
//
// There are two kinds of edges:
// - start-pointers, i.e. pointers to the start of a block;
// - interior-pointers, i.e. pointers to the interior of a block.
//
// We use "pointers" rather than "edges" below.
//
// Root set nodes only point to blocks.  Blocks only point to blocks;
// a block can point to itself.
//
// The aim is to traverse the graph and determine the status of each block.
//
// There are 9 distinct cases.  See memcheck/docs/mc-manual.xml for details.
// Presenting all nine categories to the user is probably too much.
// Currently we do this:
// - definitely lost:  case 3
// - indirectly lost:  cases 4 and 9
// - possibly lost:    cases 5..8
// - still reachable:  cases 1 and 2
//
// It's far from clear that this is the best possible categorisation;  it's
// accreted over time without any central guiding principle.

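// As a minimal illustration of the four reported categories (hypothetical
// client code, not part of this file):
//
//    int*  s = malloc(16);             // still reachable: the root set
//                                      // holds a start-pointer to it
//    int** d = malloc(sizeof(int*));
//    *d = malloc(16);
//    d = NULL;                         // the int** block is now definitely
//                                      // lost, and the 16-byte block it
//                                      // points to is indirectly lost
//    char* p = (char*)malloc(16) + 4;  // only an interior-pointer remains:
//                                      // possibly lost
//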
/*------------------------------------------------------------*/
/*--- XXX: Thoughts for improvement.                       ---*/
/*------------------------------------------------------------*/

// From the user's point of view:
// - If they aren't using interior-pointers, they just have to fix the
//   directly lost blocks, and the indirectly lost ones will be fixed as
//   part of that.  Any possibly lost blocks will just be due to random
//   pointer garbage and can be ignored.
//
// - If they are using interior-pointers, the fact that they currently are not
//   being told which ones might be directly lost vs. indirectly lost makes
//   it hard to know where to begin.
//
// All this makes me wonder if a new option is warranted:
// --follow-interior-pointers.  By default it would be off, the leak checker
// wouldn't follow interior-pointers, and there would only be 3 categories:
// R, DL, IL.
//
// If turned on, then it would show 7 categories (R, DL, IL, DR/DL, IR/IL,
// IR/IL/DL, IL/DL).  That output is harder to understand but it's your own
// damn fault for using interior-pointers...
//
// ----
//
// Also, why are two blank lines printed between each loss record?
// [bug 197930]
//
// ----
//
// Also, --show-reachable is a bad name because it also turns on the showing
// of indirectly leaked blocks(!)  It would be better named --show-all or
// --show-all-heap-blocks, because that's the end result.
// We now have the option --show-leak-kinds=... which allows specifying =all.
//
// ----
//
// Also, the VALGRIND_LEAK_CHECK and VALGRIND_QUICK_LEAK_CHECK aren't great
// names.  VALGRIND_FULL_LEAK_CHECK and VALGRIND_SUMMARY_LEAK_CHECK would be
// better.
//
// ----
//
// Also, VALGRIND_COUNT_LEAKS and VALGRIND_COUNT_LEAK_BLOCKS aren't great as
// they combine direct leaks and indirect leaks into one.  New, more precise
// ones (they'll need new names) would be good.  If more categories are
// used, as per the --follow-interior-pointers option, they should be
// updated accordingly.  And they should use a struct to return the values.
//
// ----
//
// Also, for this case:
//
//  (4)  p4      BBB ---> AAA
//
// BBB is definitely directly lost.  AAA is definitely indirectly lost.
// Here are the relevant loss records printed for a full check (each block
// is 16 bytes):
//
// ==20397== 16 bytes in 1 blocks are indirectly lost in loss record 9 of 15
// ==20397==    at 0x4C2694E: malloc (vg_replace_malloc.c:177)
// ==20397==    by 0x400521: mk (leak-cases.c:49)
// ==20397==    by 0x400578: main (leak-cases.c:72)
//
// ==20397== 32 (16 direct, 16 indirect) bytes in 1 blocks are definitely
// lost in loss record 14 of 15
// ==20397==    at 0x4C2694E: malloc (vg_replace_malloc.c:177)
// ==20397==    by 0x400521: mk (leak-cases.c:49)
// ==20397==    by 0x400580: main (leak-cases.c:72)
//
// The first one is fine -- it describes AAA.
//
// The second one is for BBB.  It's correct in that 16 bytes in 1 block are
// directly lost.  It's also correct that 16 are indirectly lost as a result,
// but it means that AAA is being counted twice in the loss records.  (It's
// not, thankfully, counted twice in the summary counts.)  Argh.
//
// This would be less confusing for the second one:
//
// ==20397== 16 bytes in 1 blocks are definitely lost in loss record 14
// of 15 (and 16 bytes in 1 block are indirectly lost as a result;  they
// are mentioned elsewhere (if --show-reachable=yes or indirect is given
// in --show-leak-kinds=... !))
// ==20397==    at 0x4C2694E: malloc (vg_replace_malloc.c:177)
// ==20397==    by 0x400521: mk (leak-cases.c:49)
// ==20397==    by 0x400580: main (leak-cases.c:72)
//
// But ideally we'd present the loss record for the directly lost block and
// then the resultant indirectly lost blocks, and make the dependence clear.
// Double argh.

/*------------------------------------------------------------*/
/*--- The actual algorithm.                                ---*/
/*------------------------------------------------------------*/

// - Find all the blocks (a.k.a. chunks) to check.  Mempool chunks require
//   some special treatment because they can be within malloc'd blocks.
// - Scan every word in the root set (GP registers and valid
//   non-heap memory words).
//   - First, we skip if it doesn't point to valid memory.
//   - Then, we see if it points to the start or interior of a block.  If
//     so, we push the block onto the mark stack and mark it as having been
//     reached.
// - Then, we process the mark stack, repeating the scanning for each block;
//   this can push more blocks onto the mark stack.  We repeat until the
//   mark stack is empty.  Each block is marked as definitely or possibly
//   reachable, depending on whether interior-pointers were required to
//   reach it.
// - At this point we know for every block if it's reachable or not.
// - We then push each unreached block onto the mark stack, using the block
//   number as the "clique" number.
// - We process the mark stack again, this time grouping blocks into cliques
//   in order to facilitate the directly/indirectly lost categorisation.
// - We group blocks by their ExeContexts and categorisation, and print them
//   if --leak-check=full.  We also print summary numbers.
//
// A note on "cliques":
// - A directly lost block is one with no pointers to it.  An indirectly
//   lost block is one that is pointed to by a directly or indirectly lost
//   block.
// - Each directly lost block has zero or more indirectly lost blocks
//   hanging off it.  All these blocks together form a "clique".  The
//   directly lost block is called the "clique leader".  The clique number
//   is the number (in lc_chunks[]) of the clique leader.
// - Actually, a directly lost block may be pointed to if it's part of a
//   cycle.  In that case, there may be more than one choice for the clique
//   leader, and the choice is arbitrary.  Eg. if you have A-->B and B-->A,
//   either A or B could be the clique leader.
// - Cliques cannot overlap, and will be truncated to avoid this.  Eg. if we
//   have A-->C and B-->C, the two cliques will be {A,C} and {B}, or {A} and
//   {B,C} (again the choice is arbitrary).  This is because we don't want
//   to count a block as indirectly lost more than once.
//   (A worked example follows just below.)
//
// A note on 'is_prior_definite':
// - This is a boolean used in various places that indicates if the chain
//   up to the prior node (prior to the one being considered) is definite.
// - In the clique == -1 case:
//   - if True it means that the prior node is a root-set node, or that the
//     prior node is a block which is reachable from the root-set via
//     start-pointers.
//   - if False it means that the prior node is a block that is only
//     reachable from the root-set via a path including at least one
//     interior-pointer.
// - In the clique != -1 case, currently it's always True because we treat
//   start-pointers and interior-pointers the same for direct/indirect leak
//   checking.  If we added a PossibleIndirectLeak state then this would
//   change.

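// A worked example of the clique traversal (block names and indices are
// hypothetical): suppose lc_chunks = {A,B,C} are all Unreached after the
// root-set scan, with A-->B and B-->C.  Pushing A with clique == index(A)
// marks B and C as IndirectLeak members of A's clique, so the loss record
// for A reports szB(A) bytes definitely lost plus szB(B)+szB(C) bytes
// indirectly lost, and B and C produce no separate "definitely lost"
// records of their own.
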
// Define to debug the memory-leak-detector.
#define VG_DEBUG_LEAKCHECK 0
#define VG_DEBUG_CLIQUE    0


/*------------------------------------------------------------*/
/*--- Getting the initial chunks, and searching them.      ---*/
/*------------------------------------------------------------*/

// Compare the MC_Chunks by 'data' (i.e. the address of the block).
static Int compare_MC_Chunks(const void* n1, const void* n2)
{
   const MC_Chunk* mc1 = *(const MC_Chunk *const *)n1;
   const MC_Chunk* mc2 = *(const MC_Chunk *const *)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}

#if VG_DEBUG_LEAKCHECK
// Used to sanity-check the fast binary-search mechanism.
static
Int find_chunk_for_OLD ( Addr       ptr,
                         MC_Chunk** chunks,
                         Int        n_chunks )

{
   Int  i;
   Addr a_lo, a_hi;
   PROF_EVENT(MCPE_FIND_CHUNK_FOR_OLD);
   for (i = 0; i < n_chunks; i++) {
      PROF_EVENT(MCPE_FIND_CHUNK_FOR_OLD_LOOP);
      a_lo = chunks[i]->data;
      a_hi = ((Addr)chunks[i]->data) + chunks[i]->szB;
      if (a_lo <= ptr && ptr < a_hi)
         return i;
   }
   return -1;
}
#endif

// Find the i such that ptr points at or inside the block described by
// chunks[i].  Return -1 if none found.  This assumes that chunks[]
// has been sorted on the 'data' field.
static
Int find_chunk_for ( Addr       ptr,
                     MC_Chunk** chunks,
                     Int        n_chunks )
{
   Addr a_mid_lo, a_mid_hi;
   Int lo, mid, hi, retVal;
   // VG_(printf)("find chunk for %p = ", ptr);
   retVal = -1;
   lo = 0;
   hi = n_chunks-1;
   while (True) {
      // Invariant: current unsearched space is from lo to hi, inclusive.
      if (lo > hi) break; // not found

      mid      = (lo + hi) / 2;
      a_mid_lo = chunks[mid]->data;
      a_mid_hi = chunks[mid]->data + chunks[mid]->szB;
      // Extent of block 'mid' is [a_mid_lo .. a_mid_hi).
      // Special-case zero-sized blocks - treat them as if they had
      // size 1.  Not doing so causes them to not cover any address
      // range at all, so they would never be identified as the target of
      // any pointer, which would cause them to be incorrectly reported as
      // definitely leaked.
      if (chunks[mid]->szB == 0)
         a_mid_hi++;

      if (ptr < a_mid_lo) {
         hi = mid-1;
         continue;
      }
      if (ptr >= a_mid_hi) {
         lo = mid+1;
         continue;
      }
      tl_assert(ptr >= a_mid_lo && ptr < a_mid_hi);
      retVal = mid;
      break;
   }

#  if VG_DEBUG_LEAKCHECK
   tl_assert(retVal == find_chunk_for_OLD ( ptr, chunks, n_chunks ));
#  endif
   // VG_(printf)("%d\n", retVal);
   return retVal;
}

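// For example (hypothetical addresses): if chunks[] describes the two
// blocks [0x1000, 0x1010) and [0x2000, 0x2008), then
//    find_chunk_for(0x1004, chunks, 2) == 0    (interior-pointer)
//    find_chunk_for(0x2000, chunks, 2) == 1    (start-pointer)
//    find_chunk_for(0x1010, chunks, 2) == -1   (end address is exclusive)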

static MC_Chunk**
find_active_chunks(Int* pn_chunks)
{
   // Our goal is to construct a set of chunks that includes every
   // mempool chunk, and every malloc region that *doesn't* contain a
   // mempool chunk.
   MC_Mempool *mp;
   MC_Chunk **mallocs, **chunks, *mc;
   UInt n_mallocs, n_chunks, m, s;
   Bool *malloc_chunk_holds_a_pool_chunk;

   // First we collect all the malloc chunks into an array and sort it.
   // We do this because we want to query the chunks by interior
   // pointers, requiring binary search.
   mallocs = (MC_Chunk**) VG_(HT_to_array)( MC_(malloc_list), &n_mallocs );
   if (n_mallocs == 0) {
      tl_assert(mallocs == NULL);
      *pn_chunks = 0;
      return NULL;
   }
   VG_(ssort)(mallocs, n_mallocs, sizeof(VgHashNode*), compare_MC_Chunks);

   // Then we build an array containing a Bool for each malloc chunk,
   // indicating whether it contains any mempools.
   malloc_chunk_holds_a_pool_chunk = VG_(calloc)( "mc.fas.1",
                                                  n_mallocs, sizeof(Bool) );
   n_chunks = n_mallocs;

   // Then we loop over the mempool tables.  For each chunk in each
   // pool, we set the entry in the Bool array corresponding to the
   // malloc chunk containing the mempool chunk.
   VG_(HT_ResetIter)(MC_(mempool_list));
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      VG_(HT_ResetIter)(mp->chunks);
      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {

         // We'll need to record this chunk.
         n_chunks++;

         // Possibly invalidate the malloc holding the beginning of this chunk.
         m = find_chunk_for(mc->data, mallocs, n_mallocs);
         if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
            tl_assert(n_chunks > 0);
            n_chunks--;
            malloc_chunk_holds_a_pool_chunk[m] = True;
         }

         // Possibly invalidate the malloc holding the end of this chunk.
         if (mc->szB > 1) {
            m = find_chunk_for(mc->data + (mc->szB - 1), mallocs, n_mallocs);
            if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
               tl_assert(n_chunks > 0);
               n_chunks--;
               malloc_chunk_holds_a_pool_chunk[m] = True;
            }
         }
      }
   }
   tl_assert(n_chunks > 0);

   // Create final chunk array.
   chunks = VG_(malloc)("mc.fas.2", sizeof(VgHashNode*) * (n_chunks));
   s = 0;

   // Copy the mempool chunks and the non-marked malloc chunks into a
   // combined array of chunks.
   VG_(HT_ResetIter)(MC_(mempool_list));
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      VG_(HT_ResetIter)(mp->chunks);
      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
         tl_assert(s < n_chunks);
         chunks[s++] = mc;
      }
   }
   for (m = 0; m < n_mallocs; ++m) {
      if (!malloc_chunk_holds_a_pool_chunk[m]) {
         tl_assert(s < n_chunks);
         chunks[s++] = mallocs[m];
      }
   }
   tl_assert(s == n_chunks);

   // Free temporaries.
   VG_(free)(mallocs);
   VG_(free)(malloc_chunk_holds_a_pool_chunk);

   *pn_chunks = n_chunks;

   return chunks;
}

/*------------------------------------------------------------*/
/*--- The leak detector proper.                            ---*/
/*------------------------------------------------------------*/

// Holds extra info about each block during leak checking.
typedef
   struct {
      UInt state:2;    // Reachedness.
      UInt pending:1;  // Scan pending.
      UInt heuristic: (sizeof(UInt)*8)-3;
      // Heuristic with which this block was considered reachable.
      // LchNone if state != Reachable or no heuristic was needed to
      // consider it reachable.

      union {
         SizeT indirect_szB;
         // If Unreached, how many bytes are unreachable from here.
         SizeT clique;
         // If IndirectLeak, clique leader to which it belongs.
      } IorC;
   }
   LC_Extra;
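// Note: the three bitfields above are sized to fill exactly one UInt:
// 2 (state) + 1 (pending) + (32-3) (heuristic) = 32 bits, assuming a
// 32-bit UInt.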

// An array holding pointers to every chunk we're checking.  Sorted by address.
// lc_chunks is initialised during leak search.  It is kept after leak search
// to support printing the list of blocks belonging to a loss record.
// The lc_chunks array can only be used validly until the next "free"
// operation (as a free operation potentially destroys one or more chunks).
// To detect whether lc_chunks is still valid, we store the number of free
// operations done when lc_chunks was built: lc_chunks (and lc_extras) stay
// valid as long as no free operation has been done since they were built.
static MC_Chunk** lc_chunks;
// How many chunks we're dealing with.
static Int        lc_n_chunks;
static SizeT lc_chunks_n_frees_marker;
// This has the same number of entries as lc_chunks, and each entry
// in lc_chunks corresponds with the entry here (ie. lc_chunks[i] and
// lc_extras[i] describe the same block).
static LC_Extra* lc_extras;

// Chunks are converted and merged into loss records, maintained in lr_table.
// lr_table elements are kept from one leak_search to another to implement
// the "print new/changed leaks" client request.
static OSet*        lr_table;
// Array of sorted loss records (produced during the last leak search).
static LossRecord** lr_array;

// Value of the heuristics parameter used in the current (or last) leak check.
static UInt detect_memory_leaks_last_heuristics;

// DeltaMode used the last time we called detect_memory_leaks.
// The recorded leak errors are output using logic based on this delta_mode.
// The below avoids replicating the delta_mode in each LossRecord.
LeakCheckDeltaMode MC_(detect_memory_leaks_last_delta_mode);

// Each leak search run increments the below generation counter.
// A suppression used during a leak search will contain this
// generation number.
UInt MC_(leak_search_gen);

// Records chunks that are currently being processed.  Each element in the
// stack is an index into lc_chunks and lc_extras.  Its size is
// 'lc_n_chunks' because in the worst case that's how many chunks could be
// pushed onto it (actually I think the maximum is lc_n_chunks-1 but let's
// be conservative).
static Int* lc_markstack;
// The index of the top element of the stack; -1 if the stack is empty, 0 if
// the stack has one element, 1 if it has two, etc.
static Int  lc_markstack_top;

// Keeps track of how many bytes of memory we've scanned, for printing.
// (Nb: We don't keep track of how many register bytes we've scanned.)
static SizeT lc_scanned_szB;
// Keeps track of how many bytes we have not scanned due to read errors that
// caused a signal such as SIGSEGV.
static SizeT lc_sig_skipped_szB;


SizeT MC_(bytes_leaked)     = 0;
SizeT MC_(bytes_indirect)   = 0;
SizeT MC_(bytes_dubious)    = 0;
SizeT MC_(bytes_reachable)  = 0;
SizeT MC_(bytes_suppressed) = 0;

SizeT MC_(blocks_leaked)     = 0;
SizeT MC_(blocks_indirect)   = 0;
SizeT MC_(blocks_dubious)    = 0;
SizeT MC_(blocks_reachable)  = 0;
SizeT MC_(blocks_suppressed) = 0;

// Subset of MC_(bytes_reachable) and MC_(blocks_reachable) which
// are considered reachable due to the corresponding heuristic.
static SizeT MC_(bytes_heuristically_reachable)[N_LEAK_CHECK_HEURISTICS]
                                               = {0,0,0,0};
static SizeT MC_(blocks_heuristically_reachable)[N_LEAK_CHECK_HEURISTICS]
                                                = {0,0,0,0};

// Determines if a pointer is to a chunk.  Returns the chunk number et al
// via call-by-reference.
static Bool
lc_is_a_chunk_ptr(Addr ptr, Int* pch_no, MC_Chunk** pch, LC_Extra** pex)
{
   Int ch_no;
   MC_Chunk* ch;
   LC_Extra* ex;

   // Quick filter.  Note: implemented with am, not with get_vabits2
   // as ptr might be random data pointing anywhere.  On 64 bit
   // platforms, getting va bits for random data can be quite costly
   // due to the secondary map.
   if (!VG_(am_is_valid_for_client)(ptr, 1, VKI_PROT_READ)) {
      return False;
   } else {
      ch_no = find_chunk_for(ptr, lc_chunks, lc_n_chunks);
      tl_assert(ch_no >= -1 && ch_no < lc_n_chunks);

      if (ch_no == -1) {
         return False;
      } else {
         // Ok, we've found a pointer to a chunk.  Get the MC_Chunk and its
         // LC_Extra.
         ch = lc_chunks[ch_no];
         ex = &(lc_extras[ch_no]);

         tl_assert(ptr >= ch->data);
         tl_assert(ptr < ch->data + ch->szB + (ch->szB==0 ? 1 : 0));

         if (VG_DEBUG_LEAKCHECK)
            VG_(printf)("ptr=%#lx -> block %d\n", ptr, ch_no);

         *pch_no = ch_no;
         *pch    = ch;
         *pex    = ex;

         return True;
      }
   }
}

// Push a chunk (well, just its index) onto the mark stack.
static void lc_push(Int ch_no, MC_Chunk* ch)
{
   if (!lc_extras[ch_no].pending) {
      if (0) {
         VG_(printf)("pushing %#lx-%#lx\n", ch->data, ch->data + ch->szB);
      }
      lc_markstack_top++;
      tl_assert(lc_markstack_top < lc_n_chunks);
      lc_markstack[lc_markstack_top] = ch_no;
      tl_assert(!lc_extras[ch_no].pending);
      lc_extras[ch_no].pending = True;
   }
}

// Pop the index of the chunk on the top of the mark stack into *ret.
// Returns False if the stack is empty, True otherwise.
static Bool lc_pop(Int* ret)
{
   if (-1 == lc_markstack_top) {
      return False;
   } else {
      tl_assert(0 <= lc_markstack_top && lc_markstack_top < lc_n_chunks);
      *ret = lc_markstack[lc_markstack_top];
      lc_markstack_top--;
      tl_assert(lc_extras[*ret].pending);
      lc_extras[*ret].pending = False;
      return True;
   }
}

static const HChar* pp_heuristic(LeakCheckHeuristic h)
{
   switch(h) {
   case LchNone:                return "none";
   case LchStdString:           return "stdstring";
   case LchLength64:            return "length64";
   case LchNewArray:            return "newarray";
   case LchMultipleInheritance: return "multipleinheritance";
   default:                     return "???invalid heuristic???";
   }
}

// True if ptr looks like the address of a vtable, i.e. if ptr
// points to an array of pointers to functions.
// It is assumed the only caller of this function is heuristic_reachedness,
// which must check that ptr is aligned and above page 0.
// Checking that ptr is above page 0 is an optimisation: it is assumed
// that no vtable is located in page 0.  So, all small integer values
// encountered during the scan will not incur the cost of calling this
// function.
static Bool aligned_ptr_above_page0_is_vtable_addr(Addr ptr)
{
   // ??? If performance problem:
   // ??? maybe implement a cache (array indexed by ptr % primenr)
   // ??? of "I am a vtable ptr" ???

   // ??? Maybe the debug info could (efficiently?) be used to detect vtables ?

   // We consider ptr as a vtable ptr if it points to a table
   // where we find only NULL pointers or pointers pointing at an
   // executable region.  We must find at least 2 non-NULL pointers
   // before considering ptr as a vtable pointer.
   // We scan a maximum of VTABLE_MAX_CHECK words for these 2 non-NULL
   // pointers.
#define VTABLE_MAX_CHECK 20

   NSegment const *seg;
   UInt nr_fn_ptrs = 0;
   Addr scan;
   Addr scan_max;

   // First verify ptr points inside a client mapped file section.
   // ??? is a vtable always in a file mapped readable section ?
   seg = VG_(am_find_nsegment) (ptr);
   if (seg == NULL
       || seg->kind != SkFileC
       || !seg->hasR)
      return False;

   // Check potential function pointers, up to a maximum of VTABLE_MAX_CHECK.
   scan_max = ptr + VTABLE_MAX_CHECK*sizeof(Addr);
   // If ptr is near the end of seg, avoid scan_max exceeding the end of seg:
   if (scan_max > seg->end - sizeof(Addr))
      scan_max = seg->end - sizeof(Addr);
   for (scan = ptr; scan <= scan_max; scan+=sizeof(Addr)) {
      Addr pot_fn = *((Addr *)scan);
      if (pot_fn == 0)
         continue; // NULL fn pointer.  Seems it can happen in vtables.
      seg = VG_(am_find_nsegment) (pot_fn);
#if defined(VGA_ppc64be)
      // ppc64BE uses a thunk table (function descriptors), so we have one
      // more level of indirection to follow.
      if (seg == NULL
          || seg->kind != SkFileC
          || !seg->hasR
          || !seg->hasW)
         return False; // ptr to nowhere, or not a ptr to thunks.
      pot_fn = *((Addr *)pot_fn);
      if (pot_fn == 0)
         continue; // NULL fn pointer.  Seems it can happen in vtables.
      seg = VG_(am_find_nsegment) (pot_fn);
#endif
      if (seg == NULL
          || seg->kind != SkFileC
          || !seg->hasT)
         return False; // ptr to nowhere, or not a fn ptr.
      nr_fn_ptrs++;
      if (nr_fn_ptrs == 2)
         return True;
   }

   return False;
}

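// For illustration (a sketch of a typical Itanium-ABI-style layout; this
// file does not define it): an object of a class with virtual functions
// starts with a pointer into its vtable, and the vtable itself is an array
// of code addresses, which is the pattern the function above looks for:
//
//    object:  [vptr][data members ...]
//    vtable:  [&Klass::fn0][&Klass::fn1]...   <- >= 2 non-NULL fn ptrs
//             into executable (hasT) file-mapped segments
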
// True if 'a' is properly aligned and points to 64 bits of valid memory.
static Bool is_valid_aligned_ULong ( Addr a )
{
   if (sizeof(Word) == 8)
      return MC_(is_valid_aligned_word)(a);

   return MC_(is_valid_aligned_word)(a)
      && MC_(is_valid_aligned_word)(a + 4);
}

// If ch is heuristically reachable via a heuristic member of heur_set,
// returns this heuristic.
// If ch cannot be considered reachable using one of these heuristics,
// returns LchNone.
// This should only be called when ptr is an interior ptr to ch.
// The StdString/NewArray/MultipleInheritance heuristics are directly
// inspired by DrMemory:
// see http://www.burningcutlery.com/derek/docs/drmem-CGO11.pdf [section VI,C]
// and bug 280271.
static LeakCheckHeuristic heuristic_reachedness (Addr ptr,
                                                 MC_Chunk *ch, LC_Extra *ex,
                                                 UInt heur_set)
{
   if (HiS(LchStdString, heur_set)) {
      // Detects inner pointers to std::string, for the layout being
      //     length capacity refcount char_array[] \0
      // where ptr points to the beginning of the char_array.
      // Note: we check definedness for length and capacity but
      // not for refcount, as the refcount size might be smaller than
      // a SizeT, giving an uninitialised hole in the first 3 SizeTs.
      if ( ptr == ch->data + 3 * sizeof(SizeT)
           && MC_(is_valid_aligned_word)(ch->data + sizeof(SizeT))) {
         const SizeT capacity = *((SizeT*)(ch->data + sizeof(SizeT)));
         if (3 * sizeof(SizeT) + capacity + 1 == ch->szB
             && MC_(is_valid_aligned_word)(ch->data)) {
            const SizeT length = *((SizeT*)ch->data);
            if (length <= capacity) {
               // ??? could check there is no null byte from ptr to ptr+length-1
               // ??? and that there is a null byte at ptr+length.
               // ???
               // ??? could check that ch->allockind is MC_AllocNew ???
               // ??? probably not a good idea, as I guess stdstring
               // ??? allocation can be done via a custom allocator
               // ??? or even a call to malloc ????
               return LchStdString;
            }
         }
      }
   }

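   // A worked instance of the layout checked above (a sketch, assuming a
   // 64-bit SizeT and the pre-C++11 reference-counted std::string): for a
   // string of length 5 and capacity 5, the chunk is
   //    ch->data: [length=5][capacity=5][refcount][ 'h','e','l','l','o','\0' ]
   // so ch->szB == 3*8 + 5 + 1 == 30, and the std::string object itself
   // holds ptr == ch->data + 24, which is exactly what the test accepts.
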
   if (HiS(LchLength64, heur_set)) {
      // Detects inner pointers that point at a 64-bit (8-byte) offset into
      // a block whose first 8 bytes hold the length of the remainder as a
      // 64-bit number (= total block size - 8).
      // This is used e.g. by sqlite for tracking the total size of
      // allocated memory.
      // Note that on 64-bit platforms, a block matching LchLength64 will
      // also be matched by LchNewArray.
      if ( ptr == ch->data + sizeof(ULong)
          && is_valid_aligned_ULong(ch->data)) {
         const ULong size = *((ULong*)ch->data);
         if (size > 0 && (ch->szB - sizeof(ULong)) == size) {
            return LchLength64;
         }
      }
   }

   if (HiS(LchNewArray, heur_set)) {
      // Detects inner pointers at the second word of a new[] array,
      // following a plausible nr of elements.
      // Such inner pointers are used for arrays of elements
      // having a destructor, as the delete[] of the array must know
      // how many elements to destroy.
      //
      // We have a strange/wrong case for 'ptr = new MyClass[0];' :
      // For such a case, the returned ptr points just outside the
      // allocated chunk.  This chunk is then seen as a definite
      // leak by Valgrind, as it is not considered an interior pointer.
      // It is the C++ equivalent of bug 99923 (malloc(0) wrongly considered
      // as definitely leaked).  See the trick in find_chunk_for handling
      // 0-sized blocks.  This trick does not work for 'new MyClass[0]'
      // because a word-sized chunk is allocated to store the (0) nr
      // of elements.
      if ( ptr == ch->data + sizeof(SizeT)
           && MC_(is_valid_aligned_word)(ch->data)) {
         const SizeT nr_elts = *((SizeT*)ch->data);
         if (nr_elts > 0 && (ch->szB - sizeof(SizeT)) % nr_elts == 0) {
            // ??? could check that ch->allockind is MC_AllocNewVec ???
            return LchNewArray;
         }
      }
   }

   if (HiS(LchMultipleInheritance, heur_set)) {
      // Detects an inner pointer used for multiple inheritance.
      // The assumption is that the vtable pointers are before the object.
      if (VG_IS_WORD_ALIGNED(ptr)
          && MC_(is_valid_aligned_word)(ptr)) {
         Addr first_addr;
         Addr inner_addr;

         // Avoid the call to is_vtable_addr when the addr is not
         // aligned or points into page 0, as it is unlikely that
         // a vtable is located in this page.  This last optimisation
         // avoids calling aligned_ptr_above_page0_is_vtable_addr
         // for all small integers.
         // Note: we could possibly also avoid calling this function
         // for small negative integers, as no vtable should be located
         // in the last page.
         inner_addr = *((Addr*)ptr);
         if (VG_IS_WORD_ALIGNED(inner_addr)
             && inner_addr >= (Addr)VKI_PAGE_SIZE
             && MC_(is_valid_aligned_word)(ch->data)) {
            first_addr = *((Addr*)ch->data);
            if (VG_IS_WORD_ALIGNED(first_addr)
                && first_addr >= (Addr)VKI_PAGE_SIZE
                && aligned_ptr_above_page0_is_vtable_addr(inner_addr)
                && aligned_ptr_above_page0_is_vtable_addr(first_addr)) {
               // ??? could check that ch->allockind is MC_AllocNew ???
               return LchMultipleInheritance;
            }
         }
      }
   }

   return LchNone;
}


// If 'ptr' is pointing to a heap-allocated block which hasn't been seen
// before, push it onto the mark stack.
static void
lc_push_without_clique_if_a_chunk_ptr(Addr ptr, Bool is_prior_definite)
{
   Int ch_no;
   MC_Chunk* ch;
   LC_Extra* ex;
   Reachedness ch_via_ptr; // Is ch reachable via ptr, and how?

   if ( ! lc_is_a_chunk_ptr(ptr, &ch_no, &ch, &ex) )
      return;

   if (ex->state == Reachable) {
      if (ex->heuristic && ptr == ch->data)
         // If the block was considered reachable via a heuristic, and it is
         // now directly reachable via ptr, clear the heuristic field.
         ex->heuristic = LchNone;
      return;
   }

   // Possibly upgrade the state, ie. one of:
   // - Unreached --> Possible
   // - Unreached --> Reachable
   // - Possible  --> Reachable

   if (ptr == ch->data)
      ch_via_ptr = Reachable;
   else if (detect_memory_leaks_last_heuristics) {
      ex->heuristic
         = heuristic_reachedness (ptr, ch, ex,
                                  detect_memory_leaks_last_heuristics);
      if (ex->heuristic)
         ch_via_ptr = Reachable;
      else
         ch_via_ptr = Possible;
   } else
      ch_via_ptr = Possible;

   if (ch_via_ptr == Reachable && is_prior_definite) {
      // 'ptr' points to the start of the block or is to be considered as
      // pointing to the start of the block, and the prior node is
      // definite, which means that this block is definitely reachable.
      ex->state = Reachable;

      // State has changed to Reachable so (re)scan the block to make
      // sure any blocks it points to are correctly marked.
      lc_push(ch_no, ch);

   } else if (ex->state == Unreached) {
      // Either 'ptr' is an interior-pointer, or the prior node isn't
      // definite, which means that we can only mark this block as possibly
      // reachable.
      ex->state = Possible;

      // State has changed to Possible so (re)scan the block to make
      // sure any blocks it points to are correctly marked.
      lc_push(ch_no, ch);
   }
}

static void
lc_push_if_a_chunk_ptr_register(ThreadId tid, const HChar* regname, Addr ptr)
{
   lc_push_without_clique_if_a_chunk_ptr(ptr, /*is_prior_definite*/True);
}

// If ptr is pointing to a heap-allocated block which hasn't been seen
// before, push it onto the mark stack.  Clique is the index of the
// clique leader.
static void
lc_push_with_clique_if_a_chunk_ptr(Addr ptr, Int clique, Int cur_clique)
{
   Int ch_no;
   MC_Chunk* ch;
   LC_Extra* ex;

   tl_assert(0 <= clique && clique < lc_n_chunks);

   if ( ! lc_is_a_chunk_ptr(ptr, &ch_no, &ch, &ex) )
      return;

   // If it's not Unreached, it's already been handled so ignore it.
   // If ch_no==clique, it's the clique leader, which means this is a cyclic
   // structure;  again ignore it because it's already been handled.
   if (ex->state == Unreached && ch_no != clique) {
      // Note that, unlike reachable blocks, we currently don't distinguish
      // between start-pointers and interior-pointers here.  We probably
      // should, though.
      lc_push(ch_no, ch);

      // Add the block to the clique, and add its size to the
      // clique-leader's indirect size.  Also, if the new block was
      // itself a clique leader, it isn't any more, so add its
      // indirect_szB to the new clique leader.
      if (VG_DEBUG_CLIQUE) {
         if (ex->IorC.indirect_szB > 0)
            VG_(printf)("  clique %d joining clique %d adding %lu+%lu\n",
                        ch_no, clique, (SizeT)ch->szB, ex->IorC.indirect_szB);
         else
            VG_(printf)("  block %d joining clique %d adding %lu\n",
                        ch_no, clique, (SizeT)ch->szB);
      }

      lc_extras[clique].IorC.indirect_szB += ch->szB;
      lc_extras[clique].IorC.indirect_szB += ex->IorC.indirect_szB;
      ex->state = IndirectLeak;
      ex->IorC.clique = (SizeT) cur_clique;
   }
}

static void
lc_push_if_a_chunk_ptr(Addr ptr,
                       Int clique, Int cur_clique, Bool is_prior_definite)
{
   if (-1 == clique)
      lc_push_without_clique_if_a_chunk_ptr(ptr, is_prior_definite);
   else
      lc_push_with_clique_if_a_chunk_ptr(ptr, clique, cur_clique);
}


static VG_MINIMAL_JMP_BUF(memscan_jmpbuf);
static volatile Addr bad_scanned_addr;

static
void scan_all_valid_memory_catcher ( Int sigNo, Addr addr )
{
   if (0)
      VG_(printf)("OUCH! sig=%d addr=%#lx\n", sigNo, addr);
   if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS) {
      bad_scanned_addr = addr;
      VG_MINIMAL_LONGJMP(memscan_jmpbuf);
   }
}

// lc_scan_memory has 2 modes:
//
// 1. Leak check mode (searched == 0).
// -----------------------------------
// Scan a block of memory between [start, start+len).  This range may
// be bogus, inaccessible, or otherwise strange; we deal with it.  For each
// valid aligned word we check whether it's a pointer to a chunk, and push
// the chunk onto the mark stack if so.
// clique is the "highest level clique" in which indirectly leaked blocks
// have to be collected.  cur_clique is the current "lower" level clique
// through which the memory to be scanned has been found.
// Example: in the below tree, if A is leaked, the top level clique will
// be A, while the lower level cliques will be B and C.
/*
           A
         /   \
        B     C
       / \   / \
      D   E F   G
*/
// Proper handling of the top and lowest level cliques allows the block_list
// of a loss record to describe the hierarchy of indirectly leaked blocks.
//
// 2. Search ptr mode (searched != 0).
// -----------------------------------
// In this mode, lc_scan_memory searches for pointers to a specific address
// range: it scans [start..start+len[ for pointers to searched and outputs
// the places where searched is found.
// It does not recursively scan the found memory.
static void
lc_scan_memory(Addr start, SizeT len, Bool is_prior_definite,
               Int clique, Int cur_clique,
               Addr searched, SizeT szB)
{
   /* The memory scan is based on the assumption that valid pointers are
      aligned on a multiple of sizeof(Addr).  So, we can (and must) skip the
      begin and end portions of the block if they are not aligned on
      sizeof(Addr): these cannot be valid pointers, and calls to
      MC_(is_valid_aligned_word) will assert for a non-aligned address. */
#if defined(VGA_s390x)
   // Define ptr as volatile, as on this platform, the value of ptr
   // is read in code executed via a longjmp.
   volatile
#endif
   Addr ptr = VG_ROUNDUP(start, sizeof(Addr));
   const Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
   vki_sigset_t sigmask;

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("scan %#lx-%#lx (%lu)\n", start, end, len);

   VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
   VG_(set_fault_catcher)(scan_all_valid_memory_catcher);

   /* Optimisation: the loop below will check, at the start of each SM
      chunk, whether the chunk is fully unaddressable.  The idea is to
      skip such fully unaddressable SM chunks efficiently.
      So, we preferably start the loop on a chunk boundary.
      If the chunk is not fully unaddressable, we might be in
      an unaddressable page.  Again, the idea is to skip such an
      unaddressable page efficiently: this is the "else" part.
      We use an "else" so that two consecutive fully unaddressable
      SM chunks will be skipped efficiently: the first one is skipped
      by this piece of code.  The next SM chunk will be skipped inside
      the loop. */
   if ( ! MC_(is_within_valid_secondary)(ptr) ) {
      // Skip an invalid SM chunk till the beginning of the next SM Chunk.
      ptr = VG_ROUNDUP(ptr+1, SM_SIZE);
   } else if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ)) {
      // else we are in a (at least partially) valid SM chunk.
      // We might be in the middle of an unreadable page.
      // Do a cheap check to see if it's valid;
      // if not, skip onto the next page.
      ptr = VG_PGROUNDUP(ptr+1);        // First page is bad - skip it.
   }
   /* The above optimisation and the below loop rely on some relationships
      between VKI_PAGE_SIZE, SM_SIZE and sizeof(Addr) which are asserted in
      MC_(detect_memory_leaks). */

   // During the scan, we check with aspacemgr that each page is readable
   // and belongs to the client.
   // We still protect against SIGSEGV and SIGBUS e.g. in case aspacemgr is
   // desynchronised with the real page mappings.
   // Such a desynchronisation could happen due to an aspacemgr bug.
   // Note that if the application is using mprotect(NONE), then
   // a page can be unreadable but have addressable and defined
   // VA bits (see mc_main.c function mc_new_mem_mprotect).
   if (VG_MINIMAL_SETJMP(memscan_jmpbuf) != 0) {
      // Catch read error ...
      // We need to restore the signal mask, because we were
      // longjmped out of a signal handler.
      VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
#   if defined(VGA_s390x)
      // For a SIGSEGV, s390 delivers the page address of the bad address.
      // For a SIGBUS, old s390 kernels deliver a NULL address.
      // bad_scanned_addr can thus not be used.
      // So, on this platform, we always skip a full page from ptr.
      // This is why ptr is marked volatile above, as we read its value
      // after a longjmp to here.
      lc_sig_skipped_szB += VKI_PAGE_SIZE;
      ptr = ptr + VKI_PAGE_SIZE; // Unaddressable - skip it.
#   else
      // On other platforms, just skip one Addr.
      lc_sig_skipped_szB += sizeof(Addr);
      tl_assert(bad_scanned_addr >= VG_ROUNDUP(start, sizeof(Addr)));
      tl_assert(bad_scanned_addr < VG_ROUNDDN(start+len, sizeof(Addr)));
      ptr = bad_scanned_addr + sizeof(Addr); // Unaddressable - skip it.
#   endif
   }
   while (ptr < end) {
      Addr addr;

      // Skip invalid chunks.
      if (UNLIKELY((ptr % SM_SIZE) == 0)) {
         if (! MC_(is_within_valid_secondary)(ptr) ) {
            ptr = VG_ROUNDUP(ptr+1, SM_SIZE);
            continue;
         }
      }

      // Look to see if this page seems reasonable.
      if (UNLIKELY((ptr % VKI_PAGE_SIZE) == 0)) {
         if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ)) {
            ptr += VKI_PAGE_SIZE; // Bad page - skip it.
            continue;
         }
      }

      if ( MC_(is_valid_aligned_word)(ptr) ) {
         lc_scanned_szB += sizeof(Addr);
         // If the below read fails, we will longjmp to the loop begin.
         addr = *(Addr *)ptr;
         // If we get here, the scanned word is in valid memory.  Now
         // let's see if its contents point to a chunk.
         if (UNLIKELY(searched)) {
            if (addr >= searched && addr < searched + szB) {
               if (addr == searched) {
                  VG_(umsg)("*%#lx points at %#lx\n", ptr, searched);
                  MC_(pp_describe_addr) (ptr);
               } else {
                  Int ch_no;
                  MC_Chunk *ch;
                  LC_Extra *ex;
                  VG_(umsg)("*%#lx interior points at %lu bytes inside %#lx\n",
                            ptr, (long unsigned) addr - searched, searched);
                  MC_(pp_describe_addr) (ptr);
                  if (lc_is_a_chunk_ptr(addr, &ch_no, &ch, &ex) ) {
                     Int h;
                     for (h = LchStdString; h < N_LEAK_CHECK_HEURISTICS; h++) {
                        if (heuristic_reachedness(addr, ch, ex, H2S(h)) == h) {
                           VG_(umsg)("block at %#lx considered reachable "
                                     "by ptr %#lx using %s heuristic\n",
                                     ch->data, addr, pp_heuristic(h));
                        }
                     }
                     // Verify the loop above has properly scanned all
                     // heuristics.  If the below fails, it probably means the
                     // LeakCheckHeuristic enum is not in sync anymore with the
                     // above loop and/or with N_LEAK_CHECK_HEURISTICS.
                     tl_assert (h == N_LEAK_CHECK_HEURISTICS);
                  }
               }
            }
         } else {
            lc_push_if_a_chunk_ptr(addr, clique, cur_clique, is_prior_definite);
         }
      } else if (0 && VG_DEBUG_LEAKCHECK) {
         VG_(printf)("%#lx not valid\n", ptr);
      }
      ptr += sizeof(Addr);
   }

   VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
   VG_(set_fault_catcher)(NULL);
}


// Process the mark stack until empty.
static void lc_process_markstack(Int clique)
{
   Int  top = -1;    // shut gcc up
   Bool is_prior_definite;

   while (lc_pop(&top)) {
      tl_assert(top >= 0 && top < lc_n_chunks);

      // See the comment about 'is_prior_definite' at the top to understand
      // this.
      is_prior_definite = ( Possible != lc_extras[top].state );

      lc_scan_memory(lc_chunks[top]->data, lc_chunks[top]->szB,
                     is_prior_definite, clique, (clique == -1 ? -1 : top),
                     /*searched*/ 0, 0);
   }
}

static Word cmp_LossRecordKey_LossRecord(const void* key, const void* elem)
{
   const LossRecordKey* a = key;
   const LossRecordKey* b = &(((const LossRecord*)elem)->key);

   // Compare on states first because that's fast.
   if (a->state < b->state) return -1;
   if (a->state > b->state) return  1;
   // Ok, the states are equal.  Now compare the locations, which is slower.
   if (VG_(eq_ExeContext)(
         MC_(clo_leak_resolution), a->allocated_at, b->allocated_at))
      return 0;
   // Different locations.  Ordering is arbitrary, just use the ec pointer.
   if (a->allocated_at < b->allocated_at) return -1;
   if (a->allocated_at > b->allocated_at) return  1;
   VG_(tool_panic)("bad LossRecord comparison");
}

static Int cmp_LossRecords(const void* va, const void* vb)
{
   const LossRecord* lr_a = *(const LossRecord *const *)va;
   const LossRecord* lr_b = *(const LossRecord *const *)vb;
   SizeT total_szB_a = lr_a->szB + lr_a->indirect_szB;
   SizeT total_szB_b = lr_b->szB + lr_b->indirect_szB;

   // First compare by sizes.
   if (total_szB_a < total_szB_b) return -1;
   if (total_szB_a > total_szB_b) return  1;
   // If the sizes are equal, compare by states.
   if (lr_a->key.state < lr_b->key.state) return -1;
   if (lr_a->key.state > lr_b->key.state) return  1;
   // If they're still equal here, it doesn't matter that much, but we keep
   // comparing other things so that regtests are as deterministic as
   // possible.  So: compare num_blocks.
   if (lr_a->num_blocks < lr_b->num_blocks) return -1;
   if (lr_a->num_blocks > lr_b->num_blocks) return  1;
   // Finally, compare ExeContext addresses... older ones are likely to have
   // lower addresses.
   if (lr_a->key.allocated_at < lr_b->key.allocated_at) return -1;
   if (lr_a->key.allocated_at > lr_b->key.allocated_at) return  1;
   return 0;
}

// Allocates or reallocates lr_array, and sets its elements to the loss
// records contained in lr_table.
static UInt get_lr_array_from_lr_table(void) {
   UInt i, n_lossrecords;
   LossRecord* lr;

   n_lossrecords = VG_(OSetGen_Size)(lr_table);

   // (Re-)create the array of pointers to the loss records.
   // lr_array is kept to allow producing the block list from gdbserver.
   if (lr_array != NULL)
      VG_(free)(lr_array);
   lr_array = VG_(malloc)("mc.pr.2", n_lossrecords * sizeof(LossRecord*));
   i = 0;
   VG_(OSetGen_ResetIter)(lr_table);
   while ( (lr = VG_(OSetGen_Next)(lr_table)) ) {
      lr_array[i++] = lr;
   }
   tl_assert(i == n_lossrecords);
   return n_lossrecords;
}

philippe84234902012-01-14 13:53:13 +00001200
1201static void get_printing_rules(LeakCheckParams* lcp,
1202 LossRecord* lr,
1203 Bool* count_as_error,
1204 Bool* print_record)
sewardjb5f6f512005-03-10 23:59:00 +00001205{
philippe84234902012-01-14 13:53:13 +00001206 // Rules for printing:
1207 // - We don't show suppressed loss records ever (and that's controlled
1208 // within the error manager).
philippe2193a7c2012-12-08 17:54:16 +00001209 // - We show non-suppressed loss records that are specified in
1210 // --show-leak-kinds=... if --leak-check=yes.
philippe84234902012-01-14 13:53:13 +00001211
1212 Bool delta_considered;
1213
1214 switch (lcp->deltamode) {
1215 case LCD_Any:
1216 delta_considered = lr->num_blocks > 0;
1217 break;
1218 case LCD_Increased:
1219 delta_considered
1220 = lr->szB > lr->old_szB
1221 || lr->indirect_szB > lr->old_indirect_szB
1222 || lr->num_blocks > lr->old_num_blocks;
1223 break;
1224 case LCD_Changed:
1225 delta_considered = lr->szB != lr->old_szB
1226 || lr->indirect_szB != lr->old_indirect_szB
1227 || lr->num_blocks != lr->old_num_blocks;
1228 break;
1229 default:
1230 tl_assert(0);
1231 }
1232
philippe2193a7c2012-12-08 17:54:16 +00001233 *print_record = lcp->mode == LC_Full && delta_considered
1234 && RiS(lr->key.state,lcp->show_leak_kinds);
philippe84234902012-01-14 13:53:13 +00001235 // We don't count leaks as errors with lcp->mode==LC_Summary.
1236 // Otherwise you can get high error counts with few or no error
philippe2193a7c2012-12-08 17:54:16 +00001237 // messages, which can be confusing. With LC_Full, we count as errors
1238 // the leak kinds requested by --errors-for-leak-kinds=...
1239 *count_as_error = lcp->mode == LC_Full && delta_considered
1240 && RiS(lr->key.state,lcp->errors_for_leak_kinds);
philippe84234902012-01-14 13:53:13 +00001241}
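
// For instance (hypothetical values): a record going from old_szB=100,
// old_num_blocks=2 to szB=100, num_blocks=2 is considered under LCD_Any
// (num_blocks > 0) but not under LCD_Increased or LCD_Changed; one
// shrinking to szB=60, num_blocks=1 is considered under LCD_Changed and
// LCD_Any, but still not under LCD_Increased.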
1242
1243static void print_results(ThreadId tid, LeakCheckParams* lcp)
1244{
1245 Int i, n_lossrecords, start_lr_output_scan;
njn29a5c012009-05-06 06:15:55 +00001246 LossRecord* lr;
1247 Bool is_suppressed;
philippeab1fce92013-09-29 13:47:32 +00001248 /* old_* variables are used to report delta in summary. */
1249 SizeT old_bytes_leaked = MC_(bytes_leaked);
sewardjc8bd1df2011-06-26 12:41:33 +00001250 SizeT old_bytes_indirect = MC_(bytes_indirect);
1251 SizeT old_bytes_dubious = MC_(bytes_dubious);
1252 SizeT old_bytes_reachable = MC_(bytes_reachable);
1253 SizeT old_bytes_suppressed = MC_(bytes_suppressed);
1254 SizeT old_blocks_leaked = MC_(blocks_leaked);
1255 SizeT old_blocks_indirect = MC_(blocks_indirect);
1256 SizeT old_blocks_dubious = MC_(blocks_dubious);
1257 SizeT old_blocks_reachable = MC_(blocks_reachable);
1258 SizeT old_blocks_suppressed = MC_(blocks_suppressed);
sewardjb5f6f512005-03-10 23:59:00 +00001259
philippeab1fce92013-09-29 13:47:32 +00001260 SizeT old_bytes_heuristically_reachable[N_LEAK_CHECK_HEURISTICS];
1261 SizeT old_blocks_heuristically_reachable[N_LEAK_CHECK_HEURISTICS];
1262
1263 for (i = 0; i < N_LEAK_CHECK_HEURISTICS; i++) {
1264 old_bytes_heuristically_reachable[i]
1265 = MC_(bytes_heuristically_reachable)[i];
1266 MC_(bytes_heuristically_reachable)[i] = 0;
1267 old_blocks_heuristically_reachable[i]
1268 = MC_(blocks_heuristically_reachable)[i];
1269 MC_(blocks_heuristically_reachable)[i] = 0;
1270 }
1271
sewardjc8bd1df2011-06-26 12:41:33 +00001272 if (lr_table == NULL)
1273 // Create the lr_table, which holds the loss records.
1274 // If the lr_table already exists, it means it contains
1275 // loss_records from the previous leak search. The old_*
1276 // values in these records are used to implement the
1277 // leak check delta mode
1278 lr_table =
1279 VG_(OSetGen_Create)(offsetof(LossRecord, key),
1280 cmp_LossRecordKey_LossRecord,
1281 VG_(malloc), "mc.pr.1",
1282 VG_(free));
1283
philippea22f59d2012-01-26 23:13:52 +00001284 // If we have loss records from a previous search, reset their values so
1285 // the deltas between the previous search and this search print properly.
1286 n_lossrecords = get_lr_array_from_lr_table();
1287 for (i = 0; i < n_lossrecords; i++) {
philippe4bbfc5f2012-02-27 21:52:45 +00001288 if (lr_array[i]->num_blocks == 0) {
philippea22f59d2012-01-26 23:13:52 +00001289 // remove from lr_table the old loss_records with 0 blocks found
1290 VG_(OSetGen_Remove) (lr_table, &lr_array[i]->key);
philippe4bbfc5f2012-02-27 21:52:45 +00001291 VG_(OSetGen_FreeNode)(lr_table, lr_array[i]);
1292 } else {
philippea22f59d2012-01-26 23:13:52 +00001293 // move the leak sizes to old_* and zero the current sizes
1294 // for next leak search
1295 lr_array[i]->old_szB = lr_array[i]->szB;
1296 lr_array[i]->old_indirect_szB = lr_array[i]->indirect_szB;
1297 lr_array[i]->old_num_blocks = lr_array[i]->num_blocks;
1298 lr_array[i]->szB = 0;
1299 lr_array[i]->indirect_szB = 0;
1300 lr_array[i]->num_blocks = 0;
1301 }
1302 }
1303 // lr_array now contains "invalid" loss records => free it.
1304 // lr_array will be re-created below with the kept and new loss records.
1305 VG_(free) (lr_array);
1306 lr_array = NULL;
njn29a5c012009-05-06 06:15:55 +00001307
1308 // Convert the chunks into loss records, merging them where appropriate.
njn8225cc02009-03-09 22:52:24 +00001309 for (i = 0; i < lc_n_chunks; i++) {
njn29a5c012009-05-06 06:15:55 +00001310 MC_Chunk* ch = lc_chunks[i];
1311 LC_Extra* ex = &(lc_extras)[i];
1312 LossRecord* old_lr;
1313 LossRecordKey lrkey;
1314 lrkey.state = ex->state;
philippe8617b5b2013-01-12 19:53:08 +00001315 lrkey.allocated_at = MC_(allocated_at)(ch);
sewardjb5f6f512005-03-10 23:59:00 +00001316
philippeab1fce92013-09-29 13:47:32 +00001317 if (ex->heuristic) {
1318 MC_(bytes_heuristically_reachable)[ex->heuristic] += ch->szB;
1319 MC_(blocks_heuristically_reachable)[ex->heuristic]++;
1320 if (VG_DEBUG_LEAKCHECK)
1321 VG_(printf)("heuristic %s %#lx len %lu\n",
1322 pp_heuristic(ex->heuristic),
florian47755db2015-08-05 12:09:55 +00001323 ch->data, (SizeT)ch->szB);
philippeab1fce92013-09-29 13:47:32 +00001324 }
1325
njn29a5c012009-05-06 06:15:55 +00001326 old_lr = VG_(OSetGen_Lookup)(lr_table, &lrkey);
1327 if (old_lr) {
1328 // We found an existing loss record matching this chunk. Update the
1329 // loss record's details in-situ. This is safe because we don't
1330 // change the elements used as the OSet key.
1331 old_lr->szB += ch->szB;
philippea22f59d2012-01-26 23:13:52 +00001332 if (ex->state == Unreached)
1333 old_lr->indirect_szB += ex->IorC.indirect_szB;
njn29a5c012009-05-06 06:15:55 +00001334 old_lr->num_blocks++;
sewardjb5f6f512005-03-10 23:59:00 +00001335 } else {
njn29a5c012009-05-06 06:15:55 +00001336 // No existing loss record matches this chunk. Create a new loss
1337 // record, initialise it from the chunk, and insert it into lr_table.
1338 lr = VG_(OSetGen_AllocNode)(lr_table, sizeof(LossRecord));
1339 lr->key = lrkey;
1340 lr->szB = ch->szB;
philippea22f59d2012-01-26 23:13:52 +00001341 if (ex->state == Unreached)
1342 lr->indirect_szB = ex->IorC.indirect_szB;
1343 else
1344 lr->indirect_szB = 0;
njn29a5c012009-05-06 06:15:55 +00001345 lr->num_blocks = 1;
sewardjc8bd1df2011-06-26 12:41:33 +00001346 lr->old_szB = 0;
1347 lr->old_indirect_szB = 0;
1348 lr->old_num_blocks = 0;
njn29a5c012009-05-06 06:15:55 +00001349 VG_(OSetGen_Insert)(lr_table, lr);
sewardjb5f6f512005-03-10 23:59:00 +00001350 }
1351 }
1352
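// For example (hypothetical chunks): three Unreached chunks of 16, 16
// and 32 bytes allocated at the same ExeContext merge into a single
// loss record with num_blocks == 3 and szB == 64; chunks sharing a
// stack trace but differing in state land in distinct records, since
// (state, allocated_at) forms the OSet key.
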
philippea22f59d2012-01-26 23:13:52 +00001353 // (re-)create the array of pointers to the (new) loss records.
1354 n_lossrecords = get_lr_array_from_lr_table ();
1355 tl_assert(VG_(OSetGen_Size)(lr_table) == n_lossrecords);
njn29a5c012009-05-06 06:15:55 +00001356
1357 // Sort the array by loss record sizes.
1358 VG_(ssort)(lr_array, n_lossrecords, sizeof(LossRecord*),
1359 cmp_LossRecords);
1360
1361 // Zero totals.
njn8225cc02009-03-09 22:52:24 +00001362 MC_(blocks_leaked) = MC_(bytes_leaked) = 0;
1363 MC_(blocks_indirect) = MC_(bytes_indirect) = 0;
1364 MC_(blocks_dubious) = MC_(bytes_dubious) = 0;
1365 MC_(blocks_reachable) = MC_(bytes_reachable) = 0;
1366 MC_(blocks_suppressed) = MC_(bytes_suppressed) = 0;
1367
philippe84234902012-01-14 13:53:13 +00001368 // If there is a maximum nr of loss records we can output, then first
1369 // compute from where the output scan has to start.
1370 // By default, start from the first loss record. Compute a higher
1371 // value if there is a maximum to respect. We need to print the last
1372 // records, as the ones with the biggest sizes are more interesting.
1373 start_lr_output_scan = 0;
1374 if (lcp->mode == LC_Full && lcp->max_loss_records_output < n_lossrecords) {
1375 Int nr_printable_records = 0;
1376 for (i = n_lossrecords - 1; i >= 0 && start_lr_output_scan == 0; i--) {
1377 Bool count_as_error, print_record;
1378 lr = lr_array[i];
1379 get_printing_rules (lcp, lr, &count_as_error, &print_record);
1380 // Do not use get_printing_rules results for is_suppressed, as we
1381 // only want to check if the record would be suppressed.
1382 is_suppressed =
1383 MC_(record_leak_error) ( tid, i+1, n_lossrecords, lr,
1384 False /* print_record */,
1385 False /* count_as_error */);
1386 if (print_record && !is_suppressed) {
1387 nr_printable_records++;
1388 if (nr_printable_records == lcp->max_loss_records_output)
1389 start_lr_output_scan = i;
1390 }
sewardjc8bd1df2011-06-26 12:41:33 +00001391 }
philippe84234902012-01-14 13:53:13 +00001392 }
sewardjc8bd1df2011-06-26 12:41:33 +00001393
philippe84234902012-01-14 13:53:13 +00001394 // Print the loss records (in size order) and collect summary stats.
1395 for (i = start_lr_output_scan; i < n_lossrecords; i++) {
1396 Bool count_as_error, print_record;
1397 lr = lr_array[i];
1398 get_printing_rules(lcp, lr, &count_as_error, &print_record);
sewardjb5f6f512005-03-10 23:59:00 +00001399 is_suppressed =
njn18afe5d2009-08-10 08:25:39 +00001400 MC_(record_leak_error) ( tid, i+1, n_lossrecords, lr, print_record,
1401 count_as_error );
sewardjb5f6f512005-03-10 23:59:00 +00001402
1403 if (is_suppressed) {
njn29a5c012009-05-06 06:15:55 +00001404 MC_(blocks_suppressed) += lr->num_blocks;
1405 MC_(bytes_suppressed) += lr->szB;
sewardjb5f6f512005-03-10 23:59:00 +00001406
njn29a5c012009-05-06 06:15:55 +00001407 } else if (Unreached == lr->key.state) {
1408 MC_(blocks_leaked) += lr->num_blocks;
1409 MC_(bytes_leaked) += lr->szB;
sewardjb5f6f512005-03-10 23:59:00 +00001410
njn29a5c012009-05-06 06:15:55 +00001411 } else if (IndirectLeak == lr->key.state) {
1412 MC_(blocks_indirect) += lr->num_blocks;
1413 MC_(bytes_indirect) += lr->szB;
sewardjb5f6f512005-03-10 23:59:00 +00001414
njn29a5c012009-05-06 06:15:55 +00001415 } else if (Possible == lr->key.state) {
1416 MC_(blocks_dubious) += lr->num_blocks;
1417 MC_(bytes_dubious) += lr->szB;
sewardjb5f6f512005-03-10 23:59:00 +00001418
njn29a5c012009-05-06 06:15:55 +00001419 } else if (Reachable == lr->key.state) {
1420 MC_(blocks_reachable) += lr->num_blocks;
1421 MC_(bytes_reachable) += lr->szB;
sewardjb5f6f512005-03-10 23:59:00 +00001422
1423 } else {
njn8225cc02009-03-09 22:52:24 +00001424 VG_(tool_panic)("unknown loss mode");
sewardjb5f6f512005-03-10 23:59:00 +00001425 }
sewardjb5f6f512005-03-10 23:59:00 +00001426 }
sewardjb5f6f512005-03-10 23:59:00 +00001427
njn8225cc02009-03-09 22:52:24 +00001428 if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
floriancf6e7342014-09-28 13:29:06 +00001429 HChar d_bytes[31];
1430 HChar d_blocks[31];
philippeab1fce92013-09-29 13:47:32 +00001431# define DBY(new,old) \
floriancf6e7342014-09-28 13:29:06 +00001432 MC_(snprintf_delta) (d_bytes, sizeof(d_bytes), (new), (old), \
1433 lcp->deltamode)
philippeab1fce92013-09-29 13:47:32 +00001434# define DBL(new,old) \
floriancf6e7342014-09-28 13:29:06 +00001435 MC_(snprintf_delta) (d_blocks, sizeof(d_blocks), (new), (old), \
1436 lcp->deltamode)
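   // With lcp->deltamode == LCD_Increased, MC_(snprintf_delta) is expected
   // to render delta strings such as " (+16)"; a summary line then reads,
   // for example (hypothetical values):
   //    definitely lost: 48 (+16) bytes in 3 (+1) blocks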
sewardjc8bd1df2011-06-26 12:41:33 +00001437
sewardj6b523cd2009-07-15 14:49:40 +00001438 VG_(umsg)("LEAK SUMMARY:\n");
sewardjc8bd1df2011-06-26 12:41:33 +00001439 VG_(umsg)(" definitely lost: %'lu%s bytes in %'lu%s blocks\n",
1440 MC_(bytes_leaked),
philippeab1fce92013-09-29 13:47:32 +00001441 DBY (MC_(bytes_leaked), old_bytes_leaked),
sewardjc8bd1df2011-06-26 12:41:33 +00001442 MC_(blocks_leaked),
philippeab1fce92013-09-29 13:47:32 +00001443 DBL (MC_(blocks_leaked), old_blocks_leaked));
sewardjc8bd1df2011-06-26 12:41:33 +00001444 VG_(umsg)(" indirectly lost: %'lu%s bytes in %'lu%s blocks\n",
1445 MC_(bytes_indirect),
philippeab1fce92013-09-29 13:47:32 +00001446 DBY (MC_(bytes_indirect), old_bytes_indirect),
sewardjc8bd1df2011-06-26 12:41:33 +00001447 MC_(blocks_indirect),
philippeab1fce92013-09-29 13:47:32 +00001448 DBL (MC_(blocks_indirect), old_blocks_indirect));
sewardjc8bd1df2011-06-26 12:41:33 +00001449 VG_(umsg)(" possibly lost: %'lu%s bytes in %'lu%s blocks\n",
1450 MC_(bytes_dubious),
philippeab1fce92013-09-29 13:47:32 +00001451 DBY (MC_(bytes_dubious), old_bytes_dubious),
sewardjc8bd1df2011-06-26 12:41:33 +00001452 MC_(blocks_dubious),
philippeab1fce92013-09-29 13:47:32 +00001453 DBL (MC_(blocks_dubious), old_blocks_dubious));
sewardjc8bd1df2011-06-26 12:41:33 +00001454 VG_(umsg)(" still reachable: %'lu%s bytes in %'lu%s blocks\n",
1455 MC_(bytes_reachable),
philippeab1fce92013-09-29 13:47:32 +00001456 DBY (MC_(bytes_reachable), old_bytes_reachable),
sewardjc8bd1df2011-06-26 12:41:33 +00001457 MC_(blocks_reachable),
philippeab1fce92013-09-29 13:47:32 +00001458 DBL (MC_(blocks_reachable), old_blocks_reachable));
1459 for (i = 0; i < N_LEAK_CHECK_HEURISTICS; i++)
1460 if (old_blocks_heuristically_reachable[i] > 0
1461 || MC_(blocks_heuristically_reachable)[i] > 0) {
1462 VG_(umsg)(" of which "
1463 "reachable via heuristic:\n");
1464 break;
1465 }
1466 for (i = 0; i < N_LEAK_CHECK_HEURISTICS; i++)
1467 if (old_blocks_heuristically_reachable[i] > 0
1468 || MC_(blocks_heuristically_reachable)[i] > 0)
florian866862a2014-12-13 18:35:00 +00001469 VG_(umsg)(" %-19s: "
philippeab1fce92013-09-29 13:47:32 +00001470 "%'lu%s bytes in %'lu%s blocks\n",
1471 pp_heuristic(i),
1472 MC_(bytes_heuristically_reachable)[i],
1473 DBY (MC_(bytes_heuristically_reachable)[i],
1474 old_bytes_heuristically_reachable[i]),
1475 MC_(blocks_heuristically_reachable)[i],
1476 DBL (MC_(blocks_heuristically_reachable)[i],
1477 old_blocks_heuristically_reachable[i]));
sewardjc8bd1df2011-06-26 12:41:33 +00001478 VG_(umsg)(" suppressed: %'lu%s bytes in %'lu%s blocks\n",
1479 MC_(bytes_suppressed),
philippeab1fce92013-09-29 13:47:32 +00001480 DBY (MC_(bytes_suppressed), old_bytes_suppressed),
sewardjc8bd1df2011-06-26 12:41:33 +00001481 MC_(blocks_suppressed),
philippeab1fce92013-09-29 13:47:32 +00001482 DBL (MC_(blocks_suppressed), old_blocks_suppressed));
philippe84234902012-01-14 13:53:13 +00001483 if (lcp->mode != LC_Full &&
njn8225cc02009-03-09 22:52:24 +00001484 (MC_(blocks_leaked) + MC_(blocks_indirect) +
1485 MC_(blocks_dubious) + MC_(blocks_reachable)) > 0) {
philippe84234902012-01-14 13:53:13 +00001486 if (lcp->requested_by_monitor_command)
philippeab1fce92013-09-29 13:47:32 +00001487 VG_(umsg)("To see details of leaked memory, "
1488 "give 'full' arg to leak_check\n");
sewardjc8bd1df2011-06-26 12:41:33 +00001489 else
1490 VG_(umsg)("Rerun with --leak-check=full to see details "
1491 "of leaked memory\n");
njn8225cc02009-03-09 22:52:24 +00001492 }
philippe84234902012-01-14 13:53:13 +00001493 if (lcp->mode == LC_Full &&
philippeab1fce92013-09-29 13:47:32 +00001494 MC_(blocks_reachable) > 0 && !RiS(Reachable,lcp->show_leak_kinds)) {
sewardj6b523cd2009-07-15 14:49:40 +00001495 VG_(umsg)("Reachable blocks (those to which a pointer "
1496 "was found) are not shown.\n");
philippe84234902012-01-14 13:53:13 +00001497 if (lcp->requested_by_monitor_command)
sewardj30b3eca2011-06-28 08:20:39 +00001498 VG_(umsg)("To see them, add 'reachable any' args to leak_check\n");
sewardjc8bd1df2011-06-26 12:41:33 +00001499 else
1500 VG_(umsg)("To see them, rerun with: --leak-check=full "
philippe2193a7c2012-12-08 17:54:16 +00001501 "--show-leak-kinds=all\n");
sewardjb5f6f512005-03-10 23:59:00 +00001502 }
njnb6267bd2009-08-12 00:14:16 +00001503 VG_(umsg)("\n");
philippeab1fce92013-09-29 13:47:32 +00001504 #undef DBL
1505 #undef DBY
sewardjb5f6f512005-03-10 23:59:00 +00001506 }
1507}
1508
philippea22f59d2012-01-26 23:13:52 +00001509// Recursively print all indirectly leaked blocks collected in the clique.
philippe6d3cb492015-08-13 22:49:32 +00001510// Printing stops when *remaining reaches 0.
1511static void print_clique (Int clique, UInt level, UInt *remaining)
philippea22f59d2012-01-26 23:13:52 +00001512{
1513 Int ind;
florian47755db2015-08-05 12:09:55 +00001514 UInt i, n_lossrecords;
philippea22f59d2012-01-26 23:13:52 +00001515
1516 n_lossrecords = VG_(OSetGen_Size)(lr_table);
1517
philippe6d3cb492015-08-13 22:49:32 +00001518 for (ind = 0; ind < lc_n_chunks && *remaining > 0; ind++) {
philippea22f59d2012-01-26 23:13:52 +00001519 LC_Extra* ind_ex = &(lc_extras)[ind];
philippeab1fce92013-09-29 13:47:32 +00001520 if (ind_ex->state == IndirectLeak
1521 && ind_ex->IorC.clique == (SizeT) clique) {
philippea22f59d2012-01-26 23:13:52 +00001522 MC_Chunk* ind_ch = lc_chunks[ind];
1523 LossRecord* ind_lr;
1524 LossRecordKey ind_lrkey;
florian47755db2015-08-05 12:09:55 +00001525 UInt lr_i;
philippea22f59d2012-01-26 23:13:52 +00001526 ind_lrkey.state = ind_ex->state;
philippe8617b5b2013-01-12 19:53:08 +00001527 ind_lrkey.allocated_at = MC_(allocated_at)(ind_ch);
philippea22f59d2012-01-26 23:13:52 +00001528 ind_lr = VG_(OSetGen_Lookup)(lr_table, &ind_lrkey);
1529 for (lr_i = 0; lr_i < n_lossrecords; lr_i++)
1530 if (ind_lr == lr_array[lr_i])
1531 break;
1532 for (i = 0; i < level; i++)
1533 VG_(umsg)(" ");
florian47755db2015-08-05 12:09:55 +00001534 VG_(umsg)("%p[%lu] indirect loss record %u\n",
1535 (void *)ind_ch->data, (SizeT)ind_ch->szB,
philippea22f59d2012-01-26 23:13:52 +00001536 lr_i+1); // lr_i+1 for user numbering.
philippe6d3cb492015-08-13 22:49:32 +00001537 (*remaining)--;
philippea22f59d2012-01-26 23:13:52 +00001538 if (lr_i >= n_lossrecords)
1539 VG_(umsg)
1540 ("error: no indirect loss record found for %p[%lu]?????\n",
florian47755db2015-08-05 12:09:55 +00001541 (void *)ind_ch->data, (SizeT)ind_ch->szB);
philippe6d3cb492015-08-13 22:49:32 +00001542 print_clique(ind, level+1, remaining);
philippea22f59d2012-01-26 23:13:52 +00001543 }
1544 }
1545 }
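
// The output is an indented tree, one level per indirection step from
// the clique leader; for example (hypothetical addresses):
//    0x4C2F080[64] indirect loss record 2
//       0x4C2F100[16] indirect loss record 5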
1546
philippe6d3cb492015-08-13 22:49:32 +00001547Bool MC_(print_block_list) ( UInt loss_record_nr, UInt max_blocks)
philippea22f59d2012-01-26 23:13:52 +00001548{
florian47755db2015-08-05 12:09:55 +00001549 UInt i, n_lossrecords;
philippea22f59d2012-01-26 23:13:52 +00001550 LossRecord* lr;
philippe6d3cb492015-08-13 22:49:32 +00001551 UInt remaining = max_blocks;
philippea22f59d2012-01-26 23:13:52 +00001552
1553 if (lr_table == NULL || lc_chunks == NULL || lc_extras == NULL) {
1554 VG_(umsg)("Can't print block list : no valid leak search result\n");
1555 return False;
1556 }
1557
1558 if (lc_chunks_n_frees_marker != MC_(get_cmalloc_n_frees)()) {
1559 VG_(umsg)("Can't print obsolete block list : redo a leak search first\n");
1560 return False;
1561 }
1562
1563 n_lossrecords = VG_(OSetGen_Size)(lr_table);
1564 if (loss_record_nr >= n_lossrecords)
1565 return False; // Invalid loss record nr.
1566
1567 tl_assert (lr_array);
1568 lr = lr_array[loss_record_nr];
1569
1570 // (re-)print the loss record details.
1571 // (+1 on loss_record_nr as user numbering for loss records starts at 1).
1572 MC_(pp_LossRecord)(loss_record_nr+1, n_lossrecords, lr);
1573
1574 // Match the chunks with loss records.
philippe6d3cb492015-08-13 22:49:32 +00001575 for (i = 0; i < lc_n_chunks && remaining > 0; i++) {
philippea22f59d2012-01-26 23:13:52 +00001576 MC_Chunk* ch = lc_chunks[i];
1577 LC_Extra* ex = &(lc_extras)[i];
1578 LossRecord* old_lr;
1579 LossRecordKey lrkey;
1580 lrkey.state = ex->state;
philippe8617b5b2013-01-12 19:53:08 +00001581 lrkey.allocated_at = MC_(allocated_at)(ch);
philippea22f59d2012-01-26 23:13:52 +00001582
1583 old_lr = VG_(OSetGen_Lookup)(lr_table, &lrkey);
1584 if (old_lr) {
1585 // We found an existing loss record matching this chunk.
philippeab1fce92013-09-29 13:47:32 +00001586 // If this is the loss record we are looking for, output the pointer.
philippea22f59d2012-01-26 23:13:52 +00001587 if (old_lr == lr_array[loss_record_nr]) {
1588 VG_(umsg)("%p[%lu]\n",
florian47755db2015-08-05 12:09:55 +00001589 (void *)ch->data, (SizeT)ch->szB);
philippe6d3cb492015-08-13 22:49:32 +00001590 remaining--;
philippea22f59d2012-01-26 23:13:52 +00001591 if (ex->state != Reachable) {
1592 // We can print the clique in all states, except Reachable.
1593 // In Unreached state, lc_chunk[i] is the clique leader.
1594 // In IndirectLeak, lc_chunk[i] might have been a clique leader
1595 // which was later collected in another clique.
1596 // For Possible, lc_chunk[i] might be the top of a clique
1597 // or an intermediate clique.
philippe6d3cb492015-08-13 22:49:32 +00001598 print_clique(i, 1, &remaining);
philippea22f59d2012-01-26 23:13:52 +00001599 }
1600 }
1601 } else {
1602 // No existing loss record matches this chunk ???
1603 VG_(umsg)("error: no loss record found for %p[%lu]?????\n",
florian47755db2015-08-05 12:09:55 +00001604 (void *)ch->data, (SizeT)ch->szB);
philippea22f59d2012-01-26 23:13:52 +00001605 }
1606 }
1607 return True;
1608}
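
// This is typically driven from the gdbserver monitor interface, e.g.
// (hypothetical loss record number):
//    (gdb) monitor block_list 7
// which re-prints loss record 7 followed by the address and size of
// each of its blocks, and of any indirectly leaked blocks they lead to.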
1609
1610// If searched == 0, scan the memory root set, pushing the blocks
1611// encountered onto the mark stack.
philippeab1fce92013-09-29 13:47:32 +00001612// Otherwise (searched != 0), scan the memory root set searching for pointers
1613// pointing inside [searched, searched+szB[.
philippea22f59d2012-01-26 23:13:52 +00001614static void scan_memory_root_set(Addr searched, SizeT szB)
1615{
1616 Int i;
1617 Int n_seg_starts;
florianea8a88c2015-02-20 14:00:23 +00001618 Addr* seg_starts = VG_(get_segment_starts)( SkFileC | SkAnonC | SkShmC,
1619 &n_seg_starts );
philippea22f59d2012-01-26 23:13:52 +00001620
1621 tl_assert(seg_starts && n_seg_starts > 0);
1622
1623 lc_scanned_szB = 0;
philippe7a76f4b2013-10-06 21:23:04 +00001624 lc_sig_skipped_szB = 0;
philippea22f59d2012-01-26 23:13:52 +00001625
1626 // VG_(am_show_nsegments)( 0, "leakcheck");
1627 for (i = 0; i < n_seg_starts; i++) {
1628 SizeT seg_size;
1629 NSegment const* seg = VG_(am_find_nsegment)( seg_starts[i] );
1630 tl_assert(seg);
florianea8a88c2015-02-20 14:00:23 +00001631 tl_assert(seg->kind == SkFileC || seg->kind == SkAnonC ||
1632 seg->kind == SkShmC);
philippea22f59d2012-01-26 23:13:52 +00001633
philippea22f59d2012-01-26 23:13:52 +00001634 if (!(seg->hasR && seg->hasW)) continue;
1635 if (seg->isCH) continue;
1636
1637 // Don't poke around in device segments as this may cause
florian5d3d43d2015-02-20 16:46:50 +00001638 // hangs. Include /dev/zero just in case someone allocated
philippea22f59d2012-01-26 23:13:52 +00001639 // memory by explicitly mapping /dev/zero.
1640 if (seg->kind == SkFileC
1641 && (VKI_S_ISCHR(seg->mode) || VKI_S_ISBLK(seg->mode))) {
floriand3166c42015-01-24 00:02:19 +00001642 const HChar* dev_name = VG_(am_get_filename)( seg );
philippea22f59d2012-01-26 23:13:52 +00001643 if (dev_name && 0 == VG_(strcmp)(dev_name, "/dev/zero")) {
1644 // Don't skip /dev/zero.
1645 } else {
1646 // Skip this device mapping.
1647 continue;
1648 }
1649 }
1650
1651 if (0)
1652 VG_(printf)("ACCEPT %2d %#lx %#lx\n", i, seg->start, seg->end);
1653
1654 // Scan the segment. We use -1 for the clique number, because this
1655 // is a root-set.
1656 seg_size = seg->end - seg->start + 1;
1657 if (VG_(clo_verbosity) > 2) {
1658 VG_(message)(Vg_DebugMsg,
1659 " Scanning root segment: %#lx..%#lx (%lu)\n",
1660 seg->start, seg->end, seg_size);
1661 }
1662 lc_scan_memory(seg->start, seg_size, /*is_prior_definite*/True,
1663 /*clique*/-1, /*cur_clique*/-1,
1664 searched, szB);
1665 }
philippe7d69fd92012-02-26 21:26:00 +00001666 VG_(free)(seg_starts);
philippea22f59d2012-01-26 23:13:52 +00001667}
1668
njn8225cc02009-03-09 22:52:24 +00001669/*------------------------------------------------------------*/
1670/*--- Top-level entry point. ---*/
1671/*------------------------------------------------------------*/
sewardj3cf26a52006-07-27 23:48:53 +00001672
philippe84234902012-01-14 13:53:13 +00001673void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckParams* lcp)
njn43c799e2003-04-08 00:08:52 +00001674{
njnb965efb2009-08-10 07:36:54 +00001675 Int i, j;
njn43c799e2003-04-08 00:08:52 +00001676
philippe84234902012-01-14 13:53:13 +00001677 tl_assert(lcp->mode != LC_Off);
sewardjc8bd1df2011-06-26 12:41:33 +00001678
philippe57a16a22012-07-18 22:26:51 +00001679 // Verify some assertions which are used in lc_scan_memory.
1680 tl_assert((VKI_PAGE_SIZE % sizeof(Addr)) == 0);
1681 tl_assert((SM_SIZE % sizeof(Addr)) == 0);
1682 // The above two assertions are critical, while the assertion below
1683 // ensures that the optimisation in the loop is done in the
1684 // correct order: the loop checks for (big) SM chunk skipping
1685 // before checking for (smaller) page skipping.
1686 tl_assert((SM_SIZE % VKI_PAGE_SIZE) == 0);
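
   // For example, with typical values VKI_PAGE_SIZE == 4096 and
   // SM_SIZE == 64kB (not guaranteed on every platform), the scan can
   // first test whether a whole 64kB secondary-map chunk is inaccessible
   // and skip it in one step, before falling back to page-sized skips.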
1687
philippe4e32d672013-10-17 22:10:41 +00001688 MC_(leak_search_gen)++;
philippe84234902012-01-14 13:53:13 +00001689 MC_(detect_memory_leaks_last_delta_mode) = lcp->deltamode;
philippeab1fce92013-09-29 13:47:32 +00001690 detect_memory_leaks_last_heuristics = lcp->heuristics;
njn43c799e2003-04-08 00:08:52 +00001691
njn8225cc02009-03-09 22:52:24 +00001692 // Get the chunks, stop if there were none.
philippea22f59d2012-01-26 23:13:52 +00001693 if (lc_chunks) {
1694 VG_(free)(lc_chunks);
1695 lc_chunks = NULL;
1696 }
njn8225cc02009-03-09 22:52:24 +00001697 lc_chunks = find_active_chunks(&lc_n_chunks);
philippea22f59d2012-01-26 23:13:52 +00001698 lc_chunks_n_frees_marker = MC_(get_cmalloc_n_frees)();
njn8225cc02009-03-09 22:52:24 +00001699 if (lc_n_chunks == 0) {
1700 tl_assert(lc_chunks == NULL);
sewardjc8bd1df2011-06-26 12:41:33 +00001701 if (lr_table != NULL) {
philippea22f59d2012-01-26 23:13:52 +00001702 // forget the previously recorded LossRecords, as the next leak search
1703 // can in any case just create new leaks.
sewardjc8bd1df2011-06-26 12:41:33 +00001704 // Maybe it would be better to call print_results instead?
philippea22f59d2012-01-26 23:13:52 +00001705 // (at least when leak decreases are requested)
sewardjc8bd1df2011-06-26 12:41:33 +00001706 // That would then output all LossRecords with a size decreasing to 0.
1707 VG_(OSetGen_Destroy) (lr_table);
philippea22f59d2012-01-26 23:13:52 +00001708 lr_table = NULL;
sewardjc8bd1df2011-06-26 12:41:33 +00001709 }
sewardj71bc3cb2005-05-19 00:25:45 +00001710 if (VG_(clo_verbosity) >= 1 && !VG_(clo_xml)) {
njnb6267bd2009-08-12 00:14:16 +00001711 VG_(umsg)("All heap blocks were freed -- no leaks are possible\n");
sewardj2d9e8742009-08-07 15:46:56 +00001712 VG_(umsg)("\n");
sewardj37d06f22003-09-17 21:48:26 +00001713 }
njn43c799e2003-04-08 00:08:52 +00001714 return;
1715 }
1716
njn8225cc02009-03-09 22:52:24 +00001717 // Sort the array so blocks are in ascending order in memory.
1718 VG_(ssort)(lc_chunks, lc_n_chunks, sizeof(VgHashNode*), compare_MC_Chunks);
njn43c799e2003-04-08 00:08:52 +00001719
njn8225cc02009-03-09 22:52:24 +00001720 // Sanity check -- make sure they're in order.
1721 for (i = 0; i < lc_n_chunks-1; i++) {
1722 tl_assert( lc_chunks[i]->data <= lc_chunks[i+1]->data);
1723 }
njn43c799e2003-04-08 00:08:52 +00001724
njnb965efb2009-08-10 07:36:54 +00001725 // Sanity check -- make sure they don't overlap. The one exception is that
1726 // we allow a MALLOCLIKE block to sit entirely within a malloc() block.
1727 // This is for bug 100628. If this occurs, we ignore the malloc() block
1728 // for leak-checking purposes. This is a hack and probably should be done
1729 // better, but at least it's consistent with mempools (which are treated
1730 // like this in find_active_chunks). Mempools have a separate VgHashTable
1731 // for mempool chunks, but if custom-allocated blocks are put in a separate
1732 // table from normal heap blocks it makes free-mismatch checking more
1733 // difficult.
1734 //
1735 // If this check fails, it probably means that the application
njn8225cc02009-03-09 22:52:24 +00001736 // has done something stupid with VALGRIND_MALLOCLIKE_BLOCK client
njnb965efb2009-08-10 07:36:54 +00001737 // requests, eg. has made overlapping requests (which are
1738 // nonsensical), or used VALGRIND_MALLOCLIKE_BLOCK for stack locations;
1739 // again nonsensical.
1740 //
njn8225cc02009-03-09 22:52:24 +00001741 for (i = 0; i < lc_n_chunks-1; i++) {
1742 MC_Chunk* ch1 = lc_chunks[i];
1743 MC_Chunk* ch2 = lc_chunks[i+1];
njnb965efb2009-08-10 07:36:54 +00001744
1745 Addr start1 = ch1->data;
1746 Addr start2 = ch2->data;
1747 Addr end1 = ch1->data + ch1->szB - 1;
1748 Addr end2 = ch2->data + ch2->szB - 1;
1749 Bool isCustom1 = ch1->allockind == MC_AllocCustom;
1750 Bool isCustom2 = ch2->allockind == MC_AllocCustom;
1751
1752 if (end1 < start2) {
1753 // Normal case - no overlap.
1754
1755 // We used to allow exact duplicates, I'm not sure why. --njn
1756 //} else if (start1 == start2 && end1 == end2) {
1757 // Degenerate case: exact duplicates.
1758
1759 } else if (start1 >= start2 && end1 <= end2 && isCustom1 && !isCustom2) {
1760 // Block i is MALLOCLIKE and entirely within block i+1.
1761 // Remove block i+1.
1762 for (j = i+1; j < lc_n_chunks-1; j++) {
1763 lc_chunks[j] = lc_chunks[j+1];
1764 }
1765 lc_n_chunks--;
1766
1767 } else if (start2 >= start1 && end2 <= end1 && isCustom2 && !isCustom1) {
1768 // Block i+1 is MALLOCLIKE and entirely within block i.
1769 // Remove block i.
1770 for (j = i; j < lc_n_chunks-1; j++) {
1771 lc_chunks[j] = lc_chunks[j+1];
1772 }
1773 lc_n_chunks--;
1774
1775 } else {
philippe09007e32012-03-01 22:00:36 +00001776 VG_(umsg)("Block 0x%lx..0x%lx overlaps with block 0x%lx..0x%lx\n",
bart3c4fa9f2011-05-09 10:46:55 +00001777 start1, end1, start2, end2);
philippe09007e32012-03-01 22:00:36 +00001778 VG_(umsg)("Blocks allocation contexts:\n"),
philippe8617b5b2013-01-12 19:53:08 +00001779 VG_(pp_ExeContext)( MC_(allocated_at)(ch1));
philippe09007e32012-03-01 22:00:36 +00001780 VG_(umsg)("\n"),
philippe8617b5b2013-01-12 19:53:08 +00001781 VG_(pp_ExeContext)( MC_(allocated_at)(ch2));
njnb965efb2009-08-10 07:36:54 +00001782 VG_(umsg)("This is usually caused by using VALGRIND_MALLOCLIKE_BLOCK");
philippe09007e32012-03-01 22:00:36 +00001783 VG_(umsg)("in an inappropriate way.\n");
njnb965efb2009-08-10 07:36:54 +00001784 tl_assert (0);
njn8225cc02009-03-09 22:52:24 +00001785 }
njn8225cc02009-03-09 22:52:24 +00001786 }
1787
1788 // Initialise lc_extras.
philippea22f59d2012-01-26 23:13:52 +00001789 if (lc_extras) {
1790 VG_(free)(lc_extras);
1791 lc_extras = NULL;
1792 }
njn8225cc02009-03-09 22:52:24 +00001793 lc_extras = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(LC_Extra) );
1794 for (i = 0; i < lc_n_chunks; i++) {
1795 lc_extras[i].state = Unreached;
tom1d0f3f62010-10-04 20:55:21 +00001796 lc_extras[i].pending = False;
philippeab1fce92013-09-29 13:47:32 +00001797 lc_extras[i].heuristic = LchNone;
philippea22f59d2012-01-26 23:13:52 +00001798 lc_extras[i].IorC.indirect_szB = 0;
njn8225cc02009-03-09 22:52:24 +00001799 }
1800
1801 // Initialise lc_markstack.
1802 lc_markstack = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(Int) );
1803 for (i = 0; i < lc_n_chunks; i++) {
1804 lc_markstack[i] = -1;
sewardjb5f6f512005-03-10 23:59:00 +00001805 }
1806 lc_markstack_top = -1;
njn43c799e2003-04-08 00:08:52 +00001807
njn8225cc02009-03-09 22:52:24 +00001808 // Verbosity.
sewardj2d9e8742009-08-07 15:46:56 +00001809 if (VG_(clo_verbosity) > 1 && !VG_(clo_xml)) {
njnb6267bd2009-08-12 00:14:16 +00001810 VG_(umsg)( "Searching for pointers to %'d not-freed blocks\n",
sewardj6b523cd2009-07-15 14:49:40 +00001811 lc_n_chunks );
sewardj2d9e8742009-08-07 15:46:56 +00001812 }
sewardjb5f6f512005-03-10 23:59:00 +00001813
njn8225cc02009-03-09 22:52:24 +00001814 // Scan the memory root-set, pushing onto the mark stack any blocks
1815 // pointed to.
philippea22f59d2012-01-26 23:13:52 +00001816 scan_memory_root_set(/*searched*/0, 0);
sewardjb5f6f512005-03-10 23:59:00 +00001817
njn8225cc02009-03-09 22:52:24 +00001818 // Scan GP registers for chunk pointers.
1819 VG_(apply_to_GP_regs)(lc_push_if_a_chunk_ptr_register);
sewardjb5f6f512005-03-10 23:59:00 +00001820
njn8225cc02009-03-09 22:52:24 +00001821 // Process the pushed blocks. After this, every block that is reachable
1822 // from the root-set has been traced.
1823 lc_process_markstack(/*clique*/-1);
njn43c799e2003-04-08 00:08:52 +00001824
njnb6267bd2009-08-12 00:14:16 +00001825 if (VG_(clo_verbosity) > 1 && !VG_(clo_xml)) {
1826 VG_(umsg)("Checked %'lu bytes\n", lc_scanned_szB);
philippe7a76f4b2013-10-06 21:23:04 +00001827 if (lc_sig_skipped_szB > 0)
1828 VG_(umsg)("Skipped %'lu bytes due to read errors\n",
1829 lc_sig_skipped_szB);
njnb6267bd2009-08-12 00:14:16 +00001830 VG_(umsg)( "\n" );
1831 }
njn43c799e2003-04-08 00:08:52 +00001832
njn8225cc02009-03-09 22:52:24 +00001833 // Trace all the leaked blocks to determine which are directly leaked and
1834 // which are indirectly leaked. For each Unreached block, push it onto
1835 // the mark stack, and find all the as-yet-Unreached blocks reachable
1836 // from it. These form a clique and are marked IndirectLeak, and their
1837 // size is added to the clique leader's indirect size. If one of the
1838 // found blocks was itself a clique leader (from a previous clique), then
1839 // the cliques are merged.
1840 for (i = 0; i < lc_n_chunks; i++) {
1841 MC_Chunk* ch = lc_chunks[i];
1842 LC_Extra* ex = &(lc_extras[i]);
njn43c799e2003-04-08 00:08:52 +00001843
njn8225cc02009-03-09 22:52:24 +00001844 if (VG_DEBUG_CLIQUE)
1845 VG_(printf)("cliques: %d at %#lx -> Loss state %d\n",
1846 i, ch->data, ex->state);
njn43c799e2003-04-08 00:08:52 +00001847
njn8225cc02009-03-09 22:52:24 +00001848 tl_assert(lc_markstack_top == -1);
1849
1850 if (ex->state == Unreached) {
1851 if (VG_DEBUG_CLIQUE)
1852 VG_(printf)("%d: gathering clique %#lx\n", i, ch->data);
1853
1854 // Push this Unreached block onto the stack and process it.
1855 lc_push(i, ch);
philippea22f59d2012-01-26 23:13:52 +00001856 lc_process_markstack(/*clique*/i);
njn8225cc02009-03-09 22:52:24 +00001857
1858 tl_assert(lc_markstack_top == -1);
1859 tl_assert(ex->state == Unreached);
nethercote0f19bce2003-12-02 10:17:44 +00001860 }
njn43c799e2003-04-08 00:08:52 +00001861 }
njn8225cc02009-03-09 22:52:24 +00001862
sewardjc8bd1df2011-06-26 12:41:33 +00001863 print_results( tid, lcp);
njn43c799e2003-04-08 00:08:52 +00001864
sewardjb5f6f512005-03-10 23:59:00 +00001865 VG_(free) ( lc_markstack );
philippea22f59d2012-01-26 23:13:52 +00001866 lc_markstack = NULL;
1867 // lc_chunks, lc_extras, lr_array and lr_table are kept (needed if user
1868 // calls MC_(print_block_list)). lr_table also used for delta leak reporting
1869 // between this leak search and the next leak search.
1870}
1871
1872static Addr searched_wpa;
1873static SizeT searched_szB;
1874static void
florian6bd9dc12012-11-23 16:17:43 +00001875search_address_in_GP_reg(ThreadId tid, const HChar* regname, Addr addr_in_reg)
philippea22f59d2012-01-26 23:13:52 +00001876{
1877 if (addr_in_reg >= searched_wpa
1878 && addr_in_reg < searched_wpa + searched_szB) {
1879 if (addr_in_reg == searched_wpa)
1880 VG_(umsg)
floriande3df032015-08-04 21:26:10 +00001881 ("tid %u register %s pointing at %#lx\n",
philippea22f59d2012-01-26 23:13:52 +00001882 tid, regname, searched_wpa);
1883 else
1884 VG_(umsg)
floriande3df032015-08-04 21:26:10 +00001885 ("tid %u register %s interior pointing %lu bytes inside %#lx\n",
philippea22f59d2012-01-26 23:13:52 +00001886 tid, regname, (long unsigned) addr_in_reg - searched_wpa,
1887 searched_wpa);
1888 }
1889}
1890
1891void MC_(who_points_at) ( Addr address, SizeT szB)
1892{
1893 MC_Chunk** chunks;
1894 Int n_chunks;
1895 Int i;
1896
1897 if (szB == 1)
1898 VG_(umsg) ("Searching for pointers to %#lx\n", address);
1899 else
1900 VG_(umsg) ("Searching for pointers pointing in %lu bytes from %#lx\n",
1901 szB, address);
1902
philippeab1fce92013-09-29 13:47:32 +00001903 chunks = find_active_chunks(&n_chunks);
1904
philippea22f59d2012-01-26 23:13:52 +00001905 // Scan memory root-set, searching for ptr pointing in address[szB]
1906 scan_memory_root_set(address, szB);
1907
1908 // Scan active malloc-ed chunks
philippea22f59d2012-01-26 23:13:52 +00001909 for (i = 0; i < n_chunks; i++) {
1910 lc_scan_memory(chunks[i]->data, chunks[i]->szB,
1911 /*is_prior_definite*/True,
1912 /*clique*/-1, /*cur_clique*/-1,
1913 address, szB);
1914 }
1915 VG_(free) ( chunks );
1916
1917 // Scan GP registers for pointers to address range.
1918 searched_wpa = address;
1919 searched_szB = szB;
1920 VG_(apply_to_GP_regs)(search_address_in_GP_reg);
1921
njn43c799e2003-04-08 00:08:52 +00001922}
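
// Typically reached via the gdbserver monitor command, e.g.
// (hypothetical address):
//    (gdb) monitor who_points_at 0x4C2F080 16
// which reports root-set words, live heap words and GP registers
// holding a pointer to (or into) the given range.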
1923
1924/*--------------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +00001925/*--- end ---*/
njn43c799e2003-04-08 00:08:52 +00001926/*--------------------------------------------------------------------*/
1927